Diffstat (limited to 'drivers/net')
777 files changed, 78583 insertions, 28654 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 0936da592e12..944ec3c9282c 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -497,4 +497,15 @@ config THUNDERBOLT_NET source "drivers/net/hyperv/Kconfig" +config NETDEVSIM + tristate "Simulated networking device" + depends on DEBUG_FS + help + This driver is a developer testing tool and software model that can + be used to test various control path networking APIs, especially + HW-offload related. + + To compile this driver as a module, choose M here: the module + will be called netdevsim. + endif # NETDEVICES diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 766f62d02a0b..04c3b747812c 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -78,3 +78,4 @@ obj-$(CONFIG_FUJITSU_ES) += fjes/ thunderbolt-net-y += thunderbolt.o obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o +obj-$(CONFIG_NETDEVSIM) += netdevsim/ diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 8a9b085c2a98..58c705f24f96 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1431,13 +1431,9 @@ static int bond_option_ad_actor_system_set(struct bonding *bond, { u8 macaddr[ETH_ALEN]; u8 *mac; - int i; if (newval->string) { - i = sscanf(newval->string, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", - &macaddr[0], &macaddr[1], &macaddr[2], - &macaddr[3], &macaddr[4], &macaddr[5]); - if (i != ETH_ALEN) + if (!mac_pton(newval->string, macaddr)) goto err; mac = macaddr; } else { diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index b8029ea03307..433a14b9f731 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -264,7 +264,6 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi) } /* Create payload CAIF frames. */ - pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; while (nfrms < CFHSI_MAX_PKTS) { struct caif_payload_info *info; int hpad; diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c index d065c0e2d18e..406b4847e5dc 100644 --- a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c @@ -251,14 +251,14 @@ static void c_can_pci_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -static struct c_can_pci_data c_can_sta2x11= { +static const struct c_can_pci_data c_can_sta2x11= { .type = BOSCH_C_CAN, .reg_align = C_CAN_REG_ALIGN_32, .freq = 52000000, /* 52 Mhz */ .bar = 0, }; -static struct c_can_pci_data c_can_pch = { +static const struct c_can_pci_data c_can_pch = { .type = BOSCH_C_CAN, .reg_align = C_CAN_REG_32, .freq = 50000000, /* 50 MHz */ diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 365a8cc62405..b1779566c5bb 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -27,6 +27,7 @@ #include <linux/can/skb.h> #include <linux/can/netlink.h> #include <linux/can/led.h> +#include <linux/of.h> #include <net/rtnetlink.h> #define MOD_DESC "CAN device driver interface" @@ -411,7 +412,7 @@ EXPORT_SYMBOL_GPL(can_change_state); * Local echo of CAN messages * * CAN network devices *should* support a local echo functionality - * (see Documentation/networking/can.txt). To test the handling of CAN + * (see Documentation/networking/can.rst). To test the handling of CAN * interfaces that do not support the local echo both driver types are * implemented. In the case that the driver does not support the echo * the IFF_ECHO remains clear in dev->flags. 
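The bonding hunk above replaces an open-coded six-field sscanf() of a colon-separated MAC address with the kernel's mac_pton() helper. A minimal sketch of that parsing pattern, assuming a hypothetical standalone helper (example_parse_mac and its error values are illustrative, not part of the patch):

	#include <linux/kernel.h>	/* mac_pton() */
	#include <linux/etherdevice.h>	/* is_valid_ether_addr(), ETH_ALEN */

	static int example_parse_mac(const char *str, u8 mac[ETH_ALEN])
	{
		/* mac_pton() returns false unless all six hex octets parse */
		if (!mac_pton(str, mac))
			return -EINVAL;
		/* like the bonding code, reject zero/multicast addresses */
		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;
		return 0;
	}

mac_pton() only accepts the full "xx:xx:xx:xx:xx:xx" form, so the field-count check the sscanf() needed comes for free.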
This causes the PF_CAN core @@ -814,6 +815,29 @@ int open_candev(struct net_device *dev) } EXPORT_SYMBOL_GPL(open_candev); +#ifdef CONFIG_OF +/* Common function that can be used to understand the limitation of + * a transceiver when it provides no means to determine these limitations + * at runtime. + */ +void of_can_transceiver(struct net_device *dev) +{ + struct device_node *dn; + struct can_priv *priv = netdev_priv(dev); + struct device_node *np = dev->dev.parent->of_node; + int ret; + + dn = of_get_child_by_name(np, "can-transceiver"); + if (!dn) + return; + + ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max); + if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max)) + netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n"); +} +EXPORT_SYMBOL_GPL(of_can_transceiver); +#endif + /* * Common close function for cleanup before the device gets closed. * @@ -913,6 +937,13 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], priv->bitrate_const_cnt); if (err) return err; + + if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) { + netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n", + priv->bitrate_max); + return -EINVAL; + } + memcpy(&priv->bittiming, &bt, sizeof(bt)); if (priv->do_set_bittiming) { @@ -997,6 +1028,13 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], priv->data_bitrate_const_cnt); if (err) return err; + + if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) { + netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n", + priv->bitrate_max); + return -EINVAL; + } + memcpy(&priv->data_bittiming, &dbt, sizeof(dbt)); if (priv->do_set_data_bittiming) { @@ -1064,6 +1102,7 @@ static size_t can_get_size(const struct net_device *dev) if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */ size += nla_total_size(sizeof(*priv->data_bitrate_const) * priv->data_bitrate_const_cnt); + size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */ return size; } @@ -1121,7 +1160,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST, sizeof(*priv->data_bitrate_const) * priv->data_bitrate_const_cnt, - priv->data_bitrate_const)) + priv->data_bitrate_const)) || + + (nla_put(skb, IFLA_CAN_BITRATE_MAX, + sizeof(priv->bitrate_max), + &priv->bitrate_max)) ) return -EMSGSIZE; diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 760d2c07e3a2..634c51e6b8ae 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -190,6 +190,7 @@ * MX53 FlexCAN2 03.00.00.00 yes no no no no * MX6s FlexCAN3 10.00.12.00 yes yes no no yes * VF610 FlexCAN3 ? no yes no yes yes? + * LS1021A FlexCAN2 03.00.04.00 no yes no no yes * * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. 
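The of_can_transceiver() helper added above reads a fixed transceiver limitation from the device tree rather than from hardware. A sketch of the expected binding shape and call site, assuming an illustrative node (the "can-transceiver" child and "max-bitrate" property names come from the hunk; the surrounding node and probe code are hypothetical):

	/* Device-tree fragment (illustrative):
	 *
	 *	can@2180000 {
	 *		compatible = "fsl,ls1021ar2-flexcan";
	 *		can-transceiver {
	 *			max-bitrate = <5000000>;
	 *		};
	 *	};
	 */
	#include <linux/can/dev.h>

	static int example_register_candev(struct net_device *dev)
	{
		/* fills priv->bitrate_max when the child node is present;
		 * the can_changelink() checks added above then reject any
		 * requested arbitration or data bitrate beyond it
		 */
		of_can_transceiver(dev);
		return register_candev(dev);
	}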
*/ @@ -279,6 +280,10 @@ struct flexcan_priv { struct clk *clk_per; const struct flexcan_devtype_data *devtype_data; struct regulator *reg_xceiver; + + /* Read and Write APIs */ + u32 (*read)(void __iomem *addr); + void (*write)(u32 val, void __iomem *addr); }; static const struct flexcan_devtype_data fsl_p1010_devtype_data = { @@ -301,6 +306,12 @@ static const struct flexcan_devtype_data fsl_vf610_devtype_data = { FLEXCAN_QUIRK_BROKEN_PERR_STATE, }; +static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = { + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE | + FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, +}; + static const struct can_bittiming_const flexcan_bittiming_const = { .name = DRV_NAME, .tseg1_min = 4, @@ -313,39 +324,45 @@ static const struct can_bittiming_const flexcan_bittiming_const = { .brp_inc = 1, }; -/* Abstract off the read/write for arm versus ppc. This - * assumes that PPC uses big-endian registers and everything - * else uses little-endian registers, independent of CPU - * endianness. +/* FlexCAN module is essentially modelled as a little-endian IP in most + * SoCs, i.e the registers as well as the message buffer areas are + * implemented in a little-endian fashion. + * + * However there are some SoCs (e.g. LS1021A) which implement the FlexCAN + * module in a big-endian fashion (i.e the registers as well as the + * message buffer areas are implemented in a big-endian way). + * + * In addition, the FlexCAN module can be found on SoCs having ARM or + * PPC cores. So, we need to abstract off the register read/write + * functions, ensuring that these cater to all the combinations of module + * endianness and underlying CPU endianness. */ -#if defined(CONFIG_PPC) -static inline u32 flexcan_read(void __iomem *addr) +static inline u32 flexcan_read_be(void __iomem *addr) { - return in_be32(addr); + return ioread32be(addr); } -static inline void flexcan_write(u32 val, void __iomem *addr) +static inline void flexcan_write_be(u32 val, void __iomem *addr) { - out_be32(addr, val); + iowrite32be(val, addr); } -#else -static inline u32 flexcan_read(void __iomem *addr) + +static inline u32 flexcan_read_le(void __iomem *addr) { - return readl(addr); + return ioread32(addr); } -static inline void flexcan_write(u32 val, void __iomem *addr) +static inline void flexcan_write_le(u32 val, void __iomem *addr) { - writel(val, addr); + iowrite32(val, addr); } -#endif static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK); - flexcan_write(reg_ctrl, &regs->ctrl); + priv->write(reg_ctrl, &regs->ctrl); } static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv) @@ -353,7 +370,7 @@ static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK); - flexcan_write(reg_ctrl, &regs->ctrl); + priv->write(reg_ctrl, &regs->ctrl); } static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) @@ -378,14 +395,14 @@ static int flexcan_chip_enable(struct flexcan_priv *priv) unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; - reg = flexcan_read(&regs->mcr); + reg = priv->read(&regs->mcr); reg &= ~FLEXCAN_MCR_MDIS; - flexcan_write(reg, &regs->mcr); + priv->write(reg, &regs->mcr); - while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) + 
while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) udelay(10); - if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK) + if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK) return -ETIMEDOUT; return 0; @@ -397,14 +414,14 @@ static int flexcan_chip_disable(struct flexcan_priv *priv) unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; - reg = flexcan_read(&regs->mcr); + reg = priv->read(&regs->mcr); reg |= FLEXCAN_MCR_MDIS; - flexcan_write(reg, &regs->mcr); + priv->write(reg, &regs->mcr); - while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) + while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) udelay(10); - if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) + if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) return -ETIMEDOUT; return 0; @@ -416,14 +433,14 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv) unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate; u32 reg; - reg = flexcan_read(&regs->mcr); + reg = priv->read(&regs->mcr); reg |= FLEXCAN_MCR_HALT; - flexcan_write(reg, &regs->mcr); + priv->write(reg, &regs->mcr); - while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) + while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) udelay(100); - if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) + if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) return -ETIMEDOUT; return 0; @@ -435,14 +452,14 @@ static int flexcan_chip_unfreeze(struct flexcan_priv *priv) unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; - reg = flexcan_read(&regs->mcr); + reg = priv->read(&regs->mcr); reg &= ~FLEXCAN_MCR_HALT; - flexcan_write(reg, &regs->mcr); + priv->write(reg, &regs->mcr); - while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) + while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) udelay(10); - if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK) + if (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK) return -ETIMEDOUT; return 0; @@ -453,11 +470,11 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv) struct flexcan_regs __iomem *regs = priv->regs; unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; - flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr); - while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)) + priv->write(FLEXCAN_MCR_SOFTRST, &regs->mcr); + while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)) udelay(10); - if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST) + if (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST) return -ETIMEDOUT; return 0; @@ -468,7 +485,7 @@ static int __flexcan_get_berr_counter(const struct net_device *dev, { const struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; - u32 reg = flexcan_read(&regs->ecr); + u32 reg = priv->read(&regs->ecr); bec->txerr = (reg >> 0) & 0xff; bec->rxerr = (reg >> 8) & 0xff; @@ -524,24 +541,24 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) if (cf->can_dlc > 0) { data = be32_to_cpup((__be32 *)&cf->data[0]); - flexcan_write(data, &priv->tx_mb->data[0]); + priv->write(data, &priv->tx_mb->data[0]); } if (cf->can_dlc > 4) { data = be32_to_cpup((__be32 *)&cf->data[4]); - flexcan_write(data, &priv->tx_mb->data[1]); + priv->write(data, &priv->tx_mb->data[1]); } can_put_echo_skb(skb, dev, 0); - flexcan_write(can_id, &priv->tx_mb->can_id); - flexcan_write(ctrl, &priv->tx_mb->can_ctrl); + priv->write(can_id, &priv->tx_mb->can_id); + priv->write(ctrl, &priv->tx_mb->can_ctrl); /* Errata ERR005829 step8: * Write twice INACTIVE(0x8) code to first MB. 
*/ - flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, &priv->tx_mb_reserved->can_ctrl); - flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, &priv->tx_mb_reserved->can_ctrl); return NETDEV_TX_OK; @@ -660,7 +677,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, u32 code; do { - reg_ctrl = flexcan_read(&mb->can_ctrl); + reg_ctrl = priv->read(&mb->can_ctrl); } while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT); /* is this MB empty? */ @@ -675,17 +692,17 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, offload->dev->stats.rx_errors++; } } else { - reg_iflag1 = flexcan_read(&regs->iflag1); + reg_iflag1 = priv->read(&regs->iflag1); if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE)) return 0; - reg_ctrl = flexcan_read(&mb->can_ctrl); + reg_ctrl = priv->read(&mb->can_ctrl); } /* increase timstamp to full 32 bit */ *timestamp = reg_ctrl << 16; - reg_id = flexcan_read(&mb->can_id); + reg_id = priv->read(&mb->can_id); if (reg_ctrl & FLEXCAN_MB_CNT_IDE) cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG; else @@ -695,19 +712,19 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, cf->can_id |= CAN_RTR_FLAG; cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf); - *(__be32 *)(cf->data + 0) = cpu_to_be32(flexcan_read(&mb->data[0])); - *(__be32 *)(cf->data + 4) = cpu_to_be32(flexcan_read(&mb->data[1])); + *(__be32 *)(cf->data + 0) = cpu_to_be32(priv->read(&mb->data[0])); + *(__be32 *)(cf->data + 4) = cpu_to_be32(priv->read(&mb->data[1])); /* mark as read */ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { /* Clear IRQ */ if (n < 32) - flexcan_write(BIT(n), &regs->iflag1); + priv->write(BIT(n), &regs->iflag1); else - flexcan_write(BIT(n - 32), &regs->iflag2); + priv->write(BIT(n - 32), &regs->iflag2); } else { - flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1); - flexcan_read(&regs->timer); + priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1); + priv->read(&regs->timer); } return 1; @@ -719,8 +736,8 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv) struct flexcan_regs __iomem *regs = priv->regs; u32 iflag1, iflag2; - iflag2 = flexcan_read(&regs->iflag2) & priv->reg_imask2_default; - iflag1 = flexcan_read(&regs->iflag1) & priv->reg_imask1_default & + iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default; + iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default & ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx); return (u64)iflag2 << 32 | iflag1; @@ -736,7 +753,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) u32 reg_iflag1, reg_esr; enum can_state last_state = priv->can.state; - reg_iflag1 = flexcan_read(&regs->iflag1); + reg_iflag1 = priv->read(&regs->iflag1); /* reception interrupt */ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { @@ -759,7 +776,8 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) /* FIFO overflow interrupt */ if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) { handled = IRQ_HANDLED; - flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1); + priv->write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, + &regs->iflag1); dev->stats.rx_over_errors++; dev->stats.rx_errors++; } @@ -773,18 +791,18 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) can_led_event(dev, CAN_LED_EVENT_TX); /* after sending a RTR frame MB is in RX mode */ - flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, - &priv->tx_mb->can_ctrl); - flexcan_write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1); + 
priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, + &priv->tx_mb->can_ctrl); + priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1); netif_wake_queue(dev); } - reg_esr = flexcan_read(&regs->esr); + reg_esr = priv->read(&regs->esr); /* ACK all bus error and state change IRQ sources */ if (reg_esr & FLEXCAN_ESR_ALL_INT) { handled = IRQ_HANDLED; - flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr); + priv->write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr); } /* state change interrupt or broken error state quirk fix is enabled */ @@ -846,7 +864,7 @@ static void flexcan_set_bittiming(struct net_device *dev) struct flexcan_regs __iomem *regs = priv->regs; u32 reg; - reg = flexcan_read(&regs->ctrl); + reg = priv->read(&regs->ctrl); reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) | FLEXCAN_CTRL_RJW(0x3) | FLEXCAN_CTRL_PSEG1(0x7) | @@ -870,11 +888,11 @@ static void flexcan_set_bittiming(struct net_device *dev) reg |= FLEXCAN_CTRL_SMP; netdev_dbg(dev, "writing ctrl=0x%08x\n", reg); - flexcan_write(reg, &regs->ctrl); + priv->write(reg, &regs->ctrl); /* print chip status */ netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__, - flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl)); + priv->read(&regs->mcr), priv->read(&regs->ctrl)); } /* flexcan_chip_start @@ -913,7 +931,7 @@ static int flexcan_chip_start(struct net_device *dev) * choose format C * set max mailbox number */ - reg_mcr = flexcan_read(&regs->mcr); + reg_mcr = priv->read(&regs->mcr); reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ | @@ -927,7 +945,7 @@ static int flexcan_chip_start(struct net_device *dev) FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); } netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); - flexcan_write(reg_mcr, &regs->mcr); + priv->write(reg_mcr, &regs->mcr); /* CTRL * @@ -940,7 +958,7 @@ static int flexcan_chip_start(struct net_device *dev) * enable bus off interrupt * (== FLEXCAN_CTRL_ERR_STATE) */ - reg_ctrl = flexcan_read(&regs->ctrl); + reg_ctrl = priv->read(&regs->ctrl); reg_ctrl &= ~FLEXCAN_CTRL_TSYN; reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF | FLEXCAN_CTRL_ERR_STATE; @@ -960,45 +978,45 @@ static int flexcan_chip_start(struct net_device *dev) /* leave interrupts disabled for now */ reg_ctrl &= ~FLEXCAN_CTRL_ERR_ALL; netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); - flexcan_write(reg_ctrl, &regs->ctrl); + priv->write(reg_ctrl, &regs->ctrl); if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) { - reg_ctrl2 = flexcan_read(&regs->ctrl2); + reg_ctrl2 = priv->read(&regs->ctrl2); reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS; - flexcan_write(reg_ctrl2, &regs->ctrl2); + priv->write(reg_ctrl2, &regs->ctrl2); } /* clear and invalidate all mailboxes first */ for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) { - flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE, - &regs->mb[i].can_ctrl); + priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, + &regs->mb[i].can_ctrl); } if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) - flexcan_write(FLEXCAN_MB_CODE_RX_EMPTY, - &regs->mb[i].can_ctrl); + priv->write(FLEXCAN_MB_CODE_RX_EMPTY, + &regs->mb[i].can_ctrl); } /* Errata ERR005829: mark first TX mailbox as INACTIVE */ - flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, - &priv->tx_mb_reserved->can_ctrl); + priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, + &priv->tx_mb_reserved->can_ctrl); /* mark TX mailbox as INACTIVE */ - flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, - &priv->tx_mb->can_ctrl); + 
priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, + &priv->tx_mb->can_ctrl); /* acceptance mask/acceptance code (accept everything) */ - flexcan_write(0x0, &regs->rxgmask); - flexcan_write(0x0, &regs->rx14mask); - flexcan_write(0x0, &regs->rx15mask); + priv->write(0x0, &regs->rxgmask); + priv->write(0x0, &regs->rx14mask); + priv->write(0x0, &regs->rx15mask); if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG) - flexcan_write(0x0, &regs->rxfgmask); + priv->write(0x0, &regs->rxfgmask); /* clear acceptance filters */ for (i = 0; i < ARRAY_SIZE(regs->mb); i++) - flexcan_write(0, &regs->rximr[i]); + priv->write(0, &regs->rximr[i]); /* On Vybrid, disable memory error detection interrupts * and freeze mode. @@ -1011,16 +1029,16 @@ static int flexcan_chip_start(struct net_device *dev) * and Correction of Memory Errors" to write to * MECR register */ - reg_ctrl2 = flexcan_read(&regs->ctrl2); + reg_ctrl2 = priv->read(&regs->ctrl2); reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE; - flexcan_write(reg_ctrl2, &regs->ctrl2); + priv->write(reg_ctrl2, &regs->ctrl2); - reg_mecr = flexcan_read(&regs->mecr); + reg_mecr = priv->read(&regs->mecr); reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS; - flexcan_write(reg_mecr, &regs->mecr); + priv->write(reg_mecr, &regs->mecr); reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK | FLEXCAN_MECR_FANCEI_MSK); - flexcan_write(reg_mecr, &regs->mecr); + priv->write(reg_mecr, &regs->mecr); } err = flexcan_transceiver_enable(priv); @@ -1036,14 +1054,14 @@ static int flexcan_chip_start(struct net_device *dev) /* enable interrupts atomically */ disable_irq(dev->irq); - flexcan_write(priv->reg_ctrl_default, &regs->ctrl); - flexcan_write(priv->reg_imask1_default, &regs->imask1); - flexcan_write(priv->reg_imask2_default, &regs->imask2); + priv->write(priv->reg_ctrl_default, &regs->ctrl); + priv->write(priv->reg_imask1_default, &regs->imask1); + priv->write(priv->reg_imask2_default, &regs->imask2); enable_irq(dev->irq); /* print chip status */ netdev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__, - flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl)); + priv->read(&regs->mcr), priv->read(&regs->ctrl)); return 0; @@ -1068,10 +1086,10 @@ static void flexcan_chip_stop(struct net_device *dev) flexcan_chip_disable(priv); /* Disable all interrupts */ - flexcan_write(0, &regs->imask2); - flexcan_write(0, &regs->imask1); - flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, - &regs->ctrl); + priv->write(0, &regs->imask2); + priv->write(0, &regs->imask1); + priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, + &regs->ctrl); flexcan_transceiver_disable(priv); priv->can.state = CAN_STATE_STOPPED; @@ -1186,26 +1204,26 @@ static int register_flexcandev(struct net_device *dev) err = flexcan_chip_disable(priv); if (err) goto out_disable_per; - reg = flexcan_read(&regs->ctrl); + reg = priv->read(&regs->ctrl); reg |= FLEXCAN_CTRL_CLK_SRC; - flexcan_write(reg, &regs->ctrl); + priv->write(reg, &regs->ctrl); err = flexcan_chip_enable(priv); if (err) goto out_chip_disable; /* set freeze, halt and activate FIFO, restrict register access */ - reg = flexcan_read(&regs->mcr); + reg = priv->read(&regs->mcr); reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; - flexcan_write(reg, &regs->mcr); + priv->write(reg, &regs->mcr); /* Currently we only support newer versions of this core * featuring a RX hardware FIFO (although this driver doesn't * make use of it on some cores). Older cores, found on some * Coldfire derivates are not tested. 
*/ - reg = flexcan_read(&regs->mcr); + reg = priv->read(&regs->mcr); if (!(reg & FLEXCAN_MCR_FEN)) { netdev_err(dev, "Could not enable RX FIFO, unsupported core\n"); err = -ENODEV; @@ -1233,8 +1251,12 @@ static void unregister_flexcandev(struct net_device *dev) static const struct of_device_id flexcan_of_match[] = { { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, }, + { .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, }, + { .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, }, + { .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, }, { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, }, { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, }, + { .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, flexcan_of_match); @@ -1314,6 +1336,21 @@ static int flexcan_probe(struct platform_device *pdev) dev->flags |= IFF_ECHO; priv = netdev_priv(dev); + + if (of_property_read_bool(pdev->dev.of_node, "big-endian")) { + priv->read = flexcan_read_be; + priv->write = flexcan_write_be; + } else { + if (of_device_is_compatible(pdev->dev.of_node, + "fsl,p1010-flexcan")) { + priv->read = flexcan_read_be; + priv->write = flexcan_write_be; + } else { + priv->read = flexcan_read_le; + priv->write = flexcan_write_le; + } + } + priv->can.clock.freq = clock_freq; priv->can.bittiming_const = &flexcan_bittiming_const; priv->can.do_set_mode = flexcan_set_mode; diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index f4947a74b65f..2594f7779c6f 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -23,6 +23,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/iopoll.h> #include <linux/can/dev.h> @@ -126,6 +127,12 @@ enum m_can_mram_cfg { #define DBTP_DSJW_SHIFT 0 #define DBTP_DSJW_MASK (0xf << DBTP_DSJW_SHIFT) +/* Transmitter Delay Compensation Register (TDCR) */ +#define TDCR_TDCO_SHIFT 8 +#define TDCR_TDCO_MASK (0x7F << TDCR_TDCO_SHIFT) +#define TDCR_TDCF_SHIFT 0 +#define TDCR_TDCF_MASK (0x7F << TDCR_TDCF_SHIFT) + /* Test Register (TEST) */ #define TEST_LBCK BIT(4) @@ -625,21 +632,16 @@ static int m_can_clk_start(struct m_can_priv *priv) { int err; - err = clk_prepare_enable(priv->hclk); + err = pm_runtime_get_sync(priv->device); if (err) - return err; - - err = clk_prepare_enable(priv->cclk); - if (err) - clk_disable_unprepare(priv->hclk); + pm_runtime_put_noidle(priv->device); return err; } static void m_can_clk_stop(struct m_can_priv *priv) { - clk_disable_unprepare(priv->cclk); - clk_disable_unprepare(priv->hclk); + pm_runtime_put_sync(priv->device); } static int m_can_get_berr_counter(const struct net_device *dev, @@ -987,13 +989,47 @@ static int m_can_set_bittiming(struct net_device *dev) m_can_write(priv, M_CAN_NBTP, reg_btp); if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + reg_btp = 0; brp = dbt->brp - 1; sjw = dbt->sjw - 1; tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; tseg2 = dbt->phase_seg2 - 1; - reg_btp = (brp << DBTP_DBRP_SHIFT) | (sjw << DBTP_DSJW_SHIFT) | - (tseg1 << DBTP_DTSEG1_SHIFT) | - (tseg2 << DBTP_DTSEG2_SHIFT); + + /* TDC is only needed for bitrates beyond 2.5 MBit/s. 
+ * This is mentioned in the "Bit Time Requirements for CAN FD" + * paper presented at the International CAN Conference 2013 + */ + if (dbt->bitrate > 2500000) { + u32 tdco, ssp; + + /* Use the same value of secondary sampling point + * as the data sampling point + */ + ssp = dbt->sample_point; + + /* Equation based on Bosch's M_CAN User Manual's + * Transmitter Delay Compensation Section + */ + tdco = (priv->can.clock.freq / 1000) * + ssp / dbt->bitrate; + + /* Max valid TDCO value is 127 */ + if (tdco > 127) { + netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n", + tdco); + tdco = 127; + } + + reg_btp |= DBTP_TDC; + m_can_write(priv, M_CAN_TDCR, + tdco << TDCR_TDCO_SHIFT); + } + + reg_btp |= (brp << DBTP_DBRP_SHIFT) | + (sjw << DBTP_DSJW_SHIFT) | + (tseg1 << DBTP_DTSEG1_SHIFT) | + (tseg2 << DBTP_DTSEG2_SHIFT); + m_can_write(priv, M_CAN_DBTP, reg_btp); } @@ -1143,11 +1179,6 @@ static int m_can_set_mode(struct net_device *dev, enum can_mode mode) return 0; } -static void free_m_can_dev(struct net_device *dev) -{ - free_candev(dev); -} - /* Checks core release number of M_CAN * returns 0 if an unsupported device is detected * else it returns the release and step coded as: @@ -1207,31 +1238,20 @@ static bool m_can_niso_supported(const struct m_can_priv *priv) return !niso_timeout; } -static struct net_device *alloc_m_can_dev(struct platform_device *pdev, - void __iomem *addr, u32 tx_fifo_size) +static int m_can_dev_setup(struct platform_device *pdev, struct net_device *dev, + void __iomem *addr) { - struct net_device *dev; struct m_can_priv *priv; int m_can_version; - unsigned int echo_buffer_count; m_can_version = m_can_check_core_release(addr); /* return if unsupported version */ if (!m_can_version) { - dev = NULL; - goto return_dev; + dev_err(&pdev->dev, "Unsupported version number: %2d", + m_can_version); + return -EINVAL; } - /* If version < 3.1.x, then only one echo buffer is used */ - echo_buffer_count = ((m_can_version == 30) - ? 1U - : (unsigned int)tx_fifo_size); - - dev = alloc_candev(sizeof(*priv), echo_buffer_count); - if (!dev) { - dev = NULL; - goto return_dev; - } priv = netdev_priv(dev); netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT); @@ -1273,16 +1293,12 @@ static struct net_device *alloc_m_can_dev(struct platform_device *pdev, : 0); break; default: - /* Unsupported device: free candev */ - free_m_can_dev(dev); dev_err(&pdev->dev, "Unsupported version number: %2d", priv->version); - dev = NULL; - break; + return -EINVAL; } -return_dev: - return dev; + return 0; } static int m_can_open(struct net_device *dev) @@ -1574,37 +1590,26 @@ static int m_can_plat_probe(struct platform_device *pdev) goto failed_ret; } - /* Enable clocks. 
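To make the TDCO arithmetic above concrete, a worked instance under assumed numbers (the 40 MHz clock and 4 Mbit/s rate are examples, not values from the patch):

	/* struct can_bittiming stores sample_point in tenths of a percent,
	 * so a 75.0% secondary sample point gives ssp == 750. With an
	 * assumed 40 MHz CAN clock and a 4 Mbit/s data bitrate:
	 *
	 *	tdco = (freq / 1000) * ssp / bitrate
	 *	     = (40000000 / 1000) * 750 / 4000000
	 *	     = 30000000 / 4000000
	 *	     = 7			(integer division truncates 7.5)
	 *
	 * Seven ticks of the 40 MHz clock put the SSP 175 ns into the
	 * 250 ns bit, i.e. at 70%, just under the requested 75% because
	 * of the truncation, and well below the 127-tick ceiling the
	 * code above enforces.
	 */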
Necessary to read Core Release in order to determine - * M_CAN version - */ - ret = clk_prepare_enable(hclk); - if (ret) - goto disable_hclk_ret; - - ret = clk_prepare_enable(cclk); - if (ret) - goto disable_cclk_ret; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can"); addr = devm_ioremap_resource(&pdev->dev, res); irq = platform_get_irq_byname(pdev, "int0"); if (IS_ERR(addr) || irq < 0) { ret = -EINVAL; - goto disable_cclk_ret; + goto failed_ret; } /* message ram could be shared */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); if (!res) { ret = -ENODEV; - goto disable_cclk_ret; + goto failed_ret; } mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!mram_addr) { ret = -ENOMEM; - goto disable_cclk_ret; + goto failed_ret; } /* get message ram configuration */ @@ -1613,7 +1618,7 @@ static int m_can_plat_probe(struct platform_device *pdev) sizeof(mram_config_vals) / 4); if (ret) { dev_err(&pdev->dev, "Could not get Message RAM configuration."); - goto disable_cclk_ret; + goto failed_ret; } /* Get TX FIFO size @@ -1622,11 +1627,12 @@ static int m_can_plat_probe(struct platform_device *pdev) tx_fifo_size = mram_config_vals[7]; /* allocate the m_can device */ - dev = alloc_m_can_dev(pdev, addr, tx_fifo_size); + dev = alloc_candev(sizeof(*priv), tx_fifo_size); if (!dev) { ret = -ENOMEM; - goto disable_cclk_ret; + goto failed_ret; } + priv = netdev_priv(dev); dev->irq = irq; priv->device = &pdev->dev; @@ -1640,30 +1646,42 @@ static int m_can_plat_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); + /* Enable clocks. Necessary to read Core Release in order to determine + * M_CAN version + */ + pm_runtime_enable(&pdev->dev); + ret = m_can_clk_start(priv); + if (ret) + goto pm_runtime_fail; + + ret = m_can_dev_setup(pdev, dev, addr); + if (ret) + goto clk_disable; + ret = register_m_can_dev(dev); if (ret) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", KBUILD_MODNAME, ret); - goto failed_free_dev; + goto clk_disable; } devm_can_led_init(dev); + of_can_transceiver(dev); + dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n", KBUILD_MODNAME, dev->irq, priv->version); /* Probe finished * Stop clocks. 
They will be reactivated once the M_CAN device is opened */ - - goto disable_cclk_ret; - -failed_free_dev: - free_m_can_dev(dev); -disable_cclk_ret: - clk_disable_unprepare(cclk); -disable_hclk_ret: - clk_disable_unprepare(hclk); +clk_disable: + m_can_clk_stop(priv); +pm_runtime_fail: + if (ret) { + pm_runtime_disable(&pdev->dev); + free_candev(dev); + } failed_ret: return ret; } @@ -1721,14 +1739,47 @@ static int m_can_plat_remove(struct platform_device *pdev) struct net_device *dev = platform_get_drvdata(pdev); unregister_m_can_dev(dev); + + pm_runtime_disable(&pdev->dev); + platform_set_drvdata(pdev, NULL); - free_m_can_dev(dev); + free_candev(dev); return 0; } +static int __maybe_unused m_can_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct m_can_priv *priv = netdev_priv(ndev); + + clk_disable_unprepare(priv->cclk); + clk_disable_unprepare(priv->hclk); + + return 0; +} + +static int __maybe_unused m_can_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct m_can_priv *priv = netdev_priv(ndev); + int err; + + err = clk_prepare_enable(priv->hclk); + if (err) + return err; + + err = clk_prepare_enable(priv->cclk); + if (err) + clk_disable_unprepare(priv->hclk); + + return err; +} + static const struct dev_pm_ops m_can_pmops = { + SET_RUNTIME_PM_OPS(m_can_runtime_suspend, + m_can_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume) }; diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c index f394f77d7528..d94dae216820 100644 --- a/drivers/net/can/rx-offload.c +++ b/drivers/net/can/rx-offload.c @@ -256,7 +256,7 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload * weight = offload->mb_first - offload->mb_last; } - return can_rx_offload_init_queue(dev, offload, weight);; + return can_rx_offload_init_queue(dev, offload, weight); } EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp); diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 5d067c1b987f..89d60d8e467c 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -508,7 +508,7 @@ static void slc_sync(void) } /* Find a free SLCAN channel, and link in this `tty' line. */ -static struct slcan *slc_alloc(dev_t line) +static struct slcan *slc_alloc(void) { int i; char name[IFNAMSIZ]; @@ -583,7 +583,7 @@ static int slcan_open(struct tty_struct *tty) /* OK. Find a free SLCAN channel to use. 
*/ err = -ENFILE; - sl = slc_alloc(tty_devnum(tty)); + sl = slc_alloc(); if (sl == NULL) goto err_exit; diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index f3f05fea8e1f..98d118b3aaf4 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -612,8 +612,7 @@ static int mcp251x_do_set_bittiming(struct net_device *net) return 0; } -static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv, - struct spi_device *spi) +static int mcp251x_setup(struct net_device *net, struct spi_device *spi) { mcp251x_do_set_bittiming(net); @@ -775,7 +774,7 @@ static void mcp251x_restart_work_handler(struct work_struct *ws) mutex_lock(&priv->mcp_lock); if (priv->after_suspend) { mcp251x_hw_reset(spi); - mcp251x_setup(net, priv, spi); + mcp251x_setup(net, spi); if (priv->after_suspend & AFTER_SUSPEND_RESTART) { mcp251x_set_normal_mode(spi); } else if (priv->after_suspend & AFTER_SUSPEND_UP) { @@ -971,7 +970,7 @@ static int mcp251x_open(struct net_device *net) mcp251x_open_clean(net); goto open_unlock; } - ret = mcp251x_setup(net, priv, spi); + ret = mcp251x_setup(net, spi); if (ret) { mcp251x_open_clean(net); goto open_unlock; diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 8bf80ad9dc44..17c21ad3b95e 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -243,7 +243,7 @@ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, return NULL; } -static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev) +static int gs_cmd_reset(struct gs_can *gsdev) { struct gs_device_mode *dm; struct usb_interface *intf = gsdev->iface; @@ -709,7 +709,7 @@ static int gs_can_close(struct net_device *netdev) atomic_set(&dev->active_tx_urbs, 0); /* reset the device */ - rc = gs_cmd_reset(parent, dev); + rc = gs_cmd_reset(dev); if (rc < 0) netdev_warn(netdev, "Couldn't shutdown device (err=%d)", rc); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 25a9b79cc42d..f530a80f5051 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -408,7 +408,6 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, { struct sk_buff *skb; struct can_frame *cf; - struct timeval tv; enum can_state new_state; /* ignore this error until 1st ts received */ @@ -525,8 +524,8 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); - peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv); - hwts->hwtstamp = timeval_to_ktime(tv); + peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, + &hwts->hwtstamp); } mc->netdev->stats.rx_packets++; @@ -610,7 +609,6 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) u8 rec_len = status_len & PCAN_USB_STATUSLEN_DLC; struct sk_buff *skb; struct can_frame *cf; - struct timeval tv; struct skb_shared_hwtstamps *hwts; skb = alloc_can_skb(mc->netdev, &cf); @@ -658,9 +656,8 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) } /* convert timestamp into kernel time */ - peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv); hwts = skb_hwtstamps(skb); - hwts->hwtstamp = timeval_to_ktime(tv); + peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, &hwts->hwtstamp); /* update statistics */ mc->netdev->stats.rx_packets++; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c 
b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 1ca76e03e965..50e911428638 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -80,21 +80,6 @@ void peak_usb_init_time_ref(struct peak_time_ref *time_ref, } } -static void peak_usb_add_us(struct timeval *tv, u32 delta_us) -{ - /* number of s. to add to final time */ - u32 delta_s = delta_us / 1000000; - - delta_us -= delta_s * 1000000; - - tv->tv_usec += delta_us; - if (tv->tv_usec >= 1000000) { - tv->tv_usec -= 1000000; - delta_s++; - } - tv->tv_sec += delta_s; -} - /* * sometimes, another now may be more recent than current one... */ @@ -103,7 +88,7 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now) time_ref->ts_dev_2 = ts_now; /* should wait at least two passes before computing */ - if (time_ref->tv_host.tv_sec > 0) { + if (ktime_to_ns(time_ref->tv_host) > 0) { u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1; if (time_ref->ts_dev_2 < time_ref->ts_dev_1) @@ -118,26 +103,26 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now) */ void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now) { - if (time_ref->tv_host_0.tv_sec == 0) { + if (ktime_to_ns(time_ref->tv_host_0) == 0) { /* use monotonic clock to correctly compute further deltas */ - time_ref->tv_host_0 = ktime_to_timeval(ktime_get()); - time_ref->tv_host.tv_sec = 0; + time_ref->tv_host_0 = ktime_get(); + time_ref->tv_host = ktime_set(0, 0); } else { /* - * delta_us should not be >= 2^32 => delta_s should be < 4294 + * delta_us should not be >= 2^32 => delta should be < 4294s * handle 32-bits wrapping here: if count of s. reaches 4200, * reset counters and change time base */ - if (time_ref->tv_host.tv_sec != 0) { - u32 delta_s = time_ref->tv_host.tv_sec - - time_ref->tv_host_0.tv_sec; - if (delta_s > 4200) { + if (ktime_to_ns(time_ref->tv_host)) { + ktime_t delta = ktime_sub(time_ref->tv_host, + time_ref->tv_host_0); + if (ktime_to_ns(delta) > (4200ull * NSEC_PER_SEC)) { time_ref->tv_host_0 = time_ref->tv_host; time_ref->ts_total = 0; } } - time_ref->tv_host = ktime_to_timeval(ktime_get()); + time_ref->tv_host = ktime_get(); time_ref->tick_count++; } @@ -146,13 +131,12 @@ void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now) } /* - * compute timeval according to current ts and time_ref data + * compute time according to current ts and time_ref data */ -void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts, - struct timeval *tv) +void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time) { - /* protect from getting timeval before setting now */ - if (time_ref->tv_host.tv_sec > 0) { + /* protect from getting time before setting now */ + if (ktime_to_ns(time_ref->tv_host)) { u64 delta_us; delta_us = ts - time_ref->ts_dev_2; @@ -164,10 +148,9 @@ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts, delta_us *= time_ref->adapter->us_per_ts_scale; delta_us >>= time_ref->adapter->us_per_ts_shift; - *tv = time_ref->tv_host_0; - peak_usb_add_us(tv, (u32)delta_us); + *time = ktime_add_us(time_ref->tv_host_0, delta_us); } else { - *tv = ktime_to_timeval(ktime_get()); + *time = ktime_get(); } } @@ -175,13 +158,11 @@ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts, * post received skb after having set any hw timestamp */ int peak_usb_netif_rx(struct sk_buff *skb, - struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high) + struct peak_time_ref *time_ref, u32 ts_low) { struct skb_shared_hwtstamps *hwts = 
skb_hwtstamps(skb); - struct timeval tv; - peak_usb_get_ts_tv(time_ref, ts_low, &tv); - hwts->hwtstamp = timeval_to_ktime(tv); + peak_usb_get_ts_time(time_ref, ts_low, &hwts->hwtstamp); return netif_rx(skb); } diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h index c01316cac354..fb23489e1cb8 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h @@ -96,7 +96,7 @@ extern const struct peak_usb_adapter pcan_usb_pro_fd; extern const struct peak_usb_adapter pcan_usb_x6; struct peak_time_ref { - struct timeval tv_host_0, tv_host; + ktime_t tv_host_0, tv_host; u32 ts_dev_1, ts_dev_2; u64 ts_total; u32 tick_count; @@ -151,10 +151,9 @@ void peak_usb_init_time_ref(struct peak_time_ref *time_ref, const struct peak_usb_adapter *adapter); void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now); void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now); -void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts, - struct timeval *tv); +void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv); int peak_usb_netif_rx(struct sk_buff *skb, - struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high); + struct peak_time_ref *time_ref, u32 ts_low); void peak_usb_async_complete(struct urb *urb); void peak_usb_restart_complete(struct peak_usb_device *dev); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 53d6bb045e9e..dd161c5eea8e 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -514,8 +514,7 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if, else memcpy(cfd->data, rm->d, cfd->len); - peak_usb_netif_rx(skb, &usb_if->time_ref, - le32_to_cpu(rm->ts_low), le32_to_cpu(rm->ts_high)); + peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low)); netdev->stats.rx_packets++; netdev->stats.rx_bytes += cfd->len; @@ -575,8 +574,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, if (!skb) return -ENOMEM; - peak_usb_netif_rx(skb, &usb_if->time_ref, - le32_to_cpu(sm->ts_low), le32_to_cpu(sm->ts_high)); + peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low)); netdev->stats.rx_packets++; netdev->stats.rx_bytes += cf->can_dlc; @@ -618,8 +616,7 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if, cf->can_id |= CAN_ERR_CRTL; cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; - peak_usb_netif_rx(skb, &usb_if->time_ref, - le32_to_cpu(ov->ts_low), le32_to_cpu(ov->ts_high)); + peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(ov->ts_low)); netdev->stats.rx_over_errors++; netdev->stats.rx_errors++; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index bbdd6058cd2f..0105fbfea273 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -531,7 +531,6 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, struct net_device *netdev = dev->netdev; struct can_frame *can_frame; struct sk_buff *skb; - struct timeval tv; struct skb_shared_hwtstamps *hwts; skb = alloc_can_skb(netdev, &can_frame); @@ -549,9 +548,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, else memcpy(can_frame->data, rx->data, can_frame->can_dlc); - peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv); hwts = skb_hwtstamps(skb); - hwts->hwtstamp = 
timeval_to_ktime(tv); + peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(rx->ts32), + &hwts->hwtstamp); netdev->stats.rx_packets++; netdev->stats.rx_bytes += can_frame->can_dlc; @@ -571,7 +570,6 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, enum can_state new_state = CAN_STATE_ERROR_ACTIVE; u8 err_mask = 0; struct sk_buff *skb; - struct timeval tv; struct skb_shared_hwtstamps *hwts; /* nothing should be sent while in BUS_OFF state */ @@ -667,9 +665,8 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, dev->can.state = new_state; - peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); hwts = skb_hwtstamps(skb); - hwts->hwtstamp = timeval_to_ktime(tv); + peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(er->ts32), &hwts->hwtstamp); netdev->stats.rx_packets++; netdev->stats.rx_bytes += can_frame->can_dlc; netif_rx(skb); diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index a8cb33264ff1..c2b04f505e16 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -61,7 +61,7 @@ MODULE_ALIAS_RTNL_LINK(DRV_NAME); /* * CAN test feature: * Enable the echo on driver level for testing the CAN core echo modes. - * See Documentation/networking/can.txt for details. + * See Documentation/networking/can.rst for details. */ static bool echo; /* echo testing. Default: 0 (Off) */ diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index b4c4a2c76437..ed6828821fbd 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -227,10 +227,8 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, netif_carrier_off(peer); err = rtnl_configure_link(peer, ifmp); - if (err < 0) { - unregister_netdevice(peer); - return err; - } + if (err < 0) + goto unregister_network_device; /* register first device */ if (tb[IFLA_IFNAME]) @@ -239,10 +237,8 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); err = register_netdevice(dev); - if (err < 0) { - unregister_netdevice(peer); - return err; - } + if (err < 0) + goto unregister_network_device; netif_carrier_off(dev); @@ -254,6 +250,10 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, rcu_assign_pointer(priv->peer, dev); return 0; + +unregister_network_device: + unregister_netdevice(peer); + return err; } static void vxcan_dellink(struct net_device *dev, struct list_head *head) diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 83a9bc892a3b..2b81b97e994f 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -33,7 +33,7 @@ config NET_DSA_MT7530 config NET_DSA_MV88E6060 tristate "Marvell 88E6060 ethernet switch chip support" - depends on NET_DSA + depends on NET_DSA && NET_DSA_LEGACY select NET_DSA_TAG_TRAILER ---help--- This enables support for the Marvell 88E6060 ethernet switch diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 4498ab897d94..db830a1141d9 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1029,8 +1029,7 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) EXPORT_SYMBOL(b53_vlan_filtering); int b53_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) + const struct switchdev_obj_port_vlan *vlan) { struct b53_device *dev = ds->priv; @@ -1047,8 +1046,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port, EXPORT_SYMBOL(b53_vlan_prepare); void 
b53_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) + const struct switchdev_obj_port_vlan *vlan) { struct b53_device *dev = ds->priv; bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; @@ -1495,8 +1493,7 @@ static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port) return false; } -static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, - int port) +enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port) { struct b53_device *dev = ds->priv; @@ -1517,6 +1514,7 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, return DSA_TAG_PROTO_BRCM; } +EXPORT_SYMBOL(b53_get_tag_protocol); int b53_mirror_add(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress) diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index daaaa1ecb996..d954cf36ecd8 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -295,11 +295,9 @@ void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); void b53_br_fast_age(struct dsa_switch *ds, int port); int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering); int b53_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans); + const struct switchdev_obj_port_vlan *vlan); void b53_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans); + const struct switchdev_obj_port_vlan *vlan); int b53_vlan_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); int b53_fdb_add(struct dsa_switch *ds, int port, @@ -310,6 +308,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data); int b53_mirror_add(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress); +enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port); void b53_mirror_del(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror); int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index b62d47210db8..0378eded31f2 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -34,12 +34,6 @@ #include "b53/b53_priv.h" #include "b53/b53_regs.h" -static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds, - int port) -{ - return DSA_TAG_PROTO_BRCM; -} - static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); @@ -860,7 +854,7 @@ static const struct b53_io_ops bcm_sf2_io_ops = { }; static const struct dsa_switch_ops bcm_sf2_ops = { - .get_tag_protocol = bcm_sf2_sw_get_tag_protocol, + .get_tag_protocol = b53_get_tag_protocol, .setup = bcm_sf2_sw_setup, .get_strings = b53_get_strings, .get_ethtool_stats = b53_get_ethtool_stats, @@ -954,6 +948,9 @@ static const struct of_device_id bcm_sf2_of_match[] = { { .compatible = "brcm,bcm7278-switch-v4.0", .data = &bcm_sf2_7278_data }, + { .compatible = "brcm,bcm7278-switch-v4.8", + .data = &bcm_sf2_7278_data + }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, bcm_sf2_of_match); diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index bb71d3d6f65b..7aa84ee4e771 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -174,9 +174,9 @@ static int 
dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port, return 0; } -static int dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) +static int +dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) { struct dsa_loop_priv *ps = ds->priv; struct mii_bus *bus = ps->bus; @@ -193,8 +193,7 @@ static int dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port, } static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) + const struct switchdev_obj_port_vlan *vlan) { bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index b24566bb74d2..6171c0853ff1 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -249,6 +249,28 @@ static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg) return -EIO; } +static int lan9303_read_wait(struct lan9303 *chip, int offset, u32 mask) +{ + int i; + + for (i = 0; i < 25; i++) { + u32 reg; + int ret; + + ret = lan9303_read(chip->regmap, offset, &reg); + if (ret) { + dev_err(chip->dev, "%s failed to read offset %d: %d\n", + __func__, offset, ret); + return ret; + } + if (!(reg & mask)) + return 0; + usleep_range(1000, 2000); + } + + return -ETIMEDOUT; +} + static int lan9303_virt_phy_reg_read(struct lan9303 *chip, int regnum) { int ret; @@ -274,22 +296,8 @@ static int lan9303_virt_phy_reg_write(struct lan9303 *chip, int regnum, u16 val) static int lan9303_indirect_phy_wait_for_completion(struct lan9303 *chip) { - int ret, i; - u32 reg; - - for (i = 0; i < 25; i++) { - ret = lan9303_read(chip->regmap, LAN9303_PMI_ACCESS, &reg); - if (ret) { - dev_err(chip->dev, - "Failed to read pmi access status: %d\n", ret); - return ret; - } - if (!(reg & LAN9303_PMI_ACCESS_MII_BUSY)) - return 0; - usleep_range(1000, 2000); - } - - return -EIO; + return lan9303_read_wait(chip, LAN9303_PMI_ACCESS, + LAN9303_PMI_ACCESS_MII_BUSY); } static int lan9303_indirect_phy_read(struct lan9303 *chip, int addr, int regnum) @@ -366,22 +374,8 @@ EXPORT_SYMBOL_GPL(lan9303_indirect_phy_ops); static int lan9303_switch_wait_for_completion(struct lan9303 *chip) { - int ret, i; - u32 reg; - - for (i = 0; i < 25; i++) { - ret = lan9303_read(chip->regmap, LAN9303_SWITCH_CSR_CMD, &reg); - if (ret) { - dev_err(chip->dev, - "Failed to read csr command status: %d\n", ret); - return ret; - } - if (!(reg & LAN9303_SWITCH_CSR_CMD_BUSY)) - return 0; - usleep_range(1000, 2000); - } - - return -EIO; + return lan9303_read_wait(chip, LAN9303_SWITCH_CSR_CMD, + LAN9303_SWITCH_CSR_CMD_BUSY); } static int lan9303_write_switch_reg(struct lan9303 *chip, u16 regnum, u32 val) @@ -485,7 +479,8 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip) { int reg; - /* depending on the 'phy_addr_sel_strap' setting, the three phys are + /* Calculate chip->phy_addr_base: + * Depending on the 'phy_addr_sel_strap' setting, the three phys are * using IDs 0-1-2 or IDs 1-2-3. 
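The two open-coded busy-wait loops above collapse into lan9303_read_wait(): poll up to 25 times, sleeping 1-2 ms per pass, until the masked bit clears. For comparison, a sketch of the same wait expressed with the regmap core's generic polling macro (an alternative shown for illustration only, not what the patch does; it also bypasses the retry handling inside the driver's own lan9303_read() wrapper, which is presumably why the patch keeps a local helper):

	#include <linux/regmap.h>

	static int example_wait_mii_idle(struct lan9303 *chip)
	{
		u32 reg;

		/* sleep ~1 ms between reads, give up after 25 ms - roughly
		 * the 25 x usleep_range(1000, 2000) budget of
		 * lan9303_read_wait()
		 */
		return regmap_read_poll_timeout(chip->regmap,
						LAN9303_PMI_ACCESS, reg,
						!(reg & LAN9303_PMI_ACCESS_MII_BUSY),
						1000, 25000);
	}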
We cannot read back the * 'phy_addr_sel_strap' setting directly, so we need a test, which * configuration is active: @@ -500,13 +495,10 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip) return reg; } - if ((reg != 0) && (reg != 0xffff)) - chip->phy_addr_sel_strap = 1; - else - chip->phy_addr_sel_strap = 0; + chip->phy_addr_base = reg != 0 && reg != 0xffff; dev_dbg(chip->dev, "Phy setup '%s' detected\n", - chip->phy_addr_sel_strap ? "1-2-3" : "0-1-2"); + chip->phy_addr_base ? "1-2-3" : "0-1-2"); return 0; } @@ -546,20 +538,19 @@ lan9303_alr_cache_find_mac(struct lan9303 *chip, const u8 *mac_addr) return NULL; } -/* Wait a while until mask & reg == value. Otherwise return timeout. */ -static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno, - int mask, char value) +static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno, u32 mask) { int i; - for (i = 0; i < 0x1000; i++) { + for (i = 0; i < 25; i++) { u32 reg; lan9303_read_switch_reg(chip, regno, &reg); - if ((reg & mask) == value) + if (!(reg & mask)) return 0; usleep_range(1000, 2000); } + return -ETIMEDOUT; } @@ -569,8 +560,7 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1) lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_WR_DAT_1, dat1); lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, LAN9303_ALR_CMD_MAKE_ENTRY); - lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND, - 0); + lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND); lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0); return 0; @@ -583,6 +573,7 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx) { int i; + mutex_lock(&chip->alr_mutex); lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, LAN9303_ALR_CMD_GET_FIRST); lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0); @@ -606,6 +597,7 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx) LAN9303_ALR_CMD_GET_NEXT); lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0); } + mutex_unlock(&chip->alr_mutex); } static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6]) @@ -694,16 +686,20 @@ static int lan9303_alr_add_port(struct lan9303 *chip, const u8 *mac, int port, { struct lan9303_alr_cache_entry *entr; + mutex_lock(&chip->alr_mutex); entr = lan9303_alr_cache_find_mac(chip, mac); if (!entr) { /*New entry */ entr = lan9303_alr_cache_find_free(chip); - if (!entr) + if (!entr) { + mutex_unlock(&chip->alr_mutex); return -ENOSPC; + } ether_addr_copy(entr->mac_addr, mac); } entr->port_map |= BIT(port); entr->stp_override = stp_override; lan9303_alr_set_entry(chip, mac, entr->port_map, stp_override); + mutex_unlock(&chip->alr_mutex); return 0; } @@ -713,15 +709,18 @@ static int lan9303_alr_del_port(struct lan9303 *chip, const u8 *mac, int port) { struct lan9303_alr_cache_entry *entr; + mutex_lock(&chip->alr_mutex); entr = lan9303_alr_cache_find_mac(chip, mac); if (!entr) - return 0; /* no static entry found */ + goto out; /* no static entry found */ entr->port_map &= ~BIT(port); if (entr->port_map == 0) /* zero means its free again */ eth_zero_addr(entr->mac_addr); lan9303_alr_set_entry(chip, mac, entr->port_map, entr->stp_override); +out: + mutex_unlock(&chip->alr_mutex); return 0; } @@ -818,18 +817,16 @@ static void lan9303_bridge_ports(struct lan9303 *chip) lan9303_alr_add_port(chip, eth_stp_addr, 0, true); } -static int lan9303_handle_reset(struct lan9303 *chip) +static void lan9303_handle_reset(struct lan9303 *chip) { if (!chip->reset_gpio) - return 0; + return; if 
(chip->reset_duration != 0) msleep(chip->reset_duration); /* release (deassert) reset and activate the device */ gpiod_set_value_cansleep(chip->reset_gpio, 0); - - return 0; } /* stop processing packets for all ports */ @@ -866,7 +863,7 @@ static int lan9303_check_device(struct lan9303 *chip) if ((reg >> 16) != LAN9303_CHIP_ID) { dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n", reg >> 16); - return ret; + return -ENODEV; } /* The default state of the LAN9303 device is to forward packets between @@ -1018,7 +1015,7 @@ static int lan9303_get_sset_count(struct dsa_switch *ds) static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum) { struct lan9303 *chip = ds->priv; - int phy_base = chip->phy_addr_sel_strap; + int phy_base = chip->phy_addr_base; if (phy == phy_base) return lan9303_virt_phy_reg_read(chip, regnum); @@ -1032,7 +1029,7 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val) { struct lan9303 *chip = ds->priv; - int phy_base = chip->phy_addr_sel_strap; + int phy_base = chip->phy_addr_base; if (phy == phy_base) return lan9303_virt_phy_reg_write(chip, regnum, val); @@ -1069,7 +1066,7 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port, res = lan9303_phy_write(ds, port, MII_BMCR, ctl); - if (port == chip->phy_addr_sel_strap) { + if (port == chip->phy_addr_base) { /* Virtual Phy: Remove Turbo 200Mbit mode */ lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl); @@ -1093,8 +1090,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port, struct lan9303 *chip = ds->priv; lan9303_disable_processing_port(chip, port); - lan9303_phy_write(ds, chip->phy_addr_sel_strap + port, - MII_BMCR, BMCR_PDOWN); + lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN); } static int lan9303_port_bridge_join(struct dsa_switch *ds, int port, @@ -1217,8 +1213,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port, } static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans) + const struct switchdev_obj_port_mdb *mdb) { struct lan9303 *chip = ds->priv; @@ -1235,8 +1230,7 @@ static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port, } static void lan9303_port_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans) + const struct switchdev_obj_port_mdb *mdb) { struct lan9303 *chip = ds->priv; @@ -1284,26 +1278,31 @@ static const struct dsa_switch_ops lan9303_switch_ops = { static int lan9303_register_switch(struct lan9303 *chip) { + int base; + chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS); if (!chip->ds) return -ENOMEM; chip->ds->priv = chip; chip->ds->ops = &lan9303_switch_ops; - chip->ds->phys_mii_mask = chip->phy_addr_sel_strap ? 
0xe : 0x7; + base = chip->phy_addr_base; + chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base); return dsa_register_switch(chip->ds); } -static void lan9303_probe_reset_gpio(struct lan9303 *chip, +static int lan9303_probe_reset_gpio(struct lan9303 *chip, struct device_node *np) { chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(chip->reset_gpio)) + return PTR_ERR(chip->reset_gpio); - if (IS_ERR(chip->reset_gpio)) { + if (!chip->reset_gpio) { dev_dbg(chip->dev, "No reset GPIO defined\n"); - return; + return 0; } chip->reset_duration = 200; @@ -1318,6 +1317,8 @@ static void lan9303_probe_reset_gpio(struct lan9303 *chip, /* A sane reset duration should not be longer than 1s */ if (chip->reset_duration > 1000) chip->reset_duration = 1000; + + return 0; } int lan9303_probe(struct lan9303 *chip, struct device_node *np) @@ -1325,13 +1326,14 @@ int lan9303_probe(struct lan9303 *chip, struct device_node *np) int ret; mutex_init(&chip->indirect_mutex); + mutex_init(&chip->alr_mutex); - lan9303_probe_reset_gpio(chip, np); - - ret = lan9303_handle_reset(chip); + ret = lan9303_probe_reset_gpio(chip, np); if (ret) return ret; + lan9303_handle_reset(chip); + ret = lan9303_check_device(chip); if (ret) return ret; diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index b5be93a1e0df..663b0d5b982b 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -559,8 +559,7 @@ static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag) } static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) + const struct switchdev_obj_port_vlan *vlan) { /* nothing needed */ @@ -568,8 +567,7 @@ static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port, } static void ksz_port_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) + const struct switchdev_obj_port_vlan *vlan) { struct ksz_device *dev = ds->priv; u32 vlan_table[3]; @@ -858,16 +856,14 @@ exit: } static int ksz_port_mdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans) + const struct switchdev_obj_port_mdb *mdb) { /* nothing to do */ return 0; } static void ksz_port_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans) + const struct switchdev_obj_port_mdb *mdb) { struct ksz_device *dev = ds->priv; u32 static_table[4]; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 2820d69810b3..8a0bb000d056 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -805,6 +805,69 @@ mt7530_port_bridge_join(struct dsa_switch *ds, int port, } static void +mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port) +{ + struct mt7530_priv *priv = ds->priv; + bool all_user_ports_removed = true; + int i; + + /* When a port is removed from the bridge, the port would be set up + * back to the default as is at initial boot which is a VLAN-unaware + * port. 
+ */ + mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, + MT7530_PORT_MATRIX_MODE); + mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK, + VLAN_ATTR(MT7530_VLAN_TRANSPARENT)); + + priv->ports[port].vlan_filtering = false; + + for (i = 0; i < MT7530_NUM_PORTS; i++) { + if (dsa_is_user_port(ds, i) && + priv->ports[i].vlan_filtering) { + all_user_ports_removed = false; + break; + } + } + + /* CPU port also does the same thing until all user ports belonging to + * the CPU port get out of VLAN filtering mode. + */ + if (all_user_ports_removed) { + mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT), + PCR_MATRIX(dsa_user_ports(priv->ds))); + mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT), + PORT_SPEC_TAG); + } +} + +static void +mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port) +{ + struct mt7530_priv *priv = ds->priv; + + /* The real fabric path would be decided on the membership in the + * entry of VLAN table. PCR_MATRIX set up here with ALL_MEMBERS + * means potential VLAN can be consisting of certain subset of all + * ports. + */ + mt7530_rmw(priv, MT7530_PCR_P(port), + PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS)); + + /* Trapped into security mode allows packet forwarding through VLAN + * table lookup. + */ + mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, + MT7530_PORT_SECURITY_MODE); + + /* Set the port as a user port which is to be able to recognize VID + * from incoming packets before fetching entry within the VLAN table. + */ + mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK, + VLAN_ATTR(MT7530_VLAN_USER)); +} + +static void mt7530_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *bridge) { @@ -817,8 +880,11 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port, /* Remove this port from the port matrix of the other ports * in the same bridge. If the port is disabled, port matrix * is kept and not being setup until the port becomes enabled. + * And the other port's port matrix cannot be broken when the + * other port is still a VLAN-aware port. 
*/ - if (dsa_is_user_port(ds, i) && i != port) { + if (!priv->ports[i].vlan_filtering && + dsa_is_user_port(ds, i) && i != port) { if (dsa_to_port(ds, i)->bridge_dev != bridge) continue; if (priv->ports[i].enable) @@ -836,6 +902,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port, PCR_MATRIX(BIT(MT7530_CPU_PORT))); priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT)); + mt7530_port_set_vlan_unaware(ds, port); + mutex_unlock(&priv->reg_mutex); } @@ -906,6 +974,220 @@ err: return 0; } +static int +mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid) +{ + struct mt7530_dummy_poll p; + u32 val; + int ret; + + val = VTCR_BUSY | VTCR_FUNC(cmd) | vid; + mt7530_write(priv, MT7530_VTCR, val); + + INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_VTCR); + ret = readx_poll_timeout(_mt7530_read, &p, val, + !(val & VTCR_BUSY), 20, 20000); + if (ret < 0) { + dev_err(priv->dev, "poll timeout\n"); + return ret; + } + + val = mt7530_read(priv, MT7530_VTCR); + if (val & VTCR_INVALID) { + dev_err(priv->dev, "read VTCR invalid\n"); + return -EINVAL; + } + + return 0; +} + +static int +mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering) +{ + struct mt7530_priv *priv = ds->priv; + + priv->ports[port].vlan_filtering = vlan_filtering; + + if (vlan_filtering) { + /* The port is being kept as VLAN-unaware port when bridge is + * set up with vlan_filtering not being set, Otherwise, the + * port and the corresponding CPU port is required the setup + * for becoming a VLAN-aware port. + */ + mt7530_port_set_vlan_aware(ds, port); + mt7530_port_set_vlan_aware(ds, MT7530_CPU_PORT); + } + + return 0; +} + +static int +mt7530_port_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + /* nothing needed */ + + return 0; +} + +static void +mt7530_hw_vlan_add(struct mt7530_priv *priv, + struct mt7530_hw_vlan_entry *entry) +{ + u8 new_members; + u32 val; + + new_members = entry->old_members | BIT(entry->port) | + BIT(MT7530_CPU_PORT); + + /* Validate the entry with independent learning, create egress tag per + * VLAN and joining the port as one of the port members. + */ + val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | VLAN_VALID; + mt7530_write(priv, MT7530_VAWD1, val); + + /* Decide whether adding tag or not for those outgoing packets from the + * port inside the VLAN. + */ + val = entry->untagged ? MT7530_VLAN_EGRESS_UNTAG : + MT7530_VLAN_EGRESS_TAG; + mt7530_rmw(priv, MT7530_VAWD2, + ETAG_CTRL_P_MASK(entry->port), + ETAG_CTRL_P(entry->port, val)); + + /* CPU port is always taken as a tagged port for serving more than one + * VLANs across and also being applied with egress type stack mode for + * that VLAN tags would be appended after hardware special tag used as + * DSA tag. + */ + mt7530_rmw(priv, MT7530_VAWD2, + ETAG_CTRL_P_MASK(MT7530_CPU_PORT), + ETAG_CTRL_P(MT7530_CPU_PORT, + MT7530_VLAN_EGRESS_STACK)); +} + +static void +mt7530_hw_vlan_del(struct mt7530_priv *priv, + struct mt7530_hw_vlan_entry *entry) +{ + u8 new_members; + u32 val; + + new_members = entry->old_members & ~BIT(entry->port); + + val = mt7530_read(priv, MT7530_VAWD1); + if (!(val & VLAN_VALID)) { + dev_err(priv->dev, + "Cannot be deleted due to invalid entry\n"); + return; + } + + /* If certain member apart from CPU port is still alive in the VLAN, + * the entry would be kept valid. Otherwise, the entry is got to be + * disabled. 
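
A worked example of the membership arithmetic above, assuming the usual MT7530 layout (user ports 0-4, CPU port 6): a VLAN whose members are ports 0, 1 and the CPU port carries old_members == 0x43.

	/* members {0, 1, CPU}: old_members = BIT(0) | BIT(1) | BIT(6) = 0x43 */
	new_members = 0x43 & ~BIT(1);	/* 0x41: port 0 still present, entry rewritten as valid */
	new_members = 0x41 & ~BIT(0);	/* 0x40 == BIT(MT7530_CPU_PORT): only the CPU port
					 * remains, so VAWD1/VAWD2 are cleared instead
					 */
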
+ */ + if (new_members && new_members != BIT(MT7530_CPU_PORT)) { + val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | + VLAN_VALID; + mt7530_write(priv, MT7530_VAWD1, val); + } else { + mt7530_write(priv, MT7530_VAWD1, 0); + mt7530_write(priv, MT7530_VAWD2, 0); + } +} + +static void +mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid, + struct mt7530_hw_vlan_entry *entry, + mt7530_vlan_op vlan_op) +{ + u32 val; + + /* Fetch entry */ + mt7530_vlan_cmd(priv, MT7530_VTCR_RD_VID, vid); + + val = mt7530_read(priv, MT7530_VAWD1); + + entry->old_members = (val >> PORT_MEM_SHFT) & PORT_MEM_MASK; + + /* Manipulate entry */ + vlan_op(priv, entry); + + /* Flush result to hardware */ + mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid); +} + +static void +mt7530_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + struct mt7530_hw_vlan_entry new_entry; + struct mt7530_priv *priv = ds->priv; + u16 vid; + + /* The port is kept as VLAN-unaware if bridge with vlan_filtering not + * being set. + */ + if (!priv->ports[port].vlan_filtering) + return; + + mutex_lock(&priv->reg_mutex); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + mt7530_hw_vlan_entry_init(&new_entry, port, untagged); + mt7530_hw_vlan_update(priv, vid, &new_entry, + mt7530_hw_vlan_add); + } + + if (pvid) { + mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, + G0_PORT_VID(vlan->vid_end)); + priv->ports[port].pvid = vlan->vid_end; + } + + mutex_unlock(&priv->reg_mutex); +} + +static int +mt7530_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct mt7530_hw_vlan_entry target_entry; + struct mt7530_priv *priv = ds->priv; + u16 vid, pvid; + + /* The port is kept as VLAN-unaware if bridge with vlan_filtering not + * being set. + */ + if (!priv->ports[port].vlan_filtering) + return 0; + + mutex_lock(&priv->reg_mutex); + + pvid = priv->ports[port].pvid; + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + mt7530_hw_vlan_entry_init(&target_entry, port, 0); + mt7530_hw_vlan_update(priv, vid, &target_entry, + mt7530_hw_vlan_del); + + /* PVID is being restored to the default whenever the PVID port + * is being removed from the VLAN. 
+ */ + if (pvid == vid) + pvid = G0_PORT_VID_DEF; + } + + mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, pvid); + priv->ports[port].pvid = pvid; + + mutex_unlock(&priv->reg_mutex); + + return 0; +} + static enum dsa_tag_protocol mtk_get_tag_protocol(struct dsa_switch *ds, int port) { @@ -1035,6 +1317,10 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .port_fdb_add = mt7530_port_fdb_add, .port_fdb_del = mt7530_port_fdb_del, .port_fdb_dump = mt7530_port_fdb_dump, + .port_vlan_filtering = mt7530_port_vlan_filtering, + .port_vlan_prepare = mt7530_port_vlan_prepare, + .port_vlan_add = mt7530_port_vlan_add, + .port_vlan_del = mt7530_port_vlan_del, }; static int diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 74db9822eb40..d9b407a22a58 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -17,6 +17,7 @@ #define MT7530_NUM_PORTS 7 #define MT7530_CPU_PORT 6 #define MT7530_NUM_FDB_RECORDS 2048 +#define MT7530_ALL_MEMBERS 0xff #define NUM_TRGMII_CTRL 5 @@ -88,21 +89,42 @@ enum mt7530_fdb_cmd { /* Register for vlan table control */ #define MT7530_VTCR 0x90 #define VTCR_BUSY BIT(31) -#define VTCR_FUNC (((x) & 0xf) << 12) -#define VTCR_FUNC_RD_VID 0x1 -#define VTCR_FUNC_WR_VID 0x2 -#define VTCR_FUNC_INV_VID 0x3 -#define VTCR_FUNC_VAL_VID 0x4 +#define VTCR_INVALID BIT(16) +#define VTCR_FUNC(x) (((x) & 0xf) << 12) #define VTCR_VID ((x) & 0xfff) +enum mt7530_vlan_cmd { + /* Read/Write the specified VID entry from VAWD register based + * on VID. + */ + MT7530_VTCR_RD_VID = 0, + MT7530_VTCR_WR_VID = 1, +}; + /* Register for setup vlan and acl write data */ #define MT7530_VAWD1 0x94 #define PORT_STAG BIT(31) +/* Independent VLAN Learning */ #define IVL_MAC BIT(30) +/* Per VLAN Egress Tag Control */ +#define VTAG_EN BIT(28) +/* VLAN Member Control */ #define PORT_MEM(x) (((x) & 0xff) << 16) -#define VALID BIT(1) +/* VLAN Entry Valid */ +#define VLAN_VALID BIT(0) +#define PORT_MEM_SHFT 16 +#define PORT_MEM_MASK 0xff #define MT7530_VAWD2 0x98 +/* Egress Tag Control */ +#define ETAG_CTRL_P(p, x) (((x) & 0x3) << ((p) << 1)) +#define ETAG_CTRL_P_MASK(p) ETAG_CTRL_P(p, 3) + +enum mt7530_vlan_egress_attr { + MT7530_VLAN_EGRESS_UNTAG = 0, + MT7530_VLAN_EGRESS_TAG = 2, + MT7530_VLAN_EGRESS_STACK = 3, +}; /* Register for port STP state control */ #define MT7530_SSP_P(x) (0x2000 + ((x) * 0x100)) @@ -120,11 +142,23 @@ enum mt7530_stp_state { /* Register for port control */ #define MT7530_PCR_P(x) (0x2004 + ((x) * 0x100)) #define PORT_VLAN(x) ((x) & 0x3) + +enum mt7530_port_mode { + /* Port Matrix Mode: Frames are forwarded by the PCR_MATRIX members. */ + MT7530_PORT_MATRIX_MODE = PORT_VLAN(0), + + /* Security Mode: Discard any frame due to ingress membership + * violation or VID missed on the VLAN table. 
+ */ + MT7530_PORT_SECURITY_MODE = PORT_VLAN(3), +}; + #define PCR_MATRIX(x) (((x) & 0xff) << 16) #define PORT_PRI(x) (((x) & 0x7) << 24) #define EG_TAG(x) (((x) & 0x3) << 28) #define PCR_MATRIX_MASK PCR_MATRIX(0xff) #define PCR_MATRIX_CLR PCR_MATRIX(0) +#define PCR_PORT_VLAN_MASK PORT_VLAN(3) /* Register for port security control */ #define MT7530_PSC_P(x) (0x200c + ((x) * 0x100)) @@ -134,10 +168,20 @@ enum mt7530_stp_state { #define MT7530_PVC_P(x) (0x2010 + ((x) * 0x100)) #define PORT_SPEC_TAG BIT(5) #define VLAN_ATTR(x) (((x) & 0x3) << 6) +#define VLAN_ATTR_MASK VLAN_ATTR(3) + +enum mt7530_vlan_port_attr { + MT7530_VLAN_USER = 0, + MT7530_VLAN_TRANSPARENT = 3, +}; + #define STAG_VPID (((x) & 0xffff) << 16) /* Register for port port-and-protocol based vlan 1 control */ #define MT7530_PPBV1_P(x) (0x2014 + ((x) * 0x100)) +#define G0_PORT_VID(x) (((x) & 0xfff) << 0) +#define G0_PORT_VID_MASK G0_PORT_VID(0xfff) +#define G0_PORT_VID_DEF G0_PORT_VID(1) /* Register for port MAC control register */ #define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100)) @@ -345,9 +389,20 @@ struct mt7530_fdb { bool noarp; }; +/* struct mt7530_port - This is the main data structure for holding the state + * of the port. + * @enable: The status used for show port is enabled or not. + * @pm: The matrix used to show all connections with the port. + * @pvid: The VLAN specified is to be considered a PVID at ingress. Any + * untagged frames will be assigned to the related VLAN. + * @vlan_filtering: The flags indicating whether the port that can recognize + * VLAN-tagged frames. + */ struct mt7530_port { bool enable; u32 pm; + u16 pvid; + bool vlan_filtering; }; /* struct mt7530_priv - This is the main data structure for holding the state @@ -382,6 +437,22 @@ struct mt7530_priv { struct mutex reg_mutex; }; +struct mt7530_hw_vlan_entry { + int port; + u8 old_members; + bool untagged; +}; + +static inline void mt7530_hw_vlan_entry_init(struct mt7530_hw_vlan_entry *e, + int port, bool untagged) +{ + e->port = port; + e->untagged = untagged; +} + +typedef void (*mt7530_vlan_op)(struct mt7530_priv *, + struct mt7530_hw_vlan_entry *); + struct mt7530_hw_stats { const char *string; u16 reg; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 66d33e97cbc5..eb328bade225 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1185,8 +1185,7 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, static int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) + const struct switchdev_obj_port_vlan *vlan) { struct mv88e6xxx_chip *chip = ds->priv; int err; @@ -1295,8 +1294,7 @@ static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port, } static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) + const struct switchdev_obj_port_vlan *vlan) { struct mv88e6xxx_chip *chip = ds->priv; bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; @@ -1725,9 +1723,11 @@ static int mv88e6xxx_setup_message_port(struct mv88e6xxx_chip *chip, int port) static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port) { - bool flood = port == dsa_upstream_port(chip->ds); + struct dsa_switch *ds = chip->ds; + bool flood; /* Upstream ports flood frames with unknown unicast or multicast DA */ + flood = dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port); if 
(chip->info->ops->port_set_egress_floods) return chip->info->ops->port_set_egress_floods(chip, port, flood, flood); @@ -1744,6 +1744,39 @@ static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port, return 0; } +static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port) +{ + struct dsa_switch *ds = chip->ds; + int upstream_port; + int err; + + upstream_port = dsa_upstream_port(ds, port); + if (chip->info->ops->port_set_upstream_port) { + err = chip->info->ops->port_set_upstream_port(chip, port, + upstream_port); + if (err) + return err; + } + + if (port == upstream_port) { + if (chip->info->ops->set_cpu_port) { + err = chip->info->ops->set_cpu_port(chip, + upstream_port); + if (err) + return err; + } + + if (chip->info->ops->set_egress_port) { + err = chip->info->ops->set_egress_port(chip, + upstream_port); + if (err) + return err; + } + } + + return 0; +} + static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) { struct dsa_switch *ds = chip->ds; @@ -1814,13 +1847,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) if (err) return err; - reg = 0; - if (chip->info->ops->port_set_upstream_port) { - err = chip->info->ops->port_set_upstream_port( - chip, port, dsa_upstream_port(ds)); - if (err) - return err; - } + err = mv88e6xxx_setup_upstream_port(chip, port); + if (err) + return err; err = mv88e6xxx_port_set_8021q_mode(chip, port, MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED); @@ -1946,21 +1975,8 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds, static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip) { struct dsa_switch *ds = chip->ds; - u32 upstream_port = dsa_upstream_port(ds); int err; - if (chip->info->ops->set_cpu_port) { - err = chip->info->ops->set_cpu_port(chip, upstream_port); - if (err) - return err; - } - - if (chip->info->ops->set_egress_port) { - err = chip->info->ops->set_egress_port(chip, upstream_port); - if (err) - return err; - } - /* Disable remote management, and set the switch's DSA device number. */ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2, MV88E6XXX_G1_CTL2_MULTIPLE_CASCADE | @@ -3741,6 +3757,7 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds, return chip->info->tag_protocol; } +#if IS_ENABLED(CONFIG_NET_DSA_LEGACY) static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **priv) @@ -3788,10 +3805,10 @@ free: return NULL; } +#endif static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans) + const struct switchdev_obj_port_mdb *mdb) { /* We don't need any dynamic resource from the kernel (yet), * so skip the prepare phase. 
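
The same mechanical change repeats across every DSA driver touched by this series: the switchdev_trans argument disappears, .port_{vlan,mdb}_prepare keeps returning an int so validation errors can be reported, and .port_{vlan,mdb}_add becomes a void commit step. A sketch of the resulting callback pair for a hypothetical driver "foo" (names illustrative, shape taken from the hunks above):

	static int foo_port_mdb_prepare(struct dsa_switch *ds, int port,
					const struct switchdev_obj_port_mdb *mdb)
	{
		/* validate only; reserve nothing, reject with e.g. -EOPNOTSUPP */
		return 0;
	}

	static void foo_port_mdb_add(struct dsa_switch *ds, int port,
				     const struct switchdev_obj_port_mdb *mdb)
	{
		/* commit phase: program the hardware; failures can only be logged */
	}
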
@@ -3801,8 +3818,7 @@ static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port, } static void mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans) + const struct switchdev_obj_port_mdb *mdb) { struct mv88e6xxx_chip *chip = ds->priv; @@ -3829,7 +3845,9 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port, } static const struct dsa_switch_ops mv88e6xxx_switch_ops = { +#if IS_ENABLED(CONFIG_NET_DSA_LEGACY) .probe = mv88e6xxx_drv_probe, +#endif .get_tag_protocol = mv88e6xxx_get_tag_protocol, .setup = mv88e6xxx_setup, .adjust_link = mv88e6xxx_adjust_link, @@ -3958,11 +3976,19 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) if (err) goto out_g1_irq; } + + err = mv88e6xxx_g1_atu_prob_irq_setup(chip); + if (err) + goto out_g2_irq; + + err = mv88e6xxx_g1_vtu_prob_irq_setup(chip); + if (err) + goto out_g1_atu_prob_irq; } err = mv88e6xxx_mdios_register(chip, np); if (err) - goto out_g2_irq; + goto out_g1_vtu_prob_irq; err = mv88e6xxx_register_switch(chip); if (err) @@ -3972,6 +3998,12 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) out_mdio: mv88e6xxx_mdios_unregister(chip); +out_g1_vtu_prob_irq: + if (chip->irq > 0) + mv88e6xxx_g1_vtu_prob_irq_free(chip); +out_g1_atu_prob_irq: + if (chip->irq > 0) + mv88e6xxx_g1_atu_prob_irq_free(chip); out_g2_irq: if (chip->info->g2_irqs > 0 && chip->irq > 0) mv88e6xxx_g2_irq_free(chip); @@ -3995,6 +4027,8 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) mv88e6xxx_mdios_unregister(chip); if (chip->irq > 0) { + mv88e6xxx_g1_vtu_prob_irq_free(chip); + mv88e6xxx_g1_atu_prob_irq_free(chip); if (chip->info->g2_irqs > 0) mv88e6xxx_g2_irq_free(chip); mutex_lock(&chip->reg_lock); diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 334f6f7544ba..3dba6e90adcf 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -207,6 +207,8 @@ struct mv88e6xxx_chip { int irq; int device_irq; int watchdog_irq; + int atu_prob_irq; + int vtu_prob_irq; }; struct mv88e6xxx_bus_ops { diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index b0dc7518b47f..6aee7316fea6 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -29,9 +29,9 @@ #define MV88E6XXX_G1_STS_IRQ_AVB 8 #define MV88E6XXX_G1_STS_IRQ_DEVICE 7 #define MV88E6XXX_G1_STS_IRQ_STATS 6 -#define MV88E6XXX_G1_STS_IRQ_VTU_PROBLEM 5 +#define MV88E6XXX_G1_STS_IRQ_VTU_PROB 5 #define MV88E6XXX_G1_STS_IRQ_VTU_DONE 4 -#define MV88E6XXX_G1_STS_IRQ_ATU_PROBLEM 3 +#define MV88E6XXX_G1_STS_IRQ_ATU_PROB 3 #define MV88E6XXX_G1_STS_IRQ_ATU_DONE 2 #define MV88E6XXX_G1_STS_IRQ_TCAM_DONE 1 #define MV88E6XXX_G1_STS_IRQ_EEPROM_DONE 0 @@ -82,6 +82,10 @@ #define MV88E6XXX_G1_VTU_OP_VTU_GET_NEXT 0x4000 #define MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE 0x5000 #define MV88E6XXX_G1_VTU_OP_STU_GET_NEXT 0x6000 +#define MV88E6XXX_G1_VTU_OP_GET_CLR_VIOLATION 0x7000 +#define MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION BIT(6) +#define MV88E6XXX_G1_VTU_OP_MISS_VIOLATION BIT(5) +#define MV88E6XXX_G1_VTU_OP_SPID_MASK 0xf /* Offset 0x06: VTU VID Register */ #define MV88E6XXX_G1_VTU_VID 0x06 @@ -122,6 +126,10 @@ #define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_ALL_DB 0x5000 #define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_NON_STATIC_DB 0x6000 #define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000 +#define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7) +#define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6) +#define 
MV88E6XXX_G1_ATU_OP_MISS_VIOLATION BIT(5) +#define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4) /* Offset 0x0C: ATU Data Register */ #define MV88E6XXX_G1_ATU_DATA 0x0c @@ -255,6 +263,8 @@ int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid, int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all); int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port, bool all); +int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip); +void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip); int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_entry *entry); @@ -269,5 +279,7 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip, int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_entry *entry); int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip); +int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip); +void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip); #endif /* _MV88E6XXX_GLOBAL1_H */ diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index efeef4b01442..20d941f4273b 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c @@ -9,6 +9,8 @@ * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ +#include <linux/interrupt.h> +#include <linux/irqdomain.h> #include "chip.h" #include "global1.h" @@ -307,3 +309,88 @@ int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port, return mv88e6xxx_g1_atu_move(chip, fid, from_port, to_port, all); } + +static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) +{ + struct mv88e6xxx_chip *chip = dev_id; + struct mv88e6xxx_atu_entry entry; + int err; + u16 val; + + mutex_lock(&chip->reg_lock); + + err = mv88e6xxx_g1_atu_op(chip, 0, + MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION); + if (err) + goto out; + + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &val); + if (err) + goto out; + + err = mv88e6xxx_g1_atu_data_read(chip, &entry); + if (err) + goto out; + + err = mv88e6xxx_g1_atu_mac_read(chip, &entry); + if (err) + goto out; + + mutex_unlock(&chip->reg_lock); + + if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { + dev_err_ratelimited(chip->dev, + "ATU age out violation for %pM\n", + entry.mac); + } + + if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { + dev_err_ratelimited(chip->dev, + "ATU member violation for %pM portvec %x\n", + entry.mac, entry.portvec); + } + + if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) + dev_err_ratelimited(chip->dev, + "ATU miss violation for %pM portvec %x\n", + entry.mac, entry.portvec); + + if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) + dev_err_ratelimited(chip->dev, + "ATU full violation for %pM portvec %x\n", + entry.mac, entry.portvec); + + return IRQ_HANDLED; + +out: + mutex_unlock(&chip->reg_lock); + + dev_err(chip->dev, "ATU problem: error %d while handling interrupt\n", + err); + return IRQ_HANDLED; +} + +int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip) +{ + int err; + + chip->atu_prob_irq = irq_find_mapping(chip->g1_irq.domain, + MV88E6XXX_G1_STS_IRQ_ATU_PROB); + if (chip->atu_prob_irq < 0) + return chip->atu_prob_irq; + + err = request_threaded_irq(chip->atu_prob_irq, NULL, + mv88e6xxx_g1_atu_prob_irq_thread_fn, + IRQF_ONESHOT, "mv88e6xxx-g1-atu-prob", + chip); + if (err) + irq_dispose_mapping(chip->atu_prob_irq); + + return err; +} + +void mv88e6xxx_g1_atu_prob_irq_free(struct 
mv88e6xxx_chip *chip) +{ + free_irq(chip->atu_prob_irq, chip); + irq_dispose_mapping(chip->atu_prob_irq); +} diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c index 8c8a0ec3d6e9..7997961647de 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c @@ -11,6 +11,9 @@ * (at your option) any later version. */ +#include <linux/interrupt.h> +#include <linux/irqdomain.h> + #include "chip.h" #include "global1.h" @@ -513,3 +516,74 @@ int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL); } + +static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id) +{ + struct mv88e6xxx_chip *chip = dev_id; + struct mv88e6xxx_vtu_entry entry; + int spid; + int err; + u16 val; + + mutex_lock(&chip->reg_lock); + + err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_GET_CLR_VIOLATION); + if (err) + goto out; + + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, &val); + if (err) + goto out; + + err = mv88e6xxx_g1_vtu_vid_read(chip, &entry); + if (err) + goto out; + + mutex_unlock(&chip->reg_lock); + + spid = val & MV88E6XXX_G1_VTU_OP_SPID_MASK; + + if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) { + dev_err_ratelimited(chip->dev, "VTU member violation for vid %d, source port %d\n", + entry.vid, spid); + } + + if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION) + dev_err_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n", + entry.vid, spid); + + return IRQ_HANDLED; + +out: + mutex_unlock(&chip->reg_lock); + + dev_err(chip->dev, "VTU problem: error %d while handling interrupt\n", + err); + + return IRQ_HANDLED; +} + +int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip) +{ + int err; + + chip->vtu_prob_irq = irq_find_mapping(chip->g1_irq.domain, + MV88E6XXX_G1_STS_IRQ_VTU_PROB); + if (chip->vtu_prob_irq < 0) + return chip->vtu_prob_irq; + + err = request_threaded_irq(chip->vtu_prob_irq, NULL, + mv88e6xxx_g1_vtu_prob_irq_thread_fn, + IRQF_ONESHOT, "mv88e6xxx-g1-vtu-prob", + chip); + if (err) + irq_dispose_mapping(chip->vtu_prob_irq); + + return err; +} + +void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip) +{ + free_irq(chip->vtu_prob_irq, chip); + irq_dispose_mapping(chip->vtu_prob_irq); +} diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 58483af80bdb..30b1c8512049 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -42,48 +42,7 @@ #define DRV_NAME "dummy" #define DRV_VERSION "1.0" -#undef pr_fmt -#define pr_fmt(fmt) DRV_NAME ": " fmt - static int numdummies = 1; -static int num_vfs; - -struct vf_data_storage { - u8 vf_mac[ETH_ALEN]; - u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ - u16 pf_qos; - __be16 vlan_proto; - u16 min_tx_rate; - u16 max_tx_rate; - u8 spoofchk_enabled; - bool rss_query_enabled; - u8 trusted; - int link_state; -}; - -struct dummy_priv { - struct vf_data_storage *vfinfo; -}; - -static int dummy_num_vf(struct device *dev) -{ - return num_vfs; -} - -static struct bus_type dummy_bus = { - .name = "dummy", - .num_vf = dummy_num_vf, -}; - -static void release_dummy_parent(struct device *dev) -{ -} - -static struct device dummy_parent = { - .init_name = "dummy", - .bus = &dummy_bus, - .release = release_dummy_parent, -}; /* fake multicast ability */ static void set_multicast_list(struct net_device *dev) @@ -133,25 +92,10 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev) static int dummy_dev_init(struct net_device *dev) { - struct dummy_priv *priv = netdev_priv(dev); - dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); if (!dev->dstats) return -ENOMEM; - priv->vfinfo = NULL; - - if (!num_vfs) - return 0; - - dev->dev.parent = &dummy_parent; - priv->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), - GFP_KERNEL); - if (!priv->vfinfo) { - free_percpu(dev->dstats); - return -ENOMEM; - } - return 0; } @@ -169,117 +113,6 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier) return 0; } -static int dummy_set_vf_mac(struct net_device *dev, int vf, u8 *mac) -{ - struct dummy_priv *priv = netdev_priv(dev); - - if (!is_valid_ether_addr(mac) || (vf >= num_vfs)) - return -EINVAL; - - memcpy(priv->vfinfo[vf].vf_mac, mac, ETH_ALEN); - - return 0; -} - -static int dummy_set_vf_vlan(struct net_device *dev, int vf, - u16 vlan, u8 qos, __be16 vlan_proto) -{ - struct dummy_priv *priv = netdev_priv(dev); - - if ((vf >= num_vfs) || (vlan > 4095) || (qos > 7)) - return -EINVAL; - - priv->vfinfo[vf].pf_vlan = vlan; - priv->vfinfo[vf].pf_qos = qos; - priv->vfinfo[vf].vlan_proto = vlan_proto; - - return 0; -} - -static int dummy_set_vf_rate(struct net_device *dev, int vf, int min, int max) -{ - struct dummy_priv *priv = netdev_priv(dev); - - if (vf >= num_vfs) - return -EINVAL; - - priv->vfinfo[vf].min_tx_rate = min; - priv->vfinfo[vf].max_tx_rate = max; - - return 0; -} - -static int dummy_set_vf_spoofchk(struct net_device *dev, int vf, bool val) -{ - struct dummy_priv *priv = netdev_priv(dev); - - if (vf >= num_vfs) - return -EINVAL; - - priv->vfinfo[vf].spoofchk_enabled = val; - - return 0; -} - -static int dummy_set_vf_rss_query_en(struct net_device *dev, int vf, bool val) -{ - struct dummy_priv *priv = netdev_priv(dev); - - if (vf >= num_vfs) - return -EINVAL; - - priv->vfinfo[vf].rss_query_enabled = val; - - return 0; -} - -static int dummy_set_vf_trust(struct net_device *dev, int vf, bool val) -{ - struct dummy_priv *priv = netdev_priv(dev); - - if (vf >= num_vfs) - return -EINVAL; - - priv->vfinfo[vf].trusted = val; - - return 0; -} - -static int dummy_get_vf_config(struct net_device *dev, - int vf, struct ifla_vf_info *ivi) -{ - struct dummy_priv *priv = netdev_priv(dev); - - if (vf >= num_vfs) - return -EINVAL; - - ivi->vf = vf; - memcpy(&ivi->mac, priv->vfinfo[vf].vf_mac, ETH_ALEN); - ivi->vlan = priv->vfinfo[vf].pf_vlan; - ivi->qos = priv->vfinfo[vf].pf_qos; - ivi->spoofchk = priv->vfinfo[vf].spoofchk_enabled; - ivi->linkstate = priv->vfinfo[vf].link_state; - ivi->min_tx_rate = priv->vfinfo[vf].min_tx_rate; - ivi->max_tx_rate = priv->vfinfo[vf].max_tx_rate; - ivi->rss_query_en = priv->vfinfo[vf].rss_query_enabled; - ivi->trusted = priv->vfinfo[vf].trusted; - ivi->vlan_proto = 
priv->vfinfo[vf].vlan_proto; - - return 0; -} - -static int dummy_set_vf_link_state(struct net_device *dev, int vf, int state) -{ - struct dummy_priv *priv = netdev_priv(dev); - - if (vf >= num_vfs) - return -EINVAL; - - priv->vfinfo[vf].link_state = state; - - return 0; -} - static const struct net_device_ops dummy_netdev_ops = { .ndo_init = dummy_dev_init, .ndo_uninit = dummy_dev_uninit, @@ -289,14 +122,6 @@ static const struct net_device_ops dummy_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_get_stats64 = dummy_get_stats64, .ndo_change_carrier = dummy_change_carrier, - .ndo_set_vf_mac = dummy_set_vf_mac, - .ndo_set_vf_vlan = dummy_set_vf_vlan, - .ndo_set_vf_rate = dummy_set_vf_rate, - .ndo_set_vf_spoofchk = dummy_set_vf_spoofchk, - .ndo_set_vf_trust = dummy_set_vf_trust, - .ndo_get_vf_config = dummy_get_vf_config, - .ndo_set_vf_link_state = dummy_set_vf_link_state, - .ndo_set_vf_rss_query_en = dummy_set_vf_rss_query_en, }; static void dummy_get_drvinfo(struct net_device *dev, @@ -323,13 +148,6 @@ static const struct ethtool_ops dummy_ethtool_ops = { .get_ts_info = dummy_get_ts_info, }; -static void dummy_free_netdev(struct net_device *dev) -{ - struct dummy_priv *priv = netdev_priv(dev); - - kfree(priv->vfinfo); -} - static void dummy_setup(struct net_device *dev) { ether_setup(dev); @@ -338,7 +156,6 @@ static void dummy_setup(struct net_device *dev) dev->netdev_ops = &dummy_netdev_ops; dev->ethtool_ops = &dummy_ethtool_ops; dev->needs_free_netdev = true; - dev->priv_destructor = dummy_free_netdev; /* Fill in device structure with ethernet-generic values. */ dev->flags |= IFF_NOARP; @@ -370,7 +187,6 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[], static struct rtnl_link_ops dummy_link_ops __read_mostly = { .kind = DRV_NAME, - .priv_size = sizeof(struct dummy_priv), .setup = dummy_setup, .validate = dummy_validate, }; @@ -379,16 +195,12 @@ static struct rtnl_link_ops dummy_link_ops __read_mostly = { module_param(numdummies, int, 0); MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices"); -module_param(num_vfs, int, 0); -MODULE_PARM_DESC(num_vfs, "Number of dummy VFs per dummy device"); - static int __init dummy_init_one(void) { struct net_device *dev_dummy; int err; - dev_dummy = alloc_netdev(sizeof(struct dummy_priv), - "dummy%d", NET_NAME_ENUM, dummy_setup); + dev_dummy = alloc_netdev(0, "dummy%d", NET_NAME_ENUM, dummy_setup); if (!dev_dummy) return -ENOMEM; @@ -407,21 +219,6 @@ static int __init dummy_init_module(void) { int i, err = 0; - if (num_vfs) { - err = bus_register(&dummy_bus); - if (err < 0) { - pr_err("registering dummy bus failed\n"); - return err; - } - - err = device_register(&dummy_parent); - if (err < 0) { - pr_err("registering dummy parent device failed\n"); - bus_unregister(&dummy_bus); - return err; - } - } - rtnl_lock(); err = __rtnl_link_register(&dummy_link_ops); if (err < 0) @@ -437,22 +234,12 @@ static int __init dummy_init_module(void) out: rtnl_unlock(); - if (err && num_vfs) { - device_unregister(&dummy_parent); - bus_unregister(&dummy_bus); - } - return err; } static void __exit dummy_cleanup_module(void) { rtnl_link_unregister(&dummy_link_ops); - - if (num_vfs) { - device_unregister(&dummy_parent); - bus_unregister(&dummy_bus); - } } module_init(dummy_init_module); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index c60421339a98..b6cf4b6962f5 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -42,6 +42,7 @@ source "drivers/net/ethernet/cavium/Kconfig" 
source "drivers/net/ethernet/chelsio/Kconfig" source "drivers/net/ethernet/cirrus/Kconfig" source "drivers/net/ethernet/cisco/Kconfig" +source "drivers/net/ethernet/cortina/Kconfig" config CX_ECAT tristate "Beckhoff CX5020 EtherCAT master support" @@ -170,6 +171,7 @@ source "drivers/net/ethernet/sis/Kconfig" source "drivers/net/ethernet/sfc/Kconfig" source "drivers/net/ethernet/sgi/Kconfig" source "drivers/net/ethernet/smsc/Kconfig" +source "drivers/net/ethernet/socionext/Kconfig" source "drivers/net/ethernet/stmicro/Kconfig" source "drivers/net/ethernet/sun/Kconfig" source "drivers/net/ethernet/tehuti/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 39f6273358ed..3cdf01e96e0b 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/ obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ +obj-$(CONFIG_NET_VENDOR_CORTINA) += cortina/ obj-$(CONFIG_CX_ECAT) += ec_bhf.o obj-$(CONFIG_DM9000) += davicom/ obj-$(CONFIG_DNET) += dnet.o @@ -82,6 +83,7 @@ obj-$(CONFIG_SFC) += sfc/ obj-$(CONFIG_SFC_FALCON) += sfc/falcon/ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ +obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/ obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/ obj-$(CONFIG_NET_VENDOR_SUN) += sun/ obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/ diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index a1a52eb53b14..8f71b79b4949 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -1436,13 +1436,13 @@ static int ace_init(struct net_device *dev) ace_set_txprd(regs, ap, 0); writel(0, ®s->RxRetCsm); - /* - * Enable DMA engine now. - * If we do this sooner, Mckinley box pukes. - * I assume it's because Tigon II DMA engine wants to check - * *something* even before the CPU is started. - */ - writel(1, ®s->AssistState); /* enable DMA */ + /* + * Enable DMA engine now. + * If we do this sooner, Mckinley box pukes. + * I assume it's because Tigon II DMA engine wants to check + * *something* even before the CPU is started. 
+ */ + writel(1, &regs->AssistState); /* enable DMA */ /* * Start the NIC CPU diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index b11e573ad57a..ea149c134e15 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -504,3 +504,14 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) return 0; } + +bool ena_com_cq_empty(struct ena_com_io_cq *io_cq) +{ + struct ena_eth_io_rx_cdesc_base *cdesc; + + cdesc = ena_com_get_next_rx_cdesc(io_cq); + if (cdesc) + return false; + else + return true; +} diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h index bb53c3a4f8e9..2f7657227cfe 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h @@ -88,6 +88,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id); +bool ena_com_cq_empty(struct ena_com_io_cq *io_cq); + static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, struct ena_eth_io_intr_reg *intr_reg) { diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index fbe21a817bd8..6975150d144e 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -161,6 +161,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter, ring->per_napi_packets = 0; ring->per_napi_bytes = 0; ring->cpu = 0; + ring->first_interrupt = false; + ring->no_interrupt_event_cnt = 0; u64_stats_init(&ring->syncp); } @@ -1277,6 +1279,9 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data) { struct ena_napi *ena_napi = data; + ena_napi->tx_ring->first_interrupt = true; + ena_napi->rx_ring->first_interrupt = true; + napi_schedule_irqoff(&ena_napi->napi); return IRQ_HANDLED; @@ -2663,8 +2668,32 @@ static void ena_fw_reset_device(struct work_struct *work) rtnl_unlock(); } -static int check_missing_comp_in_queue(struct ena_adapter *adapter, - struct ena_ring *tx_ring) +static int check_for_rx_interrupt_queue(struct ena_adapter *adapter, + struct ena_ring *rx_ring) +{ + if (likely(rx_ring->first_interrupt)) + return 0; + + if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) + return 0; + + rx_ring->no_interrupt_event_cnt++; + + if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { + netif_err(adapter, rx_err, adapter->netdev, + "Potential MSIX issue on Rx side Queue = %d. 
Reset the device\n", + rx_ring->qid); + adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; + smp_mb__before_atomic(); + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + return -EIO; + } + + return 0; +} + +static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, + struct ena_ring *tx_ring) { struct ena_tx_buffer *tx_buf; unsigned long last_jiffies; @@ -2674,8 +2703,27 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter, for (i = 0; i < tx_ring->ring_size; i++) { tx_buf = &tx_ring->tx_buffer_info[i]; last_jiffies = tx_buf->last_jiffies; - if (unlikely(last_jiffies && - time_is_before_jiffies(last_jiffies + adapter->missing_tx_completion_to))) { + + if (last_jiffies == 0) + /* no pending Tx at this location */ + continue; + + if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies + + 2 * adapter->missing_tx_completion_to))) { + /* If after graceful period interrupt is still not + * received, we schedule a reset + */ + netif_err(adapter, tx_err, adapter->netdev, + "Potential MSIX issue on Tx side Queue = %d. Reset the device\n", + tx_ring->qid); + adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; + smp_mb__before_atomic(); + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + return -EIO; + } + + if (unlikely(time_is_before_jiffies(last_jiffies + + adapter->missing_tx_completion_to))) { if (!tx_buf->print_once) netif_notice(adapter, tx_err, adapter->netdev, "Found a Tx that wasn't completed on time, qid %d, index %d.\n", @@ -2704,9 +2752,10 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter, return rc; } -static void check_for_missing_tx_completions(struct ena_adapter *adapter) +static void check_for_missing_completions(struct ena_adapter *adapter) { struct ena_ring *tx_ring; + struct ena_ring *rx_ring; int i, budget, rc; /* Make sure the driver doesn't turn the device in other process */ @@ -2725,8 +2774,13 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter) for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { tx_ring = &adapter->tx_ring[i]; + rx_ring = &adapter->rx_ring[i]; + + rc = check_missing_comp_in_tx_queue(adapter, tx_ring); + if (unlikely(rc)) + return; - rc = check_missing_comp_in_queue(adapter, tx_ring); + rc = check_for_rx_interrupt_queue(adapter, rx_ring); if (unlikely(rc)) return; @@ -2885,7 +2939,7 @@ static void ena_timer_service(struct timer_list *t) check_for_admin_com_state(adapter); - check_for_missing_tx_completions(adapter); + check_for_missing_completions(adapter); check_for_empty_rx_ring(adapter); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 3bbc003871de..f1972b5ab650 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -44,7 +44,7 @@ #include "ena_eth_com.h" #define DRV_MODULE_VER_MAJOR 1 -#define DRV_MODULE_VER_MINOR 3 +#define DRV_MODULE_VER_MINOR 5 #define DRV_MODULE_VER_SUBMINOR 0 #define DRV_MODULE_NAME "ena" @@ -122,6 +122,7 @@ * We wait for 6 sec just to be on the safe side. 
*/ #define ENA_DEVICE_KALIVE_TIMEOUT (6 * HZ) +#define ENA_MAX_NO_INTERRUPT_ITERATIONS 3 #define ENA_MMIO_DISABLE_REG_READ BIT(0) @@ -236,6 +237,9 @@ struct ena_ring { /* The maximum header length the device can handle */ u8 tx_max_header_size; + bool first_interrupt; + u16 no_interrupt_event_cnt; + /* cpu for TPH */ int cpu; /* number of tx/rx_buffer_info's entries */ diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h index 9aec43c5bba8..48ca97fbe7bc 100644 --- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h @@ -60,6 +60,8 @@ enum ena_regs_reset_reason_types { ENA_REGS_RESET_USER_TRIGGER = 12, ENA_REGS_RESET_GENERIC = 13, + + ENA_REGS_RESET_MISS_INTERRUPT = 14, }; /* ena_registers offsets */ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index a74a8fbad53a..7a3ebfd236f5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2930,9 +2930,8 @@ void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring, void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) { struct ethhdr *eth = (struct ethhdr *)skb->data; - unsigned char *buf = skb->data; unsigned char buffer[128]; - unsigned int i, j; + unsigned int i; netdev_dbg(netdev, "\n************** SKB dump ****************\n"); @@ -2943,22 +2942,13 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source); netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto)); - for (i = 0, j = 0; i < skb->len;) { - j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx", - buf[i++]); - - if ((i % 32) == 0) { - netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer); - j = 0; - } else if ((i % 16) == 0) { - buffer[j++] = ' '; - buffer[j++] = ' '; - } else if ((i % 4) == 0) { - buffer[j++] = ' '; - } + for (i = 0; i < skb->len; i += 32) { + unsigned int len = min(skb->len - i, 32U); + + hex_dump_to_buffer(&skb->data[i], len, 32, 1, + buffer, sizeof(buffer), false); + netdev_dbg(netdev, " %#06x: %s\n", i, buffer); } - if (i % 32) - netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer); netdev_dbg(netdev, "\n************** SKB dump ****************\n"); } diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile index e4ae696920ef..686f6d8c9e79 100644 --- a/drivers/net/ethernet/aquantia/atlantic/Makefile +++ b/drivers/net/ethernet/aquantia/atlantic/Makefile @@ -39,4 +39,5 @@ atlantic-objs := aq_main.o \ hw_atl/hw_atl_a0.o \ hw_atl/hw_atl_b0.o \ hw_atl/hw_atl_utils.o \ + hw_atl/hw_atl_utils_fw2x.o \ hw_atl/hw_atl_llh.o diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 105fdb958cef..0b49f1aeebd3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -65,7 +65,13 @@ /*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/ -#define AQ_CFG_FC_MODE 3U +#define AQ_NIC_FC_OFF 0U +#define AQ_NIC_FC_TX 1U +#define AQ_NIC_FC_RX 2U +#define AQ_NIC_FC_FULL 3U +#define AQ_NIC_FC_AUTO 4U + +#define AQ_CFG_FC_MODE AQ_NIC_FC_FULL #define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h index 9eb5e222a234..d52b088ff8f0 100644 --- 
a/drivers/net/ethernet/aquantia/atlantic/aq_common.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h @@ -16,8 +16,45 @@ #include <linux/pci.h> #include "ver.h" -#include "aq_nic.h" #include "aq_cfg.h" #include "aq_utils.h" +#define PCI_VENDOR_ID_AQUANTIA 0x1D6A + +#define AQ_DEVICE_ID_0001 0x0001 +#define AQ_DEVICE_ID_D100 0xD100 +#define AQ_DEVICE_ID_D107 0xD107 +#define AQ_DEVICE_ID_D108 0xD108 +#define AQ_DEVICE_ID_D109 0xD109 + +#define AQ_DEVICE_ID_AQC100 0x00B1 +#define AQ_DEVICE_ID_AQC107 0x07B1 +#define AQ_DEVICE_ID_AQC108 0x08B1 +#define AQ_DEVICE_ID_AQC109 0x09B1 +#define AQ_DEVICE_ID_AQC111 0x11B1 +#define AQ_DEVICE_ID_AQC112 0x12B1 + +#define AQ_DEVICE_ID_AQC100S 0x80B1 +#define AQ_DEVICE_ID_AQC107S 0x87B1 +#define AQ_DEVICE_ID_AQC108S 0x88B1 +#define AQ_DEVICE_ID_AQC109S 0x89B1 +#define AQ_DEVICE_ID_AQC111S 0x91B1 +#define AQ_DEVICE_ID_AQC112S 0x92B1 + +#define AQ_DEVICE_ID_AQC111E 0x51B1 +#define AQ_DEVICE_ID_AQC112E 0x52B1 + +#define HW_ATL_NIC_NAME "aQuantia AQtion 10Gbit Network Adapter" + +#define AQ_HWREV_ANY 0 +#define AQ_HWREV_1 1 +#define AQ_HWREV_2 2 + +#define AQ_NIC_RATE_10G BIT(0) +#define AQ_NIC_RATE_5G BIT(1) +#define AQ_NIC_RATE_5GSR BIT(2) +#define AQ_NIC_RATE_2GS BIT(3) +#define AQ_NIC_RATE_1G BIT(4) +#define AQ_NIC_RATE_100M BIT(5) + #endif /* AQ_COMMON_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index b3825de6cdfb..a2d416b24ffc 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -7,7 +7,7 @@ * version 2, as published by the Free Software Foundation. */ -/* File aq_hw.h: Declaraion of abstract interface for NIC hardware specific +/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific * functions. 
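
aq_hw.h now describes two tables instead of one: aq_hw_ops keeps the ring and datapath hooks, while the new aq_fw_ops (declared at the end of this header) isolates firmware mailbox services. An illustrative sketch of how a hardware family might fill the firmware table (the member names come from this header; the example_fw_* function names are assumptions, not the driver's actual symbols):

	static const struct aq_fw_ops example_fw_ops = {
		.init               = example_fw_init,
		.reset              = example_fw_reset,
		.get_mac_permanent  = example_fw_get_mac,
		.set_link_speed     = example_fw_set_speed,
		.set_state          = example_fw_set_state,
		.update_link_status = example_fw_link_status,
		.update_stats       = example_fw_update_stats,
	};

The nic layer then reaches firmware strictly through this table, e.g. self->aq_fw_ops->update_link_status(self->aq_hw) in aq_nic_update_link_status() later in this patch.
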
*/ @@ -15,12 +15,15 @@ #define AQ_HW_H #include "aq_common.h" +#include "aq_rss.h" +#include "hw_atl/hw_atl_utils.h" /* NIC H/W capabilities */ struct aq_hw_caps_s { u64 hw_features; u64 link_speed_msk; unsigned int hw_priv_flags; + u32 media_type; u32 rxds; u32 txds; u32 txhwb_alignment; @@ -28,7 +31,7 @@ struct aq_hw_caps_s { u32 vecs; u32 mtu; u32 mac_regs_count; - u8 ports; + u32 hw_alive_check_addr; u8 msix_irqs; u8 tcs; u8 rxd_alignment; @@ -39,7 +42,6 @@ struct aq_hw_caps_s { u8 rx_rings; bool flow_control; bool is_64_dma; - u32 fw_ver_expected; }; struct aq_hw_link_status_s { @@ -86,30 +88,43 @@ struct aq_stats_s { #define AQ_HW_FLAG_ERRORS (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG) +#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \ + AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \ + AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW) + +#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \ + AQ_NIC_LINK_DOWN) + +#define AQ_HW_MEDIA_TYPE_TP 1U +#define AQ_HW_MEDIA_TYPE_FIBRE 2U + struct aq_hw_s { - struct aq_obj_s header; + atomic_t flags; + u8 rbl_enabled:1; struct aq_nic_cfg_s *aq_nic_cfg; - struct aq_pci_func_s *aq_pci_func; + const struct aq_fw_ops *aq_fw_ops; void __iomem *mmio; - unsigned int not_ff_addr; struct aq_hw_link_status_s aq_link_status; + struct hw_aq_atl_utils_mbox mbox; + struct hw_atl_stats_s last_stats; + struct aq_stats_s curr_stats; + u64 speed; + u32 itr_tx; + u32 itr_rx; + unsigned int chip_features; + u32 fw_ver_actual; + atomic_t dpc; + u32 mbox_addr; + u32 rpc_addr; + u32 rpc_tid; + struct hw_aq_atl_utils_fw_rpc rpc; }; struct aq_ring_s; struct aq_ring_param_s; -struct aq_nic_cfg_s; struct sk_buff; struct aq_hw_ops { - struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func, - unsigned int port, struct aq_hw_ops *ops); - - void (*destroy)(struct aq_hw_s *self); - - int (*get_hw_caps)(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps, - unsigned short device, - unsigned short subsystem_device); int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, unsigned int frags); @@ -123,20 +138,11 @@ struct aq_hw_ops { int (*hw_ring_tx_head_update)(struct aq_hw_s *self, struct aq_ring_s *aq_ring); - int (*hw_get_mac_permanent)(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps, - u8 *mac); - int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr); - int (*hw_get_link_status)(struct aq_hw_s *self); - - int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed); - int (*hw_reset)(struct aq_hw_s *self); - int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg, - u8 *mac_addr); + int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr); int (*hw_start)(struct aq_hw_s *self); @@ -184,9 +190,8 @@ struct aq_hw_ops { struct aq_rss_parameters *rss_params); int (*hw_get_regs)(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); - - int (*hw_update_stats)(struct aq_hw_s *self); + const struct aq_hw_caps_s *aq_hw_caps, + u32 *regs_buff); struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self); @@ -197,4 +202,20 @@ struct aq_hw_ops { int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state); }; +struct aq_fw_ops { + int (*init)(struct aq_hw_s *self); + + int (*reset)(struct aq_hw_s *self); + + int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac); + + int (*set_link_speed)(struct aq_hw_s *self, u32 speed); + + int (*set_state)(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state); + + int (*update_link_status)(struct aq_hw_s *self); + + int (*update_stats)(struct aq_hw_s 
*self); +}; + #endif /* AQ_HW_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c index 5f13465995f6..d526c4f19d34 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c @@ -13,6 +13,7 @@ #include "aq_hw_utils.h" #include "aq_hw.h" +#include "aq_nic.h" void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift, u32 val) @@ -39,8 +40,10 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg) { u32 value = readl(hw->mmio + reg); - if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr)) - aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG); + if ((~0U) == value && + (~0U) == readl(hw->mmio + + hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr)) + aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG); return value; } @@ -54,11 +57,11 @@ int aq_hw_err_from_flags(struct aq_hw_s *hw) { int err = 0; - if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) { + if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG)) { err = -ENXIO; goto err_exit; } - if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW)) { + if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_HW)) { err = -EIO; goto err_exit; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h index 03b72ddbffb9..dc88a1221f1d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h @@ -35,6 +35,9 @@ do { \ } \ } while (0) +#define aq_pr_err(...) pr_err(AQ_CFG_DRV_NAME ": " __VA_ARGS__) +#define aq_pr_trace(...) pr_info(AQ_CFG_DRV_NAME ": " __VA_ARGS__) + struct aq_hw_s; void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 5d6c40d86775..ba5fe8c4125d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -13,49 +13,39 @@ #include "aq_nic.h" #include "aq_pci_func.h" #include "aq_ethtool.h" -#include "hw_atl/hw_atl_a0.h" -#include "hw_atl/hw_atl_b0.h" #include <linux/netdevice.h> #include <linux/module.h> -static const struct pci_device_id aq_pci_tbl[] = { - { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), }, - { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), }, - { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), }, - { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), }, - { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), }, - {} -}; - -MODULE_DEVICE_TABLE(pci, aq_pci_tbl); - MODULE_LICENSE("GPL v2"); MODULE_VERSION(AQ_CFG_DRV_VERSION); MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR); MODULE_DESCRIPTION(AQ_CFG_DRV_DESC); -static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev) +static const struct net_device_ops aq_ndev_ops; + +struct net_device *aq_ndev_alloc(void) { - struct aq_hw_ops *ops = NULL; + struct net_device *ndev = NULL; + struct aq_nic_s *aq_nic = NULL; + + ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX); + if (!ndev) + return NULL; - ops = hw_atl_a0_get_ops_by_id(pdev); - if (!ops) - ops = hw_atl_b0_get_ops_by_id(pdev); + aq_nic = netdev_priv(ndev); + aq_nic->ndev = ndev; + ndev->netdev_ops = &aq_ndev_ops; + ndev->ethtool_ops = &aq_ethtool_ops; - return ops; + return ndev; } static int aq_ndev_open(struct net_device *ndev) { - struct aq_nic_s *aq_nic = NULL; int err = 0; + struct aq_nic_s *aq_nic = 
netdev_priv(ndev); - aq_nic = aq_nic_alloc_hot(ndev); - if (!aq_nic) { - err = -ENOMEM; - goto err_exit; - } err = aq_nic_init(aq_nic); if (err < 0) goto err_exit; @@ -78,7 +68,6 @@ static int aq_ndev_close(struct net_device *ndev) if (err < 0) goto err_exit; aq_nic_deinit(aq_nic); - aq_nic_free_hot_resources(aq_nic); err_exit: return err; @@ -150,15 +139,13 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev) err = aq_nic_set_packet_filter(aq_nic, ndev->flags); if (err < 0) - goto err_exit; + return; if (netdev_mc_count(ndev)) { err = aq_nic_set_multicast_list(aq_nic, ndev); if (err < 0) - goto err_exit; + return; } - -err_exit:; } static const struct net_device_ops aq_ndev_ops = { @@ -170,66 +157,3 @@ static const struct net_device_ops aq_ndev_ops = { .ndo_set_mac_address = aq_ndev_set_mac_address, .ndo_set_features = aq_ndev_set_features }; - -static int aq_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *pci_id) -{ - struct aq_hw_ops *aq_hw_ops = NULL; - struct aq_pci_func_s *aq_pci_func = NULL; - int err = 0; - - err = pci_enable_device(pdev); - if (err < 0) - goto err_exit; - aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev); - aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev, - &aq_ndev_ops, &aq_ethtool_ops); - if (!aq_pci_func) { - err = -ENOMEM; - goto err_exit; - } - err = aq_pci_func_init(aq_pci_func); - if (err < 0) - goto err_exit; - -err_exit: - if (err < 0) { - if (aq_pci_func) - aq_pci_func_free(aq_pci_func); - } - return err; -} - -static void aq_pci_remove(struct pci_dev *pdev) -{ - struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev); - - aq_pci_func_deinit(aq_pci_func); - aq_pci_func_free(aq_pci_func); -} - -static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg) -{ - struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev); - - return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg); -} - -static int aq_pci_resume(struct pci_dev *pdev) -{ - struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev); - pm_message_t pm_msg = PMSG_RESTORE; - - return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg); -} - -static struct pci_driver aq_pci_ops = { - .name = AQ_CFG_DRV_NAME, - .id_table = aq_pci_tbl, - .probe = aq_pci_probe, - .remove = aq_pci_remove, - .suspend = aq_pci_suspend, - .resume = aq_pci_resume, -}; - -module_pci_driver(aq_pci_ops); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h index 9748e7e575e0..ce92152eb43e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h @@ -14,4 +14,6 @@ #include "aq_common.h" +struct net_device *aq_ndev_alloc(void); + #endif /* AQ_MAIN_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 75a894a9251c..ebbaf63eaf47 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -14,7 +14,6 @@ #include "aq_vec.h" #include "aq_hw.h" #include "aq_pci_func.h" -#include "aq_nic_internal.h" #include <linux/moduleparam.h> #include <linux/netdevice.h> @@ -61,19 +60,13 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) rss_params->indirection_table[i] = i & (num_rss_queues - 1); } -/* Fills aq_nic_cfg with valid defaults */ -static void aq_nic_cfg_init_defaults(struct aq_nic_s *self) +/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */ +void aq_nic_cfg_start(struct aq_nic_s *self) { struct aq_nic_cfg_s *cfg = 
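/* aq_ndev_alloc() above uses the standard net_device private-area
 * idiom: alloc_etherdev_mq() appends sizeof(struct aq_nic_s) to the
 * net_device allocation and netdev_priv() returns that region, which
 * is what lets the old alloc_hot/alloc_cold pair collapse into plain
 * netdev_priv(ndev) calls here. The idiom in isolation, with a
 * hypothetical foo_priv:
 *
 *   struct foo_priv {
 *       struct net_device *ndev;  // back-pointer, freed with the ndev
 *   };
 *
 *   static struct net_device *foo_alloc(void)
 *   {
 *       struct net_device *ndev =
 *           alloc_etherdev_mq(sizeof(struct foo_priv), 8);
 *
 *       if (!ndev)
 *           return NULL;
 *       ((struct foo_priv *)netdev_priv(ndev))->ndev = ndev;
 *       return ndev;
 *   }
 */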
&self->aq_nic_cfg; - cfg->aq_hw_caps = &self->aq_hw_caps; - - cfg->vecs = AQ_CFG_VECS_DEF; cfg->tcs = AQ_CFG_TCS_DEF; - cfg->rxds = AQ_CFG_RXDS_DEF; - cfg->txds = AQ_CFG_TXDS_DEF; - cfg->is_polling = AQ_CFG_IS_POLLING_DEF; cfg->itr = aq_itr; @@ -94,19 +87,13 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self) cfg->vlan_id = 0U; aq_nic_rss_init(self, cfg->num_rss_queues); -} - -/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */ -int aq_nic_cfg_start(struct aq_nic_s *self) -{ - struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; /*descriptors */ - cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds); - cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds); + cfg->rxds = min(cfg->aq_hw_caps->rxds, AQ_CFG_RXDS_DEF); + cfg->txds = min(cfg->aq_hw_caps->txds, AQ_CFG_TXDS_DEF); /*rss rings */ - cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs); + cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF); cfg->vecs = min(cfg->vecs, num_online_cpus()); /* cfg->vecs should be power of 2 for RSS */ if (cfg->vecs >= 8U) @@ -120,23 +107,22 @@ int aq_nic_cfg_start(struct aq_nic_s *self) cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF); - cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func); + cfg->irq_type = aq_pci_func_get_irq_type(self); if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) || - (self->aq_hw_caps.vecs == 1U) || + (cfg->aq_hw_caps->vecs == 1U) || (cfg->vecs == 1U)) { cfg->is_rss = 0U; cfg->vecs = 1U; } - cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk; - cfg->hw_features = self->aq_hw_caps.hw_features; - return 0; + cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk; + cfg->hw_features = cfg->aq_hw_caps->hw_features; } static int aq_nic_update_link_status(struct aq_nic_s *self) { - int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw); + int err = self->aq_fw_ops->update_link_status(self->aq_hw); if (err) return err; @@ -150,9 +136,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self) self->link_status = self->aq_hw->aq_link_status; if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) { - aq_utils_obj_set(&self->header.flags, + aq_utils_obj_set(&self->flags, AQ_NIC_FLAG_STARTED); - aq_utils_obj_clear(&self->header.flags, + aq_utils_obj_clear(&self->flags, AQ_NIC_LINK_DOWN); netif_carrier_on(self->ndev); netif_tx_wake_all_queues(self->ndev); @@ -160,7 +146,7 @@ static int aq_nic_update_link_status(struct aq_nic_s *self) if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) { netif_carrier_off(self->ndev); netif_tx_disable(self->ndev); - aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); + aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN); } return 0; } @@ -171,15 +157,15 @@ static void aq_nic_service_timer_cb(struct timer_list *t) int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL; int err = 0; - if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) + if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY)) goto err_exit; err = aq_nic_update_link_status(self); if (err) goto err_exit; - if (self->aq_hw_ops.hw_update_stats) - self->aq_hw_ops.hw_update_stats(self->aq_hw); + if (self->aq_fw_ops->update_stats) + self->aq_fw_ops->update_stats(self->aq_hw); aq_nic_update_ndev_stats(self); @@ -205,60 +191,6 @@ static void aq_nic_polling_timer_cb(struct timer_list *t) AQ_CFG_POLLING_TIMER_INTERVAL); } -static struct net_device *aq_nic_ndev_alloc(void) -{ - return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX); -} - -struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, - const struct 
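/* aq_nic_cfg_start() above now derives the runtime config straight
 * from the const capability table: descriptor counts and the vector
 * count are clamped against the board caps, and vecs is further forced
 * to a power of two so it can serve as an RSS indirection mask. A
 * sketch of the clamp using the kernel's rounddown_pow_of_two() from
 * linux/log2.h (the driver open-codes a comparison chain instead):
 *
 *   static u32 pick_vecs(u32 caps_vecs, u32 def_vecs, u32 cpus)
 *   {
 *       u32 vecs = min3(caps_vecs, def_vecs, cpus);
 *
 *       return vecs ? rounddown_pow_of_two(vecs) : 1U;
 *   }
 */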
ethtool_ops *et_ops, - struct pci_dev *pdev, - struct aq_pci_func_s *aq_pci_func, - unsigned int port, - const struct aq_hw_ops *aq_hw_ops) -{ - struct net_device *ndev = NULL; - struct aq_nic_s *self = NULL; - int err = 0; - - ndev = aq_nic_ndev_alloc(); - if (!ndev) { - err = -ENOMEM; - goto err_exit; - } - - self = netdev_priv(ndev); - - ndev->netdev_ops = ndev_ops; - ndev->ethtool_ops = et_ops; - - SET_NETDEV_DEV(ndev, &pdev->dev); - - ndev->if_port = port; - self->ndev = ndev; - - self->aq_pci_func = aq_pci_func; - - self->aq_hw_ops = *aq_hw_ops; - self->port = (u8)port; - - self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, - &self->aq_hw_ops); - err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps, - pdev->device, pdev->subsystem_device); - if (err < 0) - goto err_exit; - - aq_nic_cfg_init_defaults(self); - -err_exit: - if (err < 0) { - aq_nic_free_hot_resources(self); - self = NULL; - } - return self; -} - int aq_nic_ndev_register(struct aq_nic_s *self) { int err = 0; @@ -267,10 +199,14 @@ int aq_nic_ndev_register(struct aq_nic_s *self) err = -EINVAL; goto err_exit; } - err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw, - self->aq_nic_cfg.aq_hw_caps, + + err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops); + if (err) + goto err_exit; + + err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, self->ndev->dev_addr); - if (err < 0) + if (err) goto err_exit; #if defined(AQ_CFG_MAC_ADDR_PERMANENT) @@ -281,83 +217,39 @@ int aq_nic_ndev_register(struct aq_nic_s *self) } #endif + for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs; + self->aq_vecs++) { + self->aq_vec[self->aq_vecs] = + aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self)); + if (!self->aq_vec[self->aq_vecs]) { + err = -ENOMEM; + goto err_exit; + } + } + netif_carrier_off(self->ndev); netif_tx_disable(self->ndev); err = register_netdev(self->ndev); - if (err < 0) + if (err) goto err_exit; err_exit: return err; } -int aq_nic_ndev_init(struct aq_nic_s *self) +void aq_nic_ndev_init(struct aq_nic_s *self) { - struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps; + const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps; struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg; self->ndev->hw_features |= aq_hw_caps->hw_features; self->ndev->features = aq_hw_caps->hw_features; self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; - self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN; - - return 0; -} - -void aq_nic_ndev_free(struct aq_nic_s *self) -{ - if (!self->ndev) - goto err_exit; - - if (self->ndev->reg_state == NETREG_REGISTERED) - unregister_netdev(self->ndev); - - if (self->aq_hw) - self->aq_hw_ops.destroy(self->aq_hw); - - free_netdev(self->ndev); - -err_exit:; -} - -struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev) -{ - struct aq_nic_s *self = NULL; - int err = 0; - - if (!ndev) { - err = -EINVAL; - goto err_exit; - } - self = netdev_priv(ndev); - - if (!self) { - err = -EINVAL; - goto err_exit; - } - if (netif_running(ndev)) - netif_tx_disable(ndev); - netif_carrier_off(self->ndev); - - for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; - self->aq_vecs++) { - self->aq_vec[self->aq_vecs] = - aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg); - if (!self->aq_vec[self->aq_vecs]) { - err = -ENOMEM; - goto err_exit; - } - } + self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN; -err_exit: - if (err < 0) { - aq_nic_free_hot_resources(self); - self = NULL; - } - 
return self; } void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx, @@ -366,11 +258,6 @@ void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx, self->aq_ring_tx[idx] = ring; } -struct device *aq_nic_get_dev(struct aq_nic_s *self) -{ - return self->ndev->dev.parent; -} - struct net_device *aq_nic_get_ndev(struct aq_nic_s *self) { return self->ndev; @@ -383,18 +270,20 @@ int aq_nic_init(struct aq_nic_s *self) unsigned int i = 0U; self->power_state = AQ_HW_POWER_STATE_D0; - err = self->aq_hw_ops.hw_reset(self->aq_hw); + err = self->aq_hw_ops->hw_reset(self->aq_hw); if (err < 0) goto err_exit; - err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg, - aq_nic_get_ndev(self)->dev_addr); + err = self->aq_hw_ops->hw_init(self->aq_hw, + aq_nic_get_ndev(self)->dev_addr); if (err < 0) goto err_exit; for (i = 0U, aq_vec = self->aq_vec[0]; self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) - aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw); + aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw); + + netif_carrier_off(self->ndev); err_exit: return err; @@ -406,13 +295,13 @@ int aq_nic_start(struct aq_nic_s *self) int err = 0; unsigned int i = 0U; - err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, + err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, self->mc_list.ar, self->mc_list.count); if (err < 0) goto err_exit; - err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, + err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, self->packet_filter); if (err < 0) goto err_exit; @@ -424,7 +313,7 @@ int aq_nic_start(struct aq_nic_s *self) goto err_exit; } - err = self->aq_hw_ops.hw_start(self->aq_hw); + err = self->aq_hw_ops->hw_start(self->aq_hw); if (err < 0) goto err_exit; @@ -442,14 +331,14 @@ int aq_nic_start(struct aq_nic_s *self) } else { for (i = 0U, aq_vec = self->aq_vec[0]; self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { - err = aq_pci_func_alloc_irq(self->aq_pci_func, i, + err = aq_pci_func_alloc_irq(self, i, self->ndev->name, aq_vec, - aq_vec_get_affinity_mask(aq_vec)); + aq_vec_get_affinity_mask(aq_vec)); if (err < 0) goto err_exit; } - err = self->aq_hw_ops.hw_irq_enable(self->aq_hw, + err = self->aq_hw_ops->hw_irq_enable(self->aq_hw, AQ_CFG_IRQ_MASK); if (err < 0) goto err_exit; @@ -634,9 +523,8 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) frags = aq_nic_map_skb(self, skb, ring); if (likely(frags)) { - err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw, - ring, - frags); + err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw, + ring, frags); if (err >= 0) { ++ring->stats.tx.packets; ring->stats.tx.bytes += skb->len; @@ -651,14 +539,14 @@ err_exit: int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self) { - return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw); + return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw); } int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags) { int err = 0; - err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags); + err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags); if (err < 0) goto err_exit; @@ -690,11 +578,11 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) * multicast mask */ self->packet_filter |= IFF_ALLMULTI; - self->aq_hw->aq_nic_cfg->mc_list_count = 0; - return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, - self->packet_filter); + self->aq_nic_cfg.mc_list_count = 0; + return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, + self->packet_filter); } else { - return 
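/* aq_nic_start() above requests one interrupt per vector and pins each
 * via an affinity hint. The per-vector loop as a stand-alone sketch,
 * assuming the vectors were already allocated with
 * pci_alloc_irq_vectors() and foo_isr/foo_ctx are placeholders:
 *
 *   static int foo_request_irqs(struct pci_dev *pdev, unsigned int n,
 *                               irq_handler_t foo_isr, void *foo_ctx,
 *                               const cpumask_t *mask)
 *   {
 *       unsigned int i;
 *       int err;
 *
 *       for (i = 0; i < n; i++) {
 *           err = request_irq(pci_irq_vector(pdev, i), foo_isr, 0,
 *                             "foo", foo_ctx);
 *           if (err)
 *               return err;  // caller frees the ones already taken
 *           irq_set_affinity_hint(pci_irq_vector(pdev, i), mask);
 *       }
 *       return 0;
 *   }
 */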
self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, + return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, self->mc_list.ar, self->mc_list.count); } @@ -709,7 +597,7 @@ int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev) { - return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr); + return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr); } unsigned int aq_nic_get_link_speed(struct aq_nic_s *self) @@ -724,8 +612,9 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p) regs->version = 1; - err = self->aq_hw_ops.hw_get_regs(self->aq_hw, - &self->aq_hw_caps, regs_buff); + err = self->aq_hw_ops->hw_get_regs(self->aq_hw, + self->aq_nic_cfg.aq_hw_caps, + regs_buff); if (err < 0) goto err_exit; @@ -735,7 +624,7 @@ err_exit: int aq_nic_get_regs_count(struct aq_nic_s *self) { - return self->aq_hw_caps.mac_regs_count; + return self->aq_nic_cfg.aq_hw_caps->mac_regs_count; } void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) @@ -743,7 +632,7 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) unsigned int i = 0U; unsigned int count = 0U; struct aq_vec_s *aq_vec = NULL; - struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw); + struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw); if (!stats) goto err_exit; @@ -774,7 +663,6 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) i++; data += i; - count = 0U; for (i = 0U, aq_vec = self->aq_vec[0]; aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { @@ -788,7 +676,7 @@ err_exit:; static void aq_nic_update_ndev_stats(struct aq_nic_s *self) { struct net_device *ndev = self->ndev; - struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw); + struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw); ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc; ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc; @@ -802,39 +690,46 @@ static void aq_nic_update_ndev_stats(struct aq_nic_s *self) void aq_nic_get_link_ksettings(struct aq_nic_s *self, struct ethtool_link_ksettings *cmd) { - cmd->base.port = PORT_TP; + if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE) + cmd->base.port = PORT_FIBRE; + else + cmd->base.port = PORT_TP; /* This driver supports only 10G capable adapters, so DUPLEX_FULL */ cmd->base.duplex = DUPLEX_FULL; cmd->base.autoneg = self->aq_nic_cfg.is_autoneg; ethtool_link_ksettings_zero_link_mode(cmd, supported); - if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G) + if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G) ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); - if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_5G) + if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G) ethtool_link_ksettings_add_link_mode(cmd, supported, 5000baseT_Full); - if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_2GS) + if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS) ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full); - if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G) + if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G) ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); - if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M) + if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M) ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); - if 
(self->aq_hw_caps.flow_control) + if (self->aq_nic_cfg.aq_hw_caps->flow_control) ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); - ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + + if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE) + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + else + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); ethtool_link_ksettings_zero_link_mode(cmd, advertising); @@ -865,7 +760,10 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self, ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); - ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE) + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + else + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); } int aq_nic_set_link_ksettings(struct aq_nic_s *self, @@ -876,7 +774,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self, int err = 0; if (cmd->base.autoneg == AUTONEG_ENABLE) { - rate = self->aq_hw_caps.link_speed_msk; + rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk; self->aq_nic_cfg.is_autoneg = true; } else { speed = cmd->base.speed; @@ -907,7 +805,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self, goto err_exit; break; } - if (!(self->aq_hw_caps.link_speed_msk & rate)) { + if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) { err = -1; goto err_exit; } @@ -915,7 +813,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self, self->aq_nic_cfg.is_autoneg = false; } - err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate); + err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate); if (err < 0) goto err_exit; @@ -934,7 +832,7 @@ u32 aq_nic_get_fw_version(struct aq_nic_s *self) { u32 fw_version = 0U; - self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version); + self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version); return fw_version; } @@ -949,18 +847,18 @@ int aq_nic_stop(struct aq_nic_s *self) del_timer_sync(&self->service_timer); - self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK); + self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK); if (self->aq_nic_cfg.is_polling) del_timer_sync(&self->polling_timer); else - aq_pci_func_free_irqs(self->aq_pci_func); + aq_pci_func_free_irqs(self); for (i = 0U, aq_vec = self->aq_vec[0]; self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) aq_vec_stop(aq_vec); - return self->aq_hw_ops.hw_stop(self->aq_hw); + return self->aq_hw_ops->hw_stop(self->aq_hw); } void aq_nic_deinit(struct aq_nic_s *self) @@ -976,23 +874,23 @@ void aq_nic_deinit(struct aq_nic_s *self) aq_vec_deinit(aq_vec); if (self->power_state == AQ_HW_POWER_STATE_D0) { - (void)self->aq_hw_ops.hw_deinit(self->aq_hw); + (void)self->aq_hw_ops->hw_deinit(self->aq_hw); } else { - (void)self->aq_hw_ops.hw_set_power(self->aq_hw, + (void)self->aq_hw_ops->hw_set_power(self->aq_hw, self->power_state); } err_exit:; } -void aq_nic_free_hot_resources(struct aq_nic_s *self) +void aq_nic_free_vectors(struct aq_nic_s *self) { unsigned int i = 0U; if (!self) goto err_exit; - for (i = AQ_DIMOF(self->aq_vec); i--;) { + for (i = ARRAY_SIZE(self->aq_vec); i--;) { if (self->aq_vec[i]) { aq_vec_free(self->aq_vec[i]); self->aq_vec[i] = NULL; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 3c9f8db03d5f..d16b0f1a95aa 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h 
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -14,26 +14,15 @@ #include "aq_common.h" #include "aq_rss.h" +#include "aq_hw.h" struct aq_ring_s; -struct aq_pci_func_s; struct aq_hw_ops; - -#define AQ_NIC_FC_OFF 0U -#define AQ_NIC_FC_TX 1U -#define AQ_NIC_FC_RX 2U -#define AQ_NIC_FC_FULL 3U -#define AQ_NIC_FC_AUTO 4U - -#define AQ_NIC_RATE_10G BIT(0) -#define AQ_NIC_RATE_5G BIT(1) -#define AQ_NIC_RATE_5GSR BIT(2) -#define AQ_NIC_RATE_2GS BIT(3) -#define AQ_NIC_RATE_1G BIT(4) -#define AQ_NIC_RATE_100M BIT(5) +struct aq_fw_s; +struct aq_vec_s; struct aq_nic_cfg_s { - struct aq_hw_caps_s *aq_hw_caps; + const struct aq_hw_caps_s *aq_hw_caps; u64 hw_features; u32 rxds; /* rx ring size, descriptors # */ u32 txds; /* tx ring size, descriptors # */ @@ -44,7 +33,6 @@ struct aq_nic_cfg_s { u16 tx_itr; u32 num_rss_queues; u32 mtu; - u32 ucp_0x364; u32 flow_control; u32 link_speed_msk; u32 vlan_id; @@ -69,20 +57,43 @@ struct aq_nic_cfg_s { #define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \ ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_)) -struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, - const struct ethtool_ops *et_ops, - struct pci_dev *pdev, - struct aq_pci_func_s *aq_pci_func, - unsigned int port, - const struct aq_hw_ops *aq_hw_ops); -int aq_nic_ndev_init(struct aq_nic_s *self); +struct aq_nic_s { + atomic_t flags; + struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX]; + struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX]; + struct aq_hw_s *aq_hw; + struct net_device *ndev; + unsigned int aq_vecs; + unsigned int packet_filter; + unsigned int power_state; + u8 port; + const struct aq_hw_ops *aq_hw_ops; + const struct aq_fw_ops *aq_fw_ops; + struct aq_nic_cfg_s aq_nic_cfg; + struct timer_list service_timer; + struct timer_list polling_timer; + struct aq_hw_link_status_s link_status; + struct { + u32 count; + u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN]; + } mc_list; + + struct pci_dev *pdev; + unsigned int msix_entry_mask; +}; + +static inline struct device *aq_nic_get_dev(struct aq_nic_s *self) +{ + return self->ndev->dev.parent; +} + +void aq_nic_ndev_init(struct aq_nic_s *self); struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev); void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx, struct aq_ring_s *ring); -struct device *aq_nic_get_dev(struct aq_nic_s *self); struct net_device *aq_nic_get_ndev(struct aq_nic_s *self); int aq_nic_init(struct aq_nic_s *self); -int aq_nic_cfg_start(struct aq_nic_s *self); +void aq_nic_cfg_start(struct aq_nic_s *self); int aq_nic_ndev_register(struct aq_nic_s *self); void aq_nic_ndev_free(struct aq_nic_s *self); int aq_nic_start(struct aq_nic_s *self); @@ -93,6 +104,7 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data); int aq_nic_stop(struct aq_nic_s *self); void aq_nic_deinit(struct aq_nic_s *self); void aq_nic_free_hot_resources(struct aq_nic_s *self); +void aq_nic_free_vectors(struct aq_nic_s *self); int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu); int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev); int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h deleted file mode 100644 index e7d2711dc165..000000000000 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2017 aQuantia Corporation. 
All rights reserved - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - */ - -/* File aq_nic_internal.h: Definition of private object structure. */ - -#ifndef AQ_NIC_INTERNAL_H -#define AQ_NIC_INTERNAL_H - -struct aq_nic_s { - struct aq_obj_s header; - struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX]; - struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX]; - struct aq_hw_s *aq_hw; - struct net_device *ndev; - struct aq_pci_func_s *aq_pci_func; - unsigned int aq_vecs; - unsigned int packet_filter; - unsigned int power_state; - u8 port; - struct aq_hw_ops aq_hw_ops; - struct aq_hw_caps_s aq_hw_caps; - struct aq_nic_cfg_s aq_nic_cfg; - struct timer_list service_timer; - struct timer_list polling_timer; - struct aq_hw_link_status_s link_status; - struct { - u32 count; - u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN]; - } mc_list; -}; - -#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \ - AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \ - AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW) - -#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \ - AQ_NIC_LINK_DOWN) - -#endif /* AQ_NIC_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 58c29d04b186..22889fc158f2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -9,181 +9,135 @@ /* File aq_pci_func.c: Definition of PCI functions. */ -#include "aq_pci_func.h" +#include <linux/interrupt.h> +#include <linux/module.h> + +#include "aq_main.h" #include "aq_nic.h" #include "aq_vec.h" #include "aq_hw.h" -#include <linux/interrupt.h> - -struct aq_pci_func_s { - struct pci_dev *pdev; - struct aq_nic_s *port[AQ_CFG_PCI_FUNC_PORTS]; - void __iomem *mmio; - void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS]; - resource_size_t mmio_pa; - unsigned int msix_entry_mask; - unsigned int ports; - bool is_pci_enabled; - bool is_regions; - bool is_pci_using_dac; - struct aq_hw_caps_s aq_hw_caps; +#include "aq_pci_func.h" +#include "hw_atl/hw_atl_a0.h" +#include "hw_atl/hw_atl_b0.h" + +static const struct pci_device_id aq_pci_tbl[] = { + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), }, + + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), }, + + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), }, + + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111E), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112E), }, + + {} }; -struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops, - struct pci_dev *pdev, - const struct net_device_ops *ndev_ops, - const struct ethtool_ops *eth_ops) -{ - struct aq_pci_func_s *self = NULL; - int err = 0; - unsigned int 
port = 0U; - - if (!aq_hw_ops) { - err = -EFAULT; - goto err_exit; - } - self = kzalloc(sizeof(*self), GFP_KERNEL); - if (!self) { - err = -ENOMEM; - goto err_exit; - } - - pci_set_drvdata(pdev, self); - self->pdev = pdev; +static const struct aq_board_revision_s hw_atl_boards[] = { + { AQ_DEVICE_ID_0001, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, }, + { AQ_DEVICE_ID_D100, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, }, + { AQ_DEVICE_ID_D107, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, }, + { AQ_DEVICE_ID_D108, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, }, + { AQ_DEVICE_ID_D109, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, }, + + { AQ_DEVICE_ID_0001, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, }, + { AQ_DEVICE_ID_D100, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, }, + { AQ_DEVICE_ID_D107, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, }, + { AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, }, + { AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, }, + + { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, }, + { AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, }, + { AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, }, + { AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, }, + { AQ_DEVICE_ID_AQC111, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, }, + { AQ_DEVICE_ID_AQC112, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, }, + + { AQ_DEVICE_ID_AQC100S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, }, + { AQ_DEVICE_ID_AQC107S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, }, + { AQ_DEVICE_ID_AQC108S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, }, + { AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, }, + { AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, }, + { AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, }, + + { AQ_DEVICE_ID_AQC111E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111e, }, + { AQ_DEVICE_ID_AQC112E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112e, }, +}; - err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device, - pdev->subsystem_device); - if (err < 0) - goto err_exit; +MODULE_DEVICE_TABLE(pci, aq_pci_tbl); - self->ports = self->aq_hw_caps.ports; +static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev, + const struct aq_hw_ops **ops, + const struct aq_hw_caps_s **caps) +{ + int i = 0; - for (port = 0; port < self->ports; ++port) { - struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, - pdev, self, - port, aq_hw_ops); + if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA) + return -EINVAL; - if (!aq_nic) { - err = -ENOMEM; - goto err_exit; + for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) { + if (hw_atl_boards[i].devid == pdev->device && + (hw_atl_boards[i].revision == AQ_HWREV_ANY || + hw_atl_boards[i].revision == pdev->revision)) { + *ops = hw_atl_boards[i].ops; + *caps = hw_atl_boards[i].caps; + break; } - self->port[port] = aq_nic; } -err_exit: - if (err < 0) { - if (self) - aq_pci_func_free(self); - self = NULL; - } + if (i == ARRAY_SIZE(hw_atl_boards)) + return -EINVAL; - (void)err; - return self; + return 0; } -int aq_pci_func_init(struct aq_pci_func_s *self) +int aq_pci_func_init(struct pci_dev *pdev) { int err = 0; - unsigned int bar = 0U; - unsigned int port = 0U; - unsigned int numvecs = 0U; - - err = 
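/* Probe now resolves ops and caps through the static hw_atl_boards[]
 * table above, keyed on PCI device ID plus silicon revision with
 * AQ_HWREV_ANY acting as a wildcard. The lookup shape in miniature,
 * over a hypothetical boards table (REV_ANY is a placeholder
 * constant):
 *
 *   struct board {
 *       u16 devid;
 *       u16 rev;              // REV_ANY matches every revision
 *       const void *ops;
 *       const void *caps;
 *   };
 *
 *   static const struct board *find_board(const struct board *tbl,
 *                                         size_t n, u16 devid, u8 rev)
 *   {
 *       size_t i;
 *
 *       for (i = 0; i < n; i++)
 *           if (tbl[i].devid == devid &&
 *               (tbl[i].rev == REV_ANY || tbl[i].rev == rev))
 *               return &tbl[i];
 *       return NULL;          // caller maps this to -EINVAL
 *   }
 */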
pci_enable_device(self->pdev); - if (err < 0) - goto err_exit; - self->is_pci_enabled = true; - - err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(64)); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (!err) { - err = pci_set_consistent_dma_mask(self->pdev, DMA_BIT_MASK(64)); - self->is_pci_using_dac = 1; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } if (err) { - err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(32)); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (!err) - err = pci_set_consistent_dma_mask(self->pdev, + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - self->is_pci_using_dac = 0; } if (err != 0) { err = -ENOSR; goto err_exit; } - err = pci_request_regions(self->pdev, AQ_CFG_DRV_NAME "_mmio"); + err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio"); if (err < 0) goto err_exit; - self->is_regions = true; + pci_set_master(pdev); - pci_set_master(self->pdev); - - for (bar = 0; bar < 4; ++bar) { - if (IORESOURCE_MEM & pci_resource_flags(self->pdev, bar)) { - resource_size_t reg_sz; - - self->mmio_pa = pci_resource_start(self->pdev, bar); - if (self->mmio_pa == 0U) { - err = -EIO; - goto err_exit; - } - - reg_sz = pci_resource_len(self->pdev, bar); - if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) { - err = -EIO; - goto err_exit; - } - - self->mmio = ioremap_nocache(self->mmio_pa, reg_sz); - if (!self->mmio) { - err = -EIO; - goto err_exit; - } - break; - } - } - - numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs); - numvecs = min(numvecs, num_online_cpus()); - - /* enable interrupts */ -#if !AQ_CFG_FORCE_LEGACY_INT - err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX); - - if (err < 0) { - err = pci_alloc_irq_vectors(self->pdev, 1, 1, - PCI_IRQ_MSI | PCI_IRQ_LEGACY); - if (err < 0) - goto err_exit; - } -#endif /* AQ_CFG_FORCE_LEGACY_INT */ - - /* net device init */ - for (port = 0; port < self->ports; ++port) { - if (!self->port[port]) - continue; - - err = aq_nic_cfg_start(self->port[port]); - if (err < 0) - goto err_exit; - - err = aq_nic_ndev_init(self->port[port]); - if (err < 0) - goto err_exit; - - err = aq_nic_ndev_register(self->port[port]); - if (err < 0) - goto err_exit; - } + return 0; err_exit: - if (err < 0) - aq_pci_func_deinit(self); return err; } -int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i, +int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i, char *name, void *aq_vec, cpumask_t *affinity_mask) { struct pci_dev *pdev = self->pdev; @@ -204,11 +158,10 @@ int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i, irq_set_affinity_hint(pci_irq_vector(pdev, i), affinity_mask); } - return err; } -void aq_pci_func_free_irqs(struct aq_pci_func_s *self) +void aq_pci_func_free_irqs(struct aq_nic_s *self) { struct pci_dev *pdev = self->pdev; unsigned int i = 0U; @@ -224,12 +177,7 @@ void aq_pci_func_free_irqs(struct aq_pci_func_s *self) } } -void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self) -{ - return self->mmio; -} - -unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self) +unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self) { if (self->pdev->msix_enabled) return AQ_HW_IRQ_MSIX; @@ -238,62 +186,159 @@ unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self) return AQ_HW_IRQ_LEGACY; } -void aq_pci_func_deinit(struct aq_pci_func_s *self) +static void aq_pci_free_irq_vectors(struct aq_nic_s *self) { - if (!self) - goto err_exit; - - aq_pci_func_free_irqs(self); pci_free_irq_vectors(self->pdev); - - if (self->is_regions) - 
pci_release_regions(self->pdev); - - if (self->is_pci_enabled) - pci_disable_device(self->pdev); - -err_exit:; } -void aq_pci_func_free(struct aq_pci_func_s *self) +static int aq_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) { - unsigned int port = 0U; + struct aq_nic_s *self = NULL; + int err = 0; + struct net_device *ndev; + resource_size_t mmio_pa; + u32 bar; + u32 numvecs; - if (!self) - goto err_exit; + err = pci_enable_device(pdev); + if (err) + return err; - for (port = 0; port < self->ports; ++port) { - if (!self->port[port]) - continue; + err = aq_pci_func_init(pdev); + if (err) + goto err_pci_func; - aq_nic_ndev_free(self->port[port]); + ndev = aq_ndev_alloc(); + if (!ndev) { + err = -ENOMEM; + goto err_ndev; } - if (self->mmio) - iounmap(self->mmio); + self = netdev_priv(ndev); + self->pdev = pdev; + SET_NETDEV_DEV(ndev, &pdev->dev); + pci_set_drvdata(pdev, self); - kfree(self); + err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops, + &aq_nic_get_cfg(self)->aq_hw_caps); + if (err) + goto err_ioremap; -err_exit:; -} + self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL); + self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self); -int aq_pci_func_change_pm_state(struct aq_pci_func_s *self, - pm_message_t *pm_msg) -{ - int err = 0; - unsigned int port = 0U; + for (bar = 0; bar < 4; ++bar) { + if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) { + resource_size_t reg_sz; - if (!self) { - err = -EFAULT; - goto err_exit; + mmio_pa = pci_resource_start(pdev, bar); + if (mmio_pa == 0U) { + err = -EIO; + goto err_ioremap; + } + + reg_sz = pci_resource_len(pdev, bar); + if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) { + err = -EIO; + goto err_ioremap; + } + + self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz); + if (!self->aq_hw->mmio) { + err = -EIO; + goto err_ioremap; + } + break; + } } - for (port = 0; port < self->ports; ++port) { - if (!self->port[port]) - continue; - (void)aq_nic_change_pm_state(self->port[port], pm_msg); + if (bar == 4) { + err = -EIO; + goto err_ioremap; } -err_exit: + numvecs = min((u8)AQ_CFG_VECS_DEF, + aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs); + numvecs = min(numvecs, num_online_cpus()); + /*enable interrupts */ +#if !AQ_CFG_FORCE_LEGACY_INT + err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, + PCI_IRQ_MSIX); + + if (err < 0) { + err = pci_alloc_irq_vectors(self->pdev, 1, 1, + PCI_IRQ_MSI | PCI_IRQ_LEGACY); + if (err < 0) + goto err_hwinit; + } +#endif + + /* net device init */ + aq_nic_cfg_start(self); + + aq_nic_ndev_init(self); + + err = aq_nic_ndev_register(self); + if (err < 0) + goto err_register; + + return 0; + +err_register: + aq_nic_free_vectors(self); + aq_pci_free_irq_vectors(self); +err_hwinit: + iounmap(self->aq_hw->mmio); +err_ioremap: + free_netdev(ndev); +err_pci_func: + pci_release_regions(pdev); +err_ndev: + pci_disable_device(pdev); return err; } + +static void aq_pci_remove(struct pci_dev *pdev) +{ + struct aq_nic_s *self = pci_get_drvdata(pdev); + + if (self->ndev) { + if (self->ndev->reg_state == NETREG_REGISTERED) + unregister_netdev(self->ndev); + aq_nic_free_vectors(self); + aq_pci_free_irq_vectors(self); + iounmap(self->aq_hw->mmio); + kfree(self->aq_hw); + pci_release_regions(pdev); + free_netdev(self->ndev); + } + + pci_disable_device(pdev); +} + +static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg) +{ + struct aq_nic_s *self = pci_get_drvdata(pdev); + + return aq_nic_change_pm_state(self, &pm_msg); +} + +static int aq_pci_resume(struct pci_dev *pdev) +{ + struct aq_nic_s *self = 
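/* aq_pci_probe() above unwinds with the usual inverted goto ladder:
 * jump to the label that undoes everything set up so far, then fall
 * through the remaining teardown steps. Note the labels in this hunk
 * are named after the failing step rather than the resource being
 * released (err_pci_func releases regions, err_ndev disables the
 * device). The shape in miniature, with hypothetical step/undo
 * helpers:
 *
 *   static int foo_probe(void)
 *   {
 *       int err = step_a();
 *
 *       if (err)
 *           return err;       // nothing to undo yet
 *       err = step_b();
 *       if (err)
 *           goto err_a;       // undo only what succeeded
 *       return 0;
 *
 *   err_a:
 *       undo_a();
 *       return err;
 *   }
 */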
pci_get_drvdata(pdev); + pm_message_t pm_msg = PMSG_RESTORE; + + return aq_nic_change_pm_state(self, &pm_msg); +} + +static struct pci_driver aq_pci_ops = { + .name = AQ_CFG_DRV_NAME, + .id_table = aq_pci_tbl, + .probe = aq_pci_probe, + .remove = aq_pci_remove, + .suspend = aq_pci_suspend, + .resume = aq_pci_resume, +}; + +module_pci_driver(aq_pci_ops); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h index ecb033791203..aeee67bf69fa 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h @@ -13,22 +13,20 @@ #define AQ_PCI_FUNC_H #include "aq_common.h" +#include "aq_nic.h" -struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops, - struct pci_dev *pdev, - const struct net_device_ops *ndev_ops, - const struct ethtool_ops *eth_ops); -int aq_pci_func_init(struct aq_pci_func_s *self); -int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i, +struct aq_board_revision_s { + unsigned short devid; + unsigned short revision; + const struct aq_hw_ops *ops; + const struct aq_hw_caps_s *caps; +}; + +int aq_pci_func_init(struct pci_dev *pdev); +int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i, char *name, void *aq_vec, cpumask_t *affinity_mask); -void aq_pci_func_free_irqs(struct aq_pci_func_s *self); -int aq_pci_func_start(struct aq_pci_func_s *self); -void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self); -unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self); -void aq_pci_func_deinit(struct aq_pci_func_s *self); -void aq_pci_func_free(struct aq_pci_func_s *self); -int aq_pci_func_change_pm_state(struct aq_pci_func_s *self, - pm_message_t *pm_msg); +void aq_pci_func_free_irqs(struct aq_nic_s *self); +unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self); #endif /* AQ_PCI_FUNC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 519ca6534b85..0be6a11370bb 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -279,10 +279,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self, skb_record_rx_queue(skb, self->idx); - napi_gro_receive(napi, skb); - ++self->stats.rx.packets; self->stats.rx.bytes += skb->len; + + napi_gro_receive(napi, skb); } err_exit: diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 5844078764bd..965fae0fb6e0 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -15,6 +15,7 @@ #include "aq_common.h" struct page; +struct aq_nic_cfg_s; /* TxC SOP DX EOP * +----------+----------+----------+----------- @@ -105,7 +106,6 @@ union aq_ring_stats_s { }; struct aq_ring_s { - struct aq_obj_s header; struct aq_ring_buff_s *buff_ring; u8 *dx_ring; /* descriptors ring, dma shared mem */ struct aq_nic_s *aq_nic; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h index e12bcdfb874a..786ea8187c69 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h @@ -14,12 +14,6 @@ #include "aq_common.h" -#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_) - -struct aq_obj_s { - atomic_t flags; -}; - static inline void aq_utils_obj_set(atomic_t *flags, u32 mask) { unsigned long flags_old, flags_new; diff --git 
a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index 5fecc9a099ef..f890b8a5a862 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -19,8 +19,7 @@ #include <linux/netdevice.h> struct aq_vec_s { - struct aq_obj_s header; - struct aq_hw_ops *aq_hw_ops; + const struct aq_hw_ops *aq_hw_ops; struct aq_hw_s *aq_hw; struct aq_nic_s *aq_nic; unsigned int tx_rings; @@ -166,7 +165,7 @@ err_exit: return self; } -int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops, +int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops, struct aq_hw_s *aq_hw) { struct aq_ring_s *ring = NULL; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h index 6c68b184236c..8bdf60bb3f63 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h @@ -19,6 +19,8 @@ struct aq_hw_s; struct aq_hw_ops; +struct aq_nic_s; +struct aq_nic_cfg_s; struct aq_ring_stats_rx_s; struct aq_ring_stats_tx_s; @@ -26,7 +28,7 @@ irqreturn_t aq_vec_isr(int irq, void *private); irqreturn_t aq_vec_isr_legacy(int irq, void *private); struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg); -int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops, +int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops, struct aq_hw_s *aq_hw); void aq_vec_deinit(struct aq_vec_s *self); void aq_vec_free(struct aq_vec_s *self); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index f18dce14c93c..67e2f9fb9402 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -12,78 +12,100 @@ #include "../aq_hw.h" #include "../aq_hw_utils.h" #include "../aq_ring.h" +#include "../aq_nic.h" #include "hw_atl_a0.h" #include "hw_atl_utils.h" #include "hw_atl_llh.h" #include "hw_atl_a0_internal.h" -static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps, - unsigned short device, - unsigned short subsystem_device) -{ - memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); - - if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001) - aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G; - - if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) { - aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G; - aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G; - } - - return 0; -} - -static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func, - unsigned int port, - struct aq_hw_ops *ops) -{ - struct hw_atl_s *self = NULL; - - self = kzalloc(sizeof(*self), GFP_KERNEL); - if (!self) - goto err_exit; - - self->base.aq_pci_func = aq_pci_func; +#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \ + .is_64_dma = true, \ + .msix_irqs = 4U, \ + .irq_mask = ~0U, \ + .vecs = HW_ATL_A0_RSS_MAX, \ + .tcs = HW_ATL_A0_TC_MAX, \ + .rxd_alignment = 1U, \ + .rxd_size = HW_ATL_A0_RXD_SIZE, \ + .rxds = 248U, \ + .txd_alignment = 1U, \ + .txd_size = HW_ATL_A0_TXD_SIZE, \ + .txds = 8U * 1024U, \ + .txhwb_alignment = 4096U, \ + .tx_rings = HW_ATL_A0_TX_RINGS, \ + .rx_rings = HW_ATL_A0_RX_RINGS, \ + .hw_features = NETIF_F_HW_CSUM | \ + NETIF_F_RXHASH | \ + NETIF_F_RXCSUM | \ + NETIF_F_SG | \ + NETIF_F_TSO, \ + .hw_priv_flags = IFF_UNICAST_FLT, \ + .flow_control 
= true, \ + .mtu = HW_ATL_A0_MTU_JUMBO, \ + .mac_regs_count = 88, \ + .hw_alive_check_addr = 0x10U + +const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = { + DEFAULT_A0_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_FIBRE, + .link_speed_msk = HW_ATL_A0_RATE_5G | + HW_ATL_A0_RATE_2G5 | + HW_ATL_A0_RATE_1G | + HW_ATL_A0_RATE_100M, +}; - self->base.not_ff_addr = 0x10U; +const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = { + DEFAULT_A0_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = HW_ATL_A0_RATE_10G | + HW_ATL_A0_RATE_5G | + HW_ATL_A0_RATE_2G5 | + HW_ATL_A0_RATE_1G | + HW_ATL_A0_RATE_100M, +}; -err_exit: - return (struct aq_hw_s *)self; -} +const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = { + DEFAULT_A0_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = HW_ATL_A0_RATE_5G | + HW_ATL_A0_RATE_2G5 | + HW_ATL_A0_RATE_1G | + HW_ATL_A0_RATE_100M, +}; -static void hw_atl_a0_destroy(struct aq_hw_s *self) -{ - kfree(self); -} +const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = { + DEFAULT_A0_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = HW_ATL_A0_RATE_2G5 | + HW_ATL_A0_RATE_1G | + HW_ATL_A0_RATE_100M, +}; static int hw_atl_a0_hw_reset(struct aq_hw_s *self) { int err = 0; - glb_glb_reg_res_dis_set(self, 1U); - pci_pci_reg_res_dis_set(self, 0U); - rx_rx_reg_res_dis_set(self, 0U); - tx_tx_reg_res_dis_set(self, 0U); + hw_atl_glb_glb_reg_res_dis_set(self, 1U); + hw_atl_pci_pci_reg_res_dis_set(self, 0U); + hw_atl_rx_rx_reg_res_dis_set(self, 0U); + hw_atl_tx_tx_reg_res_dis_set(self, 0U); HW_ATL_FLUSH(); - glb_soft_res_set(self, 1); + hw_atl_glb_soft_res_set(self, 1); /* check 10 times by 1ms */ - AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U); + AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U); if (err < 0) goto err_exit; - itr_irq_reg_res_dis_set(self, 0U); - itr_res_irq_set(self, 1U); + hw_atl_itr_irq_reg_res_dis_set(self, 0U); + hw_atl_itr_res_irq_set(self, 1U); /* check 10 times by 1ms */ - AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U); + AQ_HW_WAIT_FOR(hw_atl_itr_res_irq_get(self) == 0, 1000U, 10U); if (err < 0) goto err_exit; - hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U); + self->aq_fw_ops->set_state(self, MPI_RESET); err = aq_hw_err_from_flags(self); @@ -99,51 +121,53 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self) bool is_rx_flow_control = false; /* TPS Descriptor rate init */ - tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); - tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); + hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); + hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); /* TPS VM init */ - tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); + hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); /* TPS TC credits init */ - tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); - tps_tx_pkt_shed_data_arb_mode_set(self, 0U); + hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); + hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U); - tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U); - tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U); - tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U); - tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U); + hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U); + hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U); + hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U); + hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U); /* Tx buf size */ buff_size 
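/* The A0 capability structs above share their common fields through a
 * designated-initializer macro, so each board spells out only what
 * differs (media type, speed mask). The trick in miniature, with
 * hypothetical names:
 *
 *   #define DEFAULT_CAPS \
 *       .queues = 4,     \
 *       .mtu    = 9000
 *
 *   static const struct caps board_x = {
 *       DEFAULT_CAPS,
 *       .speed_msk = SPEED_10G | SPEED_1G,
 *   };
 *
 * A later initializer may legally override one pulled in by the macro,
 * though compilers can warn about it (-Woverride-init in GCC).
 */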
= HW_ATL_A0_TXBUF_MAX; - tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc); - tpb_tx_buff_hi_threshold_per_tc_set(self, - (buff_size * (1024 / 32U) * 66U) / - 100U, tc); - tpb_tx_buff_lo_threshold_per_tc_set(self, - (buff_size * (1024 / 32U) * 50U) / - 100U, tc); + hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc); + hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, + (buff_size * + (1024 / 32U) * 66U) / + 100U, tc); + hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, + (buff_size * + (1024 / 32U) * 50U) / + 100U, tc); /* QoS Rx buf size per TC */ tc = 0; is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control); buff_size = HW_ATL_A0_RXBUF_MAX; - rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); - rpb_rx_buff_hi_threshold_per_tc_set(self, - (buff_size * - (1024U / 32U) * 66U) / - 100U, tc); - rpb_rx_buff_lo_threshold_per_tc_set(self, - (buff_size * - (1024U / 32U) * 50U) / - 100U, tc); - rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc); + hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); + hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 66U) / + 100U, tc); + hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 50U) / + 100U, tc); + hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc); /* QoS 802.1p priority -> TC mapping */ for (i_priority = 8U; i_priority--;) - rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U); + hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U); return aq_hw_err_from_flags(self); } @@ -151,20 +175,19 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self) static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self, struct aq_rss_parameters *rss_params) { - struct aq_nic_cfg_s *cfg = NULL; + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; int err = 0; unsigned int i = 0U; unsigned int addr = 0U; - cfg = self->aq_nic_cfg; - for (i = 10, addr = 0U; i--; ++addr) { u32 key_data = cfg->is_rss ? 
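/* The RSS key writes in this hunk are posted through a self-clearing
 * write-enable bit and then polled with AQ_HW_WAIT_FOR() (here up to
 * 10 tries, 1000 us apart) before the next word is written. The poll
 * as a generic sketch, with a hypothetical read_busy_bit() accessor:
 *
 *   static int wait_not_busy(struct aq_hw_s *hw, u32 us, u32 tries)
 *   {
 *       while (read_busy_bit(hw)) {
 *           if (!tries--)
 *               return -ETIME;  // hardware never acked the write
 *           udelay(us);
 *       }
 *       return 0;
 *   }
 */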
__swab32(rss_params->hash_secret_key[i]) : 0U; - rpf_rss_key_wr_data_set(self, key_data); - rpf_rss_key_addr_set(self, addr); - rpf_rss_key_wr_en_set(self, 1U); - AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U); + hw_atl_rpf_rss_key_wr_data_set(self, key_data); + hw_atl_rpf_rss_key_addr_set(self, addr); + hw_atl_rpf_rss_key_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0, + 1000U, 10U); if (err < 0) goto err_exit; } @@ -193,11 +216,12 @@ static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self, ((i * 3U) & 0xFU)); } - for (i = AQ_DIMOF(bitary); i--;) { - rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); - rpf_rss_redir_tbl_addr_set(self, i); - rpf_rss_redir_wr_en_set(self, 1U); - AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U); + for (i = ARRAY_SIZE(bitary); i--;) { + hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); + hw_atl_rpf_rss_redir_tbl_addr_set(self, i); + hw_atl_rpf_rss_redir_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0, + 1000U, 10U); if (err < 0) goto err_exit; } @@ -212,35 +236,35 @@ static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg) { /* TX checksums offloads*/ - tpo_ipv4header_crc_offload_en_set(self, 1); - tpo_tcp_udp_crc_offload_en_set(self, 1); + hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1); + hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1); /* RX checksums offloads*/ - rpo_ipv4header_crc_offload_en_set(self, 1); - rpo_tcp_udp_crc_offload_en_set(self, 1); + hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1); + hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1); /* LSO offloads*/ - tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); + hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); return aq_hw_err_from_flags(self); } static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self) { - thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); - thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); - thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); + hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); + hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); + hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); /* Tx interrupts */ - tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U); /* misc */ aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ? 0x00010000U : 0x00000000U); - tdm_tx_dca_en_set(self, 0U); - tdm_tx_dca_mode_set(self, 0U); + hw_atl_tdm_tx_dca_en_set(self, 0U); + hw_atl_tdm_tx_dca_mode_set(self, 0U); - tpb_tx_path_scp_ins_en_set(self, 1U); + hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U); return aq_hw_err_from_flags(self); } @@ -251,38 +275,38 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self) int i; /* Rx TC/RSS number config */ - rpb_rpf_rx_traf_class_mode_set(self, 1U); + hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U); /* Rx flow control */ - rpb_rx_flow_ctl_mode_set(self, 1U); + hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U); /* RSS Ring selection */ - reg_rx_flr_rss_control1set(self, cfg->is_rss ? + hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ? 0xB3333333U : 0x00000000U); /* Multicast filters */ for (i = HW_ATL_A0_MAC_MAX; i--;) { - rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i); - rpfl2unicast_flr_act_set(self, 1U, i); + hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 
1U : 0U, i); + hw_atl_rpfl2unicast_flr_act_set(self, 1U, i); } - reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); - reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U); + hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); + hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U); /* Vlan filters */ - rpf_vlan_outer_etht_set(self, 0x88A8U); - rpf_vlan_inner_etht_set(self, 0x8100U); - rpf_vlan_prom_mode_en_set(self, 1); + hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U); + hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U); + hw_atl_rpf_vlan_prom_mode_en_set(self, 1); /* Rx Interrupts */ - rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U); /* misc */ - rpfl2broadcast_flr_act_set(self, 1U); - rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); + hw_atl_rpfl2broadcast_flr_act_set(self, 1U); + hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); - rdm_rx_dca_en_set(self, 0U); - rdm_rx_dca_mode_set(self, 0U); + hw_atl_rdm_rx_dca_en_set(self, 0U); + hw_atl_rdm_rx_dca_mode_set(self, 0U); return aq_hw_err_from_flags(self); } @@ -301,10 +325,10 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | (mac_addr[4] << 8) | mac_addr[5]; - rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC); - rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC); - rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC); - rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC); + hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC); + hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC); + hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC); + hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC); err = aq_hw_err_from_flags(self); @@ -312,9 +336,7 @@ err_exit: return err; } -static int hw_atl_a0_hw_init(struct aq_hw_s *self, - struct aq_nic_cfg_s *aq_nic_cfg, - u8 *mac_addr) +static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr) { static u32 aq_hw_atl_igcr_table_[4][2] = { { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */ @@ -325,20 +347,18 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self, int err = 0; - self->aq_nic_cfg = aq_nic_cfg; - - hw_atl_utils_hw_chip_features_init(self, - &PHAL_ATLANTIC_A0->chip_features); + struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg; hw_atl_a0_hw_init_tx_path(self); hw_atl_a0_hw_init_rx_path(self); hw_atl_a0_hw_mac_addr_set(self, mac_addr); - hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk); + self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk); + self->aq_fw_ops->set_state(self, MPI_INIT); - reg_tx_dma_debug_ctl_set(self, 0x800000b8U); - reg_tx_dma_debug_ctl_set(self, 0x000000b8U); + hw_atl_reg_tx_dma_debug_ctl_set(self, 0x800000b8U); + hw_atl_reg_tx_dma_debug_ctl_set(self, 0x000000b8U); hw_atl_a0_hw_qos_set(self); hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss); @@ -346,26 +366,25 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self, /* Reset link status and read out initial hardware counters */ self->aq_link_status.mbps = 0; - hw_atl_utils_update_stats(self); + self->aq_fw_ops->update_stats(self); err = aq_hw_err_from_flags(self); if (err < 0) goto err_exit; /* Interrupts */ - reg_irq_glb_ctl_set(self, - aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type] - [(aq_nic_cfg->vecs > 1U) ? - 1 : 0]); + hw_atl_reg_irq_glb_ctl_set(self, + aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type] + [(aq_nic_cfg->vecs > 1U) ? 
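/* hw_atl_a0_hw_mac_addr_set() above packs the six MAC bytes into two
 * registers (bytes 0-1 in the high word, 2-5 in the low word) and
 * toggles the unicast filter slot off around the update. The packing
 * in isolation:
 *
 *   u32 h = (mac[0] << 8)  | mac[1];
 *   u32 l = (mac[2] << 24) | (mac[3] << 16) |
 *           (mac[4] << 8)  | mac[5];
 *   // disable slot, write l then h, re-enable the slot
 */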
1 : 0]); - itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask); + hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask); /* Interrupts */ - reg_gen_irq_map_set(self, - ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) | - ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) | - ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) | - ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U); + hw_atl_reg_gen_irq_map_set(self, + ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) | + ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) | + ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) | + ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U); hw_atl_a0_hw_offload_set(self, aq_nic_cfg); @@ -376,28 +395,28 @@ err_exit: static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring) { - tdm_tx_desc_en_set(self, 1, ring->idx); + hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx); return aq_hw_err_from_flags(self); } static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring) { - rdm_rx_desc_en_set(self, 1, ring->idx); + hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx); return aq_hw_err_from_flags(self); } static int hw_atl_a0_hw_start(struct aq_hw_s *self) { - tpb_tx_buff_en_set(self, 1); - rpb_rx_buff_en_set(self, 1); + hw_atl_tpb_tx_buff_en_set(self, 1); + hw_atl_rpb_rx_buff_en_set(self, 1); return aq_hw_err_from_flags(self); } static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self, struct aq_ring_s *ring) { - reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); + hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); return 0; } @@ -483,36 +502,37 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self, u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); - rdm_rx_desc_en_set(self, false, aq_ring->idx); + hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx); - rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); - reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw, - aq_ring->idx); + hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw, + aq_ring->idx); - reg_rx_dma_desc_base_addressmswset(self, - dma_desc_addr_msw, aq_ring->idx); + hw_atl_reg_rx_dma_desc_base_addressmswset(self, + dma_desc_addr_msw, + aq_ring->idx); - rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); - rdm_rx_desc_data_buff_size_set(self, - AQ_CFG_RX_FRAME_MAX / 1024U, + hw_atl_rdm_rx_desc_data_buff_size_set(self, + AQ_CFG_RX_FRAME_MAX / 1024U, aq_ring->idx); - rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); - rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); - rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx); + hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); + hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx); /* Rx ring set mode */ /* Mapping interrupt vector */ - itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx); - itr_irq_map_en_rx_set(self, true, aq_ring->idx); + hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx); - rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); - rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx); - rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx); - rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx); + hw_atl_rdm_cpu_id_set(self, 
aq_ring_param->cpu, aq_ring->idx); + hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx); + hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx); + hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx); return aq_hw_err_from_flags(self); } @@ -524,25 +544,25 @@ static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self, u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa; u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); - reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, - aq_ring->idx); + hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, + aq_ring->idx); - reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr, - aq_ring->idx); + hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr, + aq_ring->idx); - tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring); /* Set Tx threshold */ - tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx); + hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx); /* Mapping interrupt vector */ - itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx); - itr_irq_map_en_tx_set(self, true, aq_ring->idx); + hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx); - tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); - tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx); + hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx); return aq_hw_err_from_flags(self); } @@ -563,7 +583,7 @@ static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self, rxd->hdr_addr = 0U; } - reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx); + hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx); return aq_hw_err_from_flags(self); } @@ -572,13 +592,13 @@ static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self, struct aq_ring_s *ring) { int err = 0; - unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx); + unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx); - if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) { + if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) { err = -ENXIO; goto err_exit; } - ring->hw_head = hw_head_; + ring->hw_head = hw_head; err = aq_hw_err_from_flags(self); err_exit: @@ -602,15 +622,16 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */ if ((1U << 4) & - reg_rx_dma_desc_status_get(self, ring->idx)) { - rdm_rx_desc_en_set(self, false, ring->idx); - rdm_rx_desc_res_set(self, true, ring->idx); - rdm_rx_desc_res_set(self, false, ring->idx); - rdm_rx_desc_en_set(self, true, ring->idx); + hw_atl_reg_rx_dma_desc_status_get(self, ring->idx)) { + hw_atl_rdm_rx_desc_en_set(self, false, ring->idx); + hw_atl_rdm_rx_desc_res_set(self, true, ring->idx); + hw_atl_rdm_rx_desc_res_set(self, false, ring->idx); + hw_atl_rdm_rx_desc_en_set(self, true, ring->idx); } if (ring->hw_head || - (rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) { + (hw_atl_rdm_rx_desc_head_ptr_get(self, + ring->idx) < 2U)) { break; } else if (!(rxd_wb->status & 0x1U)) { struct hw_atl_rxd_wb_s *rxd_wb1 = @@ -693,26 +714,25 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask) { - itr_irq_msk_setlsw_set(self, LODWORD(mask) | + 
hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) | (1U << HW_ATL_A0_ERR_INT)); return aq_hw_err_from_flags(self); } static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask) { - itr_irq_msk_clearlsw_set(self, LODWORD(mask)); - itr_irq_status_clearlsw_set(self, LODWORD(mask)); + hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask)); + hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask)); - if ((1U << 16) & reg_gen_irq_status_get(self)) - - atomic_inc(&PHAL_ATLANTIC_A0->dpc); + if ((1U << 16) & hw_atl_reg_gen_irq_status_get(self)) + atomic_inc(&self->dpc); return aq_hw_err_from_flags(self); } static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask) { - *mask = itr_irq_statuslsw_get(self); + *mask = hw_atl_itr_irq_statuslsw_get(self); return aq_hw_err_from_flags(self); } @@ -723,18 +743,20 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self, { unsigned int i = 0U; - rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); - rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0); - rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); + hw_atl_rpfl2promiscuous_mode_en_set(self, + IS_FILTER_ENABLED(IFF_PROMISC)); + hw_atl_rpfl2multicast_flr_en_set(self, + IS_FILTER_ENABLED(IFF_MULTICAST), 0); + hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST); for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i) - rpfl2_uc_flr_en_set(self, - (self->aq_nic_cfg->is_mc_list_enabled && - (i <= self->aq_nic_cfg->mc_list_count)) ? - 1U : 0U, i); + hw_atl_rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled && + (i <= self->aq_nic_cfg->mc_list_count)) ? + 1U : 0U, i); return aq_hw_err_from_flags(self); } @@ -761,17 +783,19 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | (ar_mac[i][4] << 8) | ar_mac[i][5]; - rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i); + hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i); - rpfl2unicast_dest_addresslsw_set(self, - l, HW_ATL_A0_MAC_MIN + i); + hw_atl_rpfl2unicast_dest_addresslsw_set(self, + l, + HW_ATL_A0_MAC_MIN + i); - rpfl2unicast_dest_addressmsw_set(self, - h, HW_ATL_A0_MAC_MIN + i); + hw_atl_rpfl2unicast_dest_addressmsw_set(self, + h, + HW_ATL_A0_MAC_MIN + i); - rpfl2_uc_flr_en_set(self, - (self->aq_nic_cfg->is_mc_list_enabled), - HW_ATL_A0_MAC_MIN + i); + hw_atl_rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled), + HW_ATL_A0_MAC_MIN + i); } err = aq_hw_err_from_flags(self); @@ -823,7 +847,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self) } for (i = HW_ATL_A0_RINGS_MAX; i--;) - reg_irq_thr_set(self, itr_rx, i); + hw_atl_reg_irq_thr_set(self, itr_rx, i); return aq_hw_err_from_flags(self); } @@ -837,38 +861,19 @@ static int hw_atl_a0_hw_stop(struct aq_hw_s *self) static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring) { - tdm_tx_desc_en_set(self, 0U, ring->idx); + hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx); return aq_hw_err_from_flags(self); } static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring) { - rdm_rx_desc_en_set(self, 0U, ring->idx); + hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx); return aq_hw_err_from_flags(self); } -static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed) -{ - int err = 0; - - err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT); - if (err < 0) - goto err_exit; - -err_exit: - 
return err; -} - -static struct aq_hw_ops hw_atl_ops_ = { - .create = hw_atl_a0_create, - .destroy = hw_atl_a0_destroy, - .get_hw_caps = hw_atl_a0_get_hw_caps, - - .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent, +const struct aq_hw_ops hw_atl_ops_a0 = { .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set, - .hw_get_link_status = hw_atl_utils_mpi_get_link_status, - .hw_set_link_speed = hw_atl_a0_hw_set_speed, .hw_init = hw_atl_a0_hw_init, .hw_deinit = hw_atl_utils_hw_deinit, .hw_set_power = hw_atl_utils_hw_set_power, @@ -898,21 +903,6 @@ static struct aq_hw_ops hw_atl_ops_ = { .hw_rss_set = hw_atl_a0_hw_rss_set, .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set, .hw_get_regs = hw_atl_utils_hw_get_regs, - .hw_update_stats = hw_atl_utils_update_stats, .hw_get_hw_stats = hw_atl_utils_get_hw_stats, .hw_get_fw_version = hw_atl_utils_get_fw_version, }; - -struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev) -{ - bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA); - bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) || - (pdev->device == HW_ATL_DEVICE_ID_D100) || - (pdev->device == HW_ATL_DEVICE_ID_D107) || - (pdev->device == HW_ATL_DEVICE_ID_D108) || - (pdev->device == HW_ATL_DEVICE_ID_D109)); - - bool is_rev_ok = (pdev->revision == 1U); - - return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL; -} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h index 6e1d527954c9..25fe954def03 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h @@ -16,19 +16,11 @@ #include "../aq_common.h" -#ifndef PCI_VENDOR_ID_AQUANTIA +extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc100; +extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc107; +extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc108; +extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc109; -#define PCI_VENDOR_ID_AQUANTIA 0x1D6A -#define HW_ATL_DEVICE_ID_0001 0x0001 -#define HW_ATL_DEVICE_ID_D100 0xD100 -#define HW_ATL_DEVICE_ID_D107 0xD107 -#define HW_ATL_DEVICE_ID_D108 0xD108 -#define HW_ATL_DEVICE_ID_D109 0xD109 - -#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter" - -#endif - -struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev); +extern const struct aq_hw_ops hw_atl_ops_a0; #endif /* HW_ATL_A0_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h index 0592a0330cf0..1d8855558d74 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h @@ -88,69 +88,4 @@ #define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U -/* Hardware tx descriptor */ -struct __packed hw_atl_txd_s { - u64 buf_addr; - u32 ctl; - u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */ -}; - -/* Hardware tx context descriptor */ -struct __packed hw_atl_txc_s { - u32 rsvd; - u32 len; - u32 ctl; - u32 len2; -}; - -/* Hardware rx descriptor */ -struct __packed hw_atl_rxd_s { - u64 buf_addr; - u64 hdr_addr; -}; - -/* Hardware rx descriptor writeback */ -struct __packed hw_atl_rxd_wb_s { - u32 type; - u32 rss_hash; - u16 status; - u16 pkt_len; - u16 next_desc_ptr; - u16 vlan; -}; - -/* HW layer capabilities */ -static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = { - .ports = 1U, - .is_64_dma = true, - .msix_irqs = 4U, - .irq_mask = ~0U, - .vecs = HW_ATL_A0_RSS_MAX, - .tcs = 
HW_ATL_A0_TC_MAX, - .rxd_alignment = 1U, - .rxd_size = HW_ATL_A0_RXD_SIZE, - .rxds = 248U, - .txd_alignment = 1U, - .txd_size = HW_ATL_A0_TXD_SIZE, - .txds = 8U * 1024U, - .txhwb_alignment = 4096U, - .tx_rings = HW_ATL_A0_TX_RINGS, - .rx_rings = HW_ATL_A0_RX_RINGS, - .hw_features = NETIF_F_HW_CSUM | - NETIF_F_RXCSUM | - NETIF_F_RXHASH | - NETIF_F_SG | - NETIF_F_TSO, - .hw_priv_flags = IFF_UNICAST_FLT, - .link_speed_msk = (HW_ATL_A0_RATE_10G | - HW_ATL_A0_RATE_5G | - HW_ATL_A0_RATE_2G5 | - HW_ATL_A0_RATE_1G | - HW_ATL_A0_RATE_100M), - .flow_control = true, - .mtu = HW_ATL_A0_MTU_JUMBO, - .mac_regs_count = 88, - .fw_ver_expected = HW_ATL_A0_FW_VER_EXPECTED, -}; - #endif /* HW_ATL_A0_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index e4a22ce7bf09..819f6bcf9b4e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -12,83 +12,89 @@ #include "../aq_hw.h" #include "../aq_hw_utils.h" #include "../aq_ring.h" +#include "../aq_nic.h" #include "hw_atl_b0.h" #include "hw_atl_utils.h" #include "hw_atl_llh.h" #include "hw_atl_b0_internal.h" #include "hw_atl_llh_internal.h" -static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps, - unsigned short device, - unsigned short subsystem_device) -{ - memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); - - if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001) - aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G; - - if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) { - aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G; - aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G; - } - - return 0; -} - -static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func, - unsigned int port, - struct aq_hw_ops *ops) -{ - struct hw_atl_s *self = NULL; - - self = kzalloc(sizeof(*self), GFP_KERNEL); - if (!self) - goto err_exit; - - self->base.aq_pci_func = aq_pci_func; +#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \ + .is_64_dma = true, \ + .msix_irqs = 4U, \ + .irq_mask = ~0U, \ + .vecs = HW_ATL_B0_RSS_MAX, \ + .tcs = HW_ATL_B0_TC_MAX, \ + .rxd_alignment = 1U, \ + .rxd_size = HW_ATL_B0_RXD_SIZE, \ + .rxds = 4U * 1024U, \ + .txd_alignment = 1U, \ + .txd_size = HW_ATL_B0_TXD_SIZE, \ + .txds = 8U * 1024U, \ + .txhwb_alignment = 4096U, \ + .tx_rings = HW_ATL_B0_TX_RINGS, \ + .rx_rings = HW_ATL_B0_RX_RINGS, \ + .hw_features = NETIF_F_HW_CSUM | \ + NETIF_F_RXCSUM | \ + NETIF_F_RXHASH | \ + NETIF_F_SG | \ + NETIF_F_TSO | \ + NETIF_F_LRO, \ + .hw_priv_flags = IFF_UNICAST_FLT, \ + .flow_control = true, \ + .mtu = HW_ATL_B0_MTU_JUMBO, \ + .mac_regs_count = 88, \ + .hw_alive_check_addr = 0x10U + +const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = { + DEFAULT_B0_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_FIBRE, + .link_speed_msk = HW_ATL_B0_RATE_10G | + HW_ATL_B0_RATE_5G | + HW_ATL_B0_RATE_2G5 | + HW_ATL_B0_RATE_1G | + HW_ATL_B0_RATE_100M, +}; - self->base.not_ff_addr = 0x10U; +const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = { + DEFAULT_B0_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = HW_ATL_B0_RATE_10G | + HW_ATL_B0_RATE_5G | + HW_ATL_B0_RATE_2G5 | + HW_ATL_B0_RATE_1G | + HW_ATL_B0_RATE_100M, +}; -err_exit: - return (struct aq_hw_s *)self; -} +const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = { + DEFAULT_B0_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + 
.link_speed_msk = HW_ATL_B0_RATE_5G | + HW_ATL_B0_RATE_2G5 | + HW_ATL_B0_RATE_1G | + HW_ATL_B0_RATE_100M, +}; -static void hw_atl_b0_destroy(struct aq_hw_s *self) -{ - kfree(self); -} +const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = { + DEFAULT_B0_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = HW_ATL_B0_RATE_2G5 | + HW_ATL_B0_RATE_1G | + HW_ATL_B0_RATE_100M, +}; static int hw_atl_b0_hw_reset(struct aq_hw_s *self) { int err = 0; - glb_glb_reg_res_dis_set(self, 1U); - pci_pci_reg_res_dis_set(self, 0U); - rx_rx_reg_res_dis_set(self, 0U); - tx_tx_reg_res_dis_set(self, 0U); - - HW_ATL_FLUSH(); - glb_soft_res_set(self, 1); - - /* check 10 times by 1ms */ - AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U); - if (err < 0) - goto err_exit; - - itr_irq_reg_res_dis_set(self, 0U); - itr_res_irq_set(self, 1U); - - /* check 10 times by 1ms */ - AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U); - if (err < 0) - goto err_exit; + err = hw_atl_utils_soft_reset(self); + if (err) + return err; - hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U); + self->aq_fw_ops->set_state(self, MPI_RESET); err = aq_hw_err_from_flags(self); -err_exit: return err; } @@ -100,51 +106,53 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) bool is_rx_flow_control = false; /* TPS Descriptor rate init */ - tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); - tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); + hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); + hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); /* TPS VM init */ - tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); + hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); /* TPS TC credits init */ - tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); - tps_tx_pkt_shed_data_arb_mode_set(self, 0U); + hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); + hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U); - tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U); - tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U); - tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U); - tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U); + hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U); + hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U); + hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U); + hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U); /* Tx buf size */ buff_size = HW_ATL_B0_TXBUF_MAX; - tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc); - tpb_tx_buff_hi_threshold_per_tc_set(self, - (buff_size * (1024 / 32U) * 66U) / - 100U, tc); - tpb_tx_buff_lo_threshold_per_tc_set(self, - (buff_size * (1024 / 32U) * 50U) / - 100U, tc); + hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc); + hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, + (buff_size * + (1024 / 32U) * 66U) / + 100U, tc); + hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, + (buff_size * + (1024 / 32U) * 50U) / + 100U, tc); /* QoS Rx buf size per TC */ tc = 0; is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control); buff_size = HW_ATL_B0_RXBUF_MAX; - rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); - rpb_rx_buff_hi_threshold_per_tc_set(self, - (buff_size * - (1024U / 32U) * 66U) / - 100U, tc); - rpb_rx_buff_lo_threshold_per_tc_set(self, - (buff_size * - (1024U / 32U) * 50U) / - 100U, tc); - rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 
1U : 0U, tc); + hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); + hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 66U) / + 100U, tc); + hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 50U) / + 100U, tc); + hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc); /* QoS 802.1p priority -> TC mapping */ for (i_priority = 8U; i_priority--;) - rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U); + hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U); return aq_hw_err_from_flags(self); } @@ -152,20 +160,19 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self, struct aq_rss_parameters *rss_params) { - struct aq_nic_cfg_s *cfg = NULL; + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; int err = 0; unsigned int i = 0U; unsigned int addr = 0U; - cfg = self->aq_nic_cfg; - for (i = 10, addr = 0U; i--; ++addr) { u32 key_data = cfg->is_rss ? __swab32(rss_params->hash_secret_key[i]) : 0U; - rpf_rss_key_wr_data_set(self, key_data); - rpf_rss_key_addr_set(self, addr); - rpf_rss_key_wr_en_set(self, 1U); - AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U); + hw_atl_rpf_rss_key_wr_data_set(self, key_data); + hw_atl_rpf_rss_key_addr_set(self, addr); + hw_atl_rpf_rss_key_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0, + 1000U, 10U); if (err < 0) goto err_exit; } @@ -194,11 +201,12 @@ static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self, ((i * 3U) & 0xFU)); } - for (i = AQ_DIMOF(bitary); i--;) { - rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); - rpf_rss_redir_tbl_addr_set(self, i); - rpf_rss_redir_wr_en_set(self, 1U); - AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U); + for (i = ARRAY_SIZE(bitary); i--;) { + hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); + hw_atl_rpf_rss_redir_tbl_addr_set(self, i); + hw_atl_rpf_rss_redir_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0, + 1000U, 10U); if (err < 0) goto err_exit; } @@ -215,15 +223,15 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, unsigned int i; /* TX checksums offloads*/ - tpo_ipv4header_crc_offload_en_set(self, 1); - tpo_tcp_udp_crc_offload_en_set(self, 1); + hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1); + hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1); /* RX checksums offloads*/ - rpo_ipv4header_crc_offload_en_set(self, 1); - rpo_tcp_udp_crc_offload_en_set(self, 1); + hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1); + hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1); /* LSO offloads*/ - tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); + hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); /* LRO offloads */ { @@ -232,43 +240,44 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 
0x1U : 0x0)); for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++) - rpo_lro_max_num_of_descriptors_set(self, val, i); + hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i); - rpo_lro_time_base_divider_set(self, 0x61AU); - rpo_lro_inactive_interval_set(self, 0); - rpo_lro_max_coalescing_interval_set(self, 2); + hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU); + hw_atl_rpo_lro_inactive_interval_set(self, 0); + hw_atl_rpo_lro_max_coalescing_interval_set(self, 2); - rpo_lro_qsessions_lim_set(self, 1U); + hw_atl_rpo_lro_qsessions_lim_set(self, 1U); - rpo_lro_total_desc_lim_set(self, 2U); + hw_atl_rpo_lro_total_desc_lim_set(self, 2U); - rpo_lro_patch_optimization_en_set(self, 0U); + hw_atl_rpo_lro_patch_optimization_en_set(self, 0U); - rpo_lro_min_pay_of_first_pkt_set(self, 10U); + hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U); - rpo_lro_pkt_lim_set(self, 1U); + hw_atl_rpo_lro_pkt_lim_set(self, 1U); - rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); + hw_atl_rpo_lro_en_set(self, + aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); } return aq_hw_err_from_flags(self); } static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self) { - thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); - thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); - thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); + hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); + hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); + hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); /* Tx interrupts */ - tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U); /* misc */ aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ? 0x00010000U : 0x00000000U); - tdm_tx_dca_en_set(self, 0U); - tdm_tx_dca_mode_set(self, 0U); + hw_atl_tdm_tx_dca_en_set(self, 0U); + hw_atl_tdm_tx_dca_mode_set(self, 0U); - tpb_tx_path_scp_ins_en_set(self, 1U); + hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U); return aq_hw_err_from_flags(self); } @@ -279,55 +288,55 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self) int i; /* Rx TC/RSS number config */ - rpb_rpf_rx_traf_class_mode_set(self, 1U); + hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U); /* Rx flow control */ - rpb_rx_flow_ctl_mode_set(self, 1U); + hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U); /* RSS Ring selection */ - reg_rx_flr_rss_control1set(self, cfg->is_rss ? + hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ? 0xB3333333U : 0x00000000U); /* Multicast filters */ for (i = HW_ATL_B0_MAC_MAX; i--;) { - rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i); - rpfl2unicast_flr_act_set(self, 1U, i); + hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 
1U : 0U, i); + hw_atl_rpfl2unicast_flr_act_set(self, 1U, i); } - reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); - reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U); + hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); + hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U); /* Vlan filters */ - rpf_vlan_outer_etht_set(self, 0x88A8U); - rpf_vlan_inner_etht_set(self, 0x8100U); + hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U); + hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U); if (cfg->vlan_id) { - rpf_vlan_flr_act_set(self, 1U, 0U); - rpf_vlan_id_flr_set(self, 0U, 0U); - rpf_vlan_flr_en_set(self, 0U, 0U); + hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U); + hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U); + hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U); - rpf_vlan_accept_untagged_packets_set(self, 1U); - rpf_vlan_untagged_act_set(self, 1U); + hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U); + hw_atl_rpf_vlan_untagged_act_set(self, 1U); - rpf_vlan_flr_act_set(self, 1U, 1U); - rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U); - rpf_vlan_flr_en_set(self, 1U, 1U); + hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U); + hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U); + hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U); } else { - rpf_vlan_prom_mode_en_set(self, 1); + hw_atl_rpf_vlan_prom_mode_en_set(self, 1); } /* Rx Interrupts */ - rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U); /* misc */ aq_hw_write_reg(self, 0x00005040U, IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U); - rpfl2broadcast_flr_act_set(self, 1U); - rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); + hw_atl_rpfl2broadcast_flr_act_set(self, 1U); + hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); - rdm_rx_dca_en_set(self, 0U); - rdm_rx_dca_mode_set(self, 0U); + hw_atl_rdm_rx_dca_en_set(self, 0U); + hw_atl_rdm_rx_dca_mode_set(self, 0U); return aq_hw_err_from_flags(self); } @@ -346,10 +355,10 @@ static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | (mac_addr[4] << 8) | mac_addr[5]; - rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC); - rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC); - rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC); - rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC); + hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC); + hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC); + hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC); + hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC); err = aq_hw_err_from_flags(self); @@ -357,9 +366,7 @@ err_exit: return err; } -static int hw_atl_b0_hw_init(struct aq_hw_s *self, - struct aq_nic_cfg_s *aq_nic_cfg, - u8 *mac_addr) +static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr) { static u32 aq_hw_atl_igcr_table_[4][2] = { { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */ @@ -371,51 +378,50 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, int err = 0; u32 val; - self->aq_nic_cfg = aq_nic_cfg; - - hw_atl_utils_hw_chip_features_init(self, - &PHAL_ATLANTIC_B0->chip_features); + struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg; hw_atl_b0_hw_init_tx_path(self); hw_atl_b0_hw_init_rx_path(self); hw_atl_b0_hw_mac_addr_set(self, mac_addr); - hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk); + self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk); + self->aq_fw_ops->set_state(self, MPI_INIT); hw_atl_b0_hw_qos_set(self); hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); 
hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); /* Force limit MRRS on RDM/TDM to 2K */ - val = aq_hw_read_reg(self, pci_reg_control6_adr); - aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404); + val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR); + aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR, + (val & ~0x707) | 0x404); /* TX DMA total request limit. B0 hardware is not capable to * handle more than (8K-MRRS) incoming DMA data. * Value 24 in 256byte units */ - aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24); + aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24); /* Reset link status and read out initial hardware counters */ self->aq_link_status.mbps = 0; - hw_atl_utils_update_stats(self); + self->aq_fw_ops->update_stats(self); err = aq_hw_err_from_flags(self); if (err < 0) goto err_exit; /* Interrupts */ - reg_irq_glb_ctl_set(self, - aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type] + hw_atl_reg_irq_glb_ctl_set(self, + aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type] [(aq_nic_cfg->vecs > 1U) ? 1 : 0]); - itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask); + hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask); /* Interrupts */ - reg_gen_irq_map_set(self, - ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) | + hw_atl_reg_gen_irq_map_set(self, + ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) | ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U); hw_atl_b0_hw_offload_set(self, aq_nic_cfg); @@ -427,28 +433,28 @@ err_exit: static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring) { - tdm_tx_desc_en_set(self, 1, ring->idx); + hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx); return aq_hw_err_from_flags(self); } static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring) { - rdm_rx_desc_en_set(self, 1, ring->idx); + hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx); return aq_hw_err_from_flags(self); } static int hw_atl_b0_hw_start(struct aq_hw_s *self) { - tpb_tx_buff_en_set(self, 1); - rpb_rx_buff_en_set(self, 1); + hw_atl_tpb_tx_buff_en_set(self, 1); + hw_atl_rpb_rx_buff_en_set(self, 1); return aq_hw_err_from_flags(self); } static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self, struct aq_ring_s *ring) { - reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); + hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); return 0; } @@ -534,36 +540,36 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); - rdm_rx_desc_en_set(self, false, aq_ring->idx); + hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx); - rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); - reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw, - aq_ring->idx); + hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw, + aq_ring->idx); - reg_rx_dma_desc_base_addressmswset(self, - dma_desc_addr_msw, aq_ring->idx); + hw_atl_reg_rx_dma_desc_base_addressmswset(self, + dma_desc_addr_msw, aq_ring->idx); - rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); - rdm_rx_desc_data_buff_size_set(self, - AQ_CFG_RX_FRAME_MAX / 1024U, + hw_atl_rdm_rx_desc_data_buff_size_set(self, + AQ_CFG_RX_FRAME_MAX / 1024U, aq_ring->idx); - rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); - rdm_rx_desc_head_splitting_set(self, 0U, 
aq_ring->idx); - rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx); + hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); + hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); + hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx); /* Rx ring set mode */ /* Mapping interrupt vector */ - itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx); - itr_irq_map_en_rx_set(self, true, aq_ring->idx); + hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx); - rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); - rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx); - rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx); - rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx); + hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx); + hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx); + hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx); return aq_hw_err_from_flags(self); } @@ -575,25 +581,25 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa; u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); - reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, - aq_ring->idx); + hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, + aq_ring->idx); - reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr, - aq_ring->idx); + hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr, + aq_ring->idx); - tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); + hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring); /* Set Tx threshold */ - tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx); + hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx); /* Mapping interrupt vector */ - itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx); - itr_irq_map_en_tx_set(self, true, aq_ring->idx); + hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx); + hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx); - tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); - tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx); + hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); + hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx); return aq_hw_err_from_flags(self); } @@ -614,7 +620,7 @@ static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, rxd->hdr_addr = 0U; } - reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx); + hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx); return aq_hw_err_from_flags(self); } @@ -623,9 +629,9 @@ static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self, struct aq_ring_s *ring) { int err = 0; - unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx); + unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx); - if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) { + if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) { err = -ENXIO; goto err_exit; } @@ -728,22 +734,22 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask) { - itr_irq_msk_setlsw_set(self, LODWORD(mask)); + hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask)); return aq_hw_err_from_flags(self); } static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask) { - itr_irq_msk_clearlsw_set(self, 
LODWORD(mask)); - itr_irq_status_clearlsw_set(self, LODWORD(mask)); + hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask)); + hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask)); - atomic_inc(&PHAL_ATLANTIC_B0->dpc); + atomic_inc(&self->dpc); return aq_hw_err_from_flags(self); } static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask) { - *mask = itr_irq_statuslsw_get(self); + *mask = hw_atl_itr_irq_statuslsw_get(self); return aq_hw_err_from_flags(self); } @@ -754,20 +760,20 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, { unsigned int i = 0U; - rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); - rpfl2multicast_flr_en_set(self, - IS_FILTER_ENABLED(IFF_MULTICAST), 0); + hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); + hw_atl_rpfl2multicast_flr_en_set(self, + IS_FILTER_ENABLED(IFF_MULTICAST), 0); - rpfl2_accept_all_mc_packets_set(self, - IS_FILTER_ENABLED(IFF_ALLMULTI)); + hw_atl_rpfl2_accept_all_mc_packets_set(self, + IS_FILTER_ENABLED(IFF_ALLMULTI)); - rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); + hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST); for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i) - rpfl2_uc_flr_en_set(self, - (self->aq_nic_cfg->is_mc_list_enabled && + hw_atl_rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled && (i <= self->aq_nic_cfg->mc_list_count)) ? 1U : 0U, i); @@ -796,16 +802,16 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | (ar_mac[i][4] << 8) | ar_mac[i][5]; - rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i); + hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i); - rpfl2unicast_dest_addresslsw_set(self, - l, HW_ATL_B0_MAC_MIN + i); + hw_atl_rpfl2unicast_dest_addresslsw_set(self, + l, HW_ATL_B0_MAC_MIN + i); - rpfl2unicast_dest_addressmsw_set(self, - h, HW_ATL_B0_MAC_MIN + i); + hw_atl_rpfl2unicast_dest_addressmsw_set(self, + h, HW_ATL_B0_MAC_MIN + i); - rpfl2_uc_flr_en_set(self, - (self->aq_nic_cfg->is_mc_list_enabled), + hw_atl_rpfl2_uc_flr_en_set(self, + (self->aq_nic_cfg->is_mc_list_enabled), HW_ATL_B0_MAC_MIN + i); } @@ -824,10 +830,10 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self) switch (self->aq_nic_cfg->itr) { case AQ_CFG_INTERRUPT_MODERATION_ON: case AQ_CFG_INTERRUPT_MODERATION_AUTO: - tdm_tx_desc_wr_wb_irq_en_set(self, 0U); - tdm_tdm_intr_moder_en_set(self, 1U); - rdm_rx_desc_wr_wb_irq_en_set(self, 0U); - rdm_rdm_intr_moder_en_set(self, 1U); + hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U); + hw_atl_tdm_tdm_intr_moder_en_set(self, 1U); + hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U); + hw_atl_rdm_rdm_intr_moder_en_set(self, 1U); if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) { /* HW timers are in 2us units */ @@ -887,18 +893,18 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self) } break; case AQ_CFG_INTERRUPT_MODERATION_OFF: - tdm_tx_desc_wr_wb_irq_en_set(self, 1U); - tdm_tdm_intr_moder_en_set(self, 0U); - rdm_rx_desc_wr_wb_irq_en_set(self, 1U); - rdm_rdm_intr_moder_en_set(self, 0U); + hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + hw_atl_tdm_tdm_intr_moder_en_set(self, 0U); + hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + hw_atl_rdm_rdm_intr_moder_en_set(self, 0U); itr_tx = 0U; itr_rx = 0U; break; } for (i = HW_ATL_B0_RINGS_MAX; i--;) { - reg_tx_intr_moder_ctrl_set(self, itr_tx, i); - 
reg_rx_intr_moder_ctrl_set(self, itr_rx, i); + hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i); + hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i); } return aq_hw_err_from_flags(self); @@ -913,38 +919,19 @@ static int hw_atl_b0_hw_stop(struct aq_hw_s *self) static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring) { - tdm_tx_desc_en_set(self, 0U, ring->idx); + hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx); return aq_hw_err_from_flags(self); } static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring) { - rdm_rx_desc_en_set(self, 0U, ring->idx); + hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx); return aq_hw_err_from_flags(self); } -static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed) -{ - int err = 0; - - err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT); - if (err < 0) - goto err_exit; - -err_exit: - return err; -} - -static struct aq_hw_ops hw_atl_ops_ = { - .create = hw_atl_b0_create, - .destroy = hw_atl_b0_destroy, - .get_hw_caps = hw_atl_b0_get_hw_caps, - - .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent, +const struct aq_hw_ops hw_atl_ops_b0 = { .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, - .hw_get_link_status = hw_atl_utils_mpi_get_link_status, - .hw_set_link_speed = hw_atl_b0_hw_set_speed, .hw_init = hw_atl_b0_hw_init, .hw_deinit = hw_atl_utils_hw_deinit, .hw_set_power = hw_atl_utils_hw_set_power, @@ -974,21 +961,6 @@ static struct aq_hw_ops hw_atl_ops_ = { .hw_rss_set = hw_atl_b0_hw_rss_set, .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, .hw_get_regs = hw_atl_utils_hw_get_regs, - .hw_update_stats = hw_atl_utils_update_stats, .hw_get_hw_stats = hw_atl_utils_get_hw_stats, .hw_get_fw_version = hw_atl_utils_get_fw_version, }; - -struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev) -{ - bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA); - bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) || - (pdev->device == HW_ATL_DEVICE_ID_D100) || - (pdev->device == HW_ATL_DEVICE_ID_D107) || - (pdev->device == HW_ATL_DEVICE_ID_D108) || - (pdev->device == HW_ATL_DEVICE_ID_D109)); - - bool is_rev_ok = (pdev->revision == 2U); - - return (is_vid_ok && is_did_ok && is_rev_ok) ? 
&hw_atl_ops_ : NULL; -} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h index a1e1bce6c1f3..2cc8dacfdc27 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h @@ -16,19 +16,27 @@ #include "../aq_common.h" -#ifndef PCI_VENDOR_ID_AQUANTIA +extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc100; +extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc107; +extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc108; +extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109; -#define PCI_VENDOR_ID_AQUANTIA 0x1D6A -#define HW_ATL_DEVICE_ID_0001 0x0001 -#define HW_ATL_DEVICE_ID_D100 0xD100 -#define HW_ATL_DEVICE_ID_D107 0xD107 -#define HW_ATL_DEVICE_ID_D108 0xD108 -#define HW_ATL_DEVICE_ID_D109 0xD109 +#define hw_atl_b0_caps_aqc111 hw_atl_b0_caps_aqc108 +#define hw_atl_b0_caps_aqc112 hw_atl_b0_caps_aqc109 -#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter" +#define hw_atl_b0_caps_aqc100s hw_atl_b0_caps_aqc100 +#define hw_atl_b0_caps_aqc107s hw_atl_b0_caps_aqc107 +#define hw_atl_b0_caps_aqc108s hw_atl_b0_caps_aqc108 +#define hw_atl_b0_caps_aqc109s hw_atl_b0_caps_aqc109 -#endif +#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108 +#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109 -struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev); +#define hw_atl_b0_caps_aqc111e hw_atl_b0_caps_aqc108 +#define hw_atl_b0_caps_aqc112e hw_atl_b0_caps_aqc109 + +extern const struct aq_hw_ops hw_atl_ops_b0; + +#define hw_atl_ops_b1 hw_atl_ops_b0 #endif /* HW_ATL_B0_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index 9aa2c6edfca2..405d1455c222 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -142,70 +142,6 @@ #define HW_ATL_INTR_MODER_MAX 0x1FF #define HW_ATL_INTR_MODER_MIN 0xFF -/* Hardware tx descriptor */ -struct __packed hw_atl_txd_s { - u64 buf_addr; - u32 ctl; - u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */ -}; - -/* Hardware tx context descriptor */ -struct __packed hw_atl_txc_s { - u32 rsvd; - u32 len; - u32 ctl; - u32 len2; -}; - -/* Hardware rx descriptor */ -struct __packed hw_atl_rxd_s { - u64 buf_addr; - u64 hdr_addr; -}; - -/* Hardware rx descriptor writeback */ -struct __packed hw_atl_rxd_wb_s { - u32 type; - u32 rss_hash; - u16 status; - u16 pkt_len; - u16 next_desc_ptr; - u16 vlan; -}; - /* HW layer capabilities */ -static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = { - .ports = 1U, - .is_64_dma = true, - .msix_irqs = 4U, - .irq_mask = ~0U, - .vecs = HW_ATL_B0_RSS_MAX, - .tcs = HW_ATL_B0_TC_MAX, - .rxd_alignment = 1U, - .rxd_size = HW_ATL_B0_RXD_SIZE, - .rxds = 8U * 1024U, - .txd_alignment = 1U, - .txd_size = HW_ATL_B0_TXD_SIZE, - .txds = 8U * 1024U, - .txhwb_alignment = 4096U, - .tx_rings = HW_ATL_B0_TX_RINGS, - .rx_rings = HW_ATL_B0_RX_RINGS, - .hw_features = NETIF_F_HW_CSUM | - NETIF_F_RXCSUM | - NETIF_F_RXHASH | - NETIF_F_SG | - NETIF_F_TSO | - NETIF_F_LRO, - .hw_priv_flags = IFF_UNICAST_FLT, - .link_speed_msk = (HW_ATL_B0_RATE_10G | - HW_ATL_B0_RATE_5G | - HW_ATL_B0_RATE_2G5 | - HW_ATL_B0_RATE_1G | - HW_ATL_B0_RATE_100M), - .flow_control = true, - .mtu = HW_ATL_B0_MTU_JUMBO, - .mac_regs_count = 88, - .fw_ver_expected = HW_ATL_B0_FW_VER_EXPECTED, -}; #endif /* HW_ATL_B0_INTERNAL_H */ 
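
The a0 and b0 diffs above remove the per-revision hw_atl_*_get_ops_by_id() probe helpers (together with the local PCI_VENDOR_ID_AQUANTIA / HW_ATL_DEVICE_ID_* fallback defines) and instead export const aq_hw_ops and aq_hw_caps_s tables, so binding a board to its ops/caps pair moves to the PCI layer, which is not part of the hunks shown here. Below is a minimal sketch of what such a binding could look like, assuming a hypothetical board table (aq_board_revision_s, hw_atl_boards[]) and helper name, and assuming PCI_VENDOR_ID_AQUANTIA is now provided by <linux/pci_ids.h>; none of these identifiers come from this patch.

/*
 * Sketch only: one way a probe path could select the exported const
 * ops/caps pairs now that get_ops_by_id() is gone. The struct, table
 * and function names here are illustrative assumptions.
 */
#include <linux/kernel.h>
#include <linux/pci.h>

#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"

struct aq_board_revision_s {
	unsigned short devid;
	unsigned char revision;
	const struct aq_hw_ops *ops;
	const struct aq_hw_caps_s *caps;
};

static const struct aq_board_revision_s hw_atl_boards[] = {
	/* Same device ID, disambiguated by silicon revision:
	 * A0 parts report revision 1, B0 parts revision 2.
	 */
	{ 0xD108, 1U, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc108 },
	{ 0xD108, 2U, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108 },
	{ 0xD109, 1U, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc109 },
	{ 0xD109, 2U, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109 },
	/* ... one entry per device ID and revision ... */
};

static int aq_sketch_get_hw_by_id(struct pci_dev *pdev,
				  const struct aq_hw_ops **ops,
				  const struct aq_hw_caps_s **caps)
{
	unsigned int i;

	if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
		if (hw_atl_boards[i].devid == pdev->device &&
		    hw_atl_boards[i].revision == pdev->revision) {
			*ops = hw_atl_boards[i].ops;
			*caps = hw_atl_boards[i].caps;
			return 0;
		}
	}

	return -EINVAL;
}

Keeping the tables const and letting one lookup replace the two per-revision probe functions is what makes the "#define hw_atl_ops_b1 hw_atl_ops_b0" style aliases in hw_atl_b0.h above work: several marketing SKUs can share a caps/ops pair without duplicating any runtime code.
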
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 3de651afa8c7..10ba035dadb1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -16,111 +16,115 @@ #include "../aq_hw_utils.h" /* global */ -void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore) +void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, + u32 semaphore) { - aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem); + aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore), glb_cpu_sem); } -u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore) +u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore) { - return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore)); + return aq_hw_read_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore)); } -void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis) +void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis) { - aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr, - glb_reg_res_dis_msk, - glb_reg_res_dis_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_REG_RES_DIS_ADR, + HW_ATL_GLB_REG_RES_DIS_MSK, + HW_ATL_GLB_REG_RES_DIS_SHIFT, glb_reg_res_dis); } -void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res) +void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res) { - aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk, - glb_soft_res_shift, soft_res); + aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR, + HW_ATL_GLB_SOFT_RES_MSK, + HW_ATL_GLB_SOFT_RES_SHIFT, soft_res); } -u32 glb_soft_res_get(struct aq_hw_s *aq_hw) +u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr, - glb_soft_res_msk, - glb_soft_res_shift); + return aq_hw_read_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR, + HW_ATL_GLB_SOFT_RES_MSK, + HW_ATL_GLB_SOFT_RES_SHIFT); } -u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw) +u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr); + return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_STAT_COUNTER7_ADR); } -u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw) +u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, glb_mif_id_adr); + return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR); } /* stats */ -u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw) +u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr); + return aq_hw_read_reg(aq_hw, HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR); } -u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) +u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr); + return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW); } -u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) +u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr); + return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW); } -u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) +u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr); + return aq_hw_read_reg(aq_hw, 
HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW); } -u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) +u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr); + return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW); } -u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) +u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr); + return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW); } -u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) +u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr); + return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW); } -u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) +u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr); + return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW); } -u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) +u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr); + return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW); } /* interrupt */ -void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw) +void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, + u32 irq_auto_masklsw) { - aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw); + aq_hw_write_reg(aq_hw, HW_ATL_ITR_IAMRLSW_ADR, irq_auto_masklsw); } -void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx) +void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, + u32 rx) { /* register address for bitfield imr_rx{r}_en */ static u32 itr_imr_rxren_adr[32] = { 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, - 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU, 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, - 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU, 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, - 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU, 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, - 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU }; /* bitmask for bitfield imr_rx{r}_en */ @@ -149,18 +153,19 @@ void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx) irq_map_en_rx); } -void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx) +void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, + u32 tx) { /* register address for bitfield imr_tx{t}_en */ static u32 itr_imr_txten_adr[32] = { 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, - 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU, 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, - 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU, 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, - 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002128U, 0x00002128U, 
0x0000212CU, 0x0000212CU, 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, - 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU }; /* bitmask for bitfield imr_tx{t}_en */ @@ -189,30 +194,30 @@ void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx) irq_map_en_tx); } -void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx) +void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx) { /* register address for bitfield imr_rx{r}[4:0] */ static u32 itr_imr_rxr_adr[32] = { 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, - 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU, 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, - 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU, 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, - 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU, 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, - 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU }; /* bitmask for bitfield imr_rx{r}[4:0] */ static u32 itr_imr_rxr_msk[32] = { - 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, - 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, - 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, - 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, - 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, - 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, - 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU, - 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU, + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU, + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU, + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU, + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU, + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU, + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU, + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU }; /* lower bit position of bitfield imr_rx{r}[4:0] */ @@ -229,30 +234,30 @@ void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx) irq_map_rx); } -void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx) +void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx) { /* register address for bitfield imr_tx{t}[4:0] */ static u32 itr_imr_txt_adr[32] = { 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, - 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU, + 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU, 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, - 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU, + 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU, 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, - 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU, + 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU, 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, - 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU + 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU }; /* bitmask for bitfield imr_tx{t}[4:0] */ static u32 itr_imr_txt_msk[32] = { - 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, - 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, - 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, - 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, - 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, - 
0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, - 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U, - 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U, + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U, + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U, + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U, + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U, + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U, + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U, + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U }; /* lower bit position of bitfield imr_tx{t}[4:0] */ @@ -269,429 +274,463 @@ void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx) irq_map_tx); } -void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw) +void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, + u32 irq_msk_clearlsw) { - aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw); + aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMCRLSW_ADR, irq_msk_clearlsw); } -void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw) +void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw) { - aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw); + aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMSRLSW_ADR, irq_msk_setlsw); } -void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis) +void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis) { - aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr, - itr_reg_res_dsbl_msk, - itr_reg_res_dsbl_shift, irq_reg_res_dis); + aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_REG_RES_DSBL_ADR, + HW_ATL_ITR_REG_RES_DSBL_MSK, + HW_ATL_ITR_REG_RES_DSBL_SHIFT, irq_reg_res_dis); } -void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw, - u32 irq_status_clearlsw) +void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw, + u32 irq_status_clearlsw) { - aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw); + aq_hw_write_reg(aq_hw, HW_ATL_ITR_ISCRLSW_ADR, irq_status_clearlsw); } -u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw) +u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, itr_isrlsw_adr); + return aq_hw_read_reg(aq_hw, HW_ATL_ITR_ISRLSW_ADR); } -u32 itr_res_irq_get(struct aq_hw_s *aq_hw) +u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk, - itr_res_shift); + return aq_hw_read_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK, + HW_ATL_ITR_RES_SHIFT); } -void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq) +void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq) { - aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk, - itr_res_shift, res_irq); + aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK, + HW_ATL_ITR_RES_SHIFT, res_irq); } /* rdm */ -void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) +void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) { - aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca), - rdm_dcadcpuid_msk, - rdm_dcadcpuid_shift, cpuid); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADCPUID_ADR(dca), + HW_ATL_RDM_DCADCPUID_MSK, + HW_ATL_RDM_DCADCPUID_SHIFT, cpuid); } -void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en) +void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en) { - aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk, - rdm_dca_en_shift, rx_dca_en); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_EN_ADR, HW_ATL_RDM_DCA_EN_MSK, + 
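Every setter in these hunks funnels into the same read-modify-write helper, and the paired entries in the imr_* tables above are not an accident: two queues share each 32-bit interrupt-map register, so consecutive queue indices resolve to the same address while the mask selects a different byte lane (0x00001f00 vs 0x0000001f). A minimal sketch of the helper's semantics, assuming aq_hw_read_reg()/aq_hw_write_reg() are plain 32-bit MMIO accessors; sketch_write_reg_bit() is illustrative, not a quote of the driver's aq_hw_utils implementation:

	/* Update one field of a 32-bit register: clear the field's mask,
	 * then or-in the new value at the field's bit position.
	 */
	static void sketch_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr,
					 u32 msk, u32 shift, u32 val)
	{
		u32 reg = aq_hw_read_reg(aq_hw, addr);

		reg = (reg & ~msk) | ((val << shift) & msk);
		aq_hw_write_reg(aq_hw, addr, reg);
	}

Seen this way, each renamed HW_ATL_*_ADR/_MSK/_SHIFT triple is simply an argument bundle for this one pattern.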
HW_ATL_RDM_DCA_EN_SHIFT, rx_dca_en); } -void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode) +void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode) { - aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk, - rdm_dca_mode_shift, rx_dca_mode); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_MODE_ADR, + HW_ATL_RDM_DCA_MODE_MSK, + HW_ATL_RDM_DCA_MODE_SHIFT, rx_dca_mode); } -void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw, - u32 rx_desc_data_buff_size, u32 descriptor) +void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_data_buff_size, + u32 descriptor) { - aq_hw_write_reg_bit(aq_hw, rdm_descddata_size_adr(descriptor), - rdm_descddata_size_msk, - rdm_descddata_size_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor), + HW_ATL_RDM_DESCDDATA_SIZE_MSK, + HW_ATL_RDM_DESCDDATA_SIZE_SHIFT, rx_desc_data_buff_size); } -void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca) +void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, + u32 dca) { - aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca), - rdm_dcaddesc_en_msk, - rdm_dcaddesc_en_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADDESC_EN_ADR(dca), + HW_ATL_RDM_DCADDESC_EN_MSK, + HW_ATL_RDM_DCADDESC_EN_SHIFT, rx_desc_dca_en); } -void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor) +void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, + u32 descriptor) { - aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor), - rdm_descden_msk, - rdm_descden_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDEN_ADR(descriptor), + HW_ATL_RDM_DESCDEN_MSK, + HW_ATL_RDM_DESCDEN_SHIFT, rx_desc_en); } -void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw, - u32 rx_desc_head_buff_size, u32 descriptor) +void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_buff_size, + u32 descriptor) { - aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor), - rdm_descdhdr_size_msk, - rdm_descdhdr_size_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor), + HW_ATL_RDM_DESCDHDR_SIZE_MSK, + HW_ATL_RDM_DESCDHDR_SIZE_SHIFT, rx_desc_head_buff_size); } -void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw, - u32 rx_desc_head_splitting, u32 descriptor) +void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_splitting, + u32 descriptor) { - aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor), - rdm_descdhdr_split_msk, - rdm_descdhdr_split_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor), + HW_ATL_RDM_DESCDHDR_SPLIT_MSK, + HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT, rx_desc_head_splitting); } -u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor) +u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor) { - return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor), - rdm_descdhd_msk, rdm_descdhd_shift); + return aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_DESCDHD_ADR(descriptor), + HW_ATL_RDM_DESCDHD_MSK, + HW_ATL_RDM_DESCDHD_SHIFT); } -void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor) +void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, + u32 descriptor) { - aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor), - rdm_descdlen_msk, rdm_descdlen_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDLEN_ADR(descriptor), + HW_ATL_RDM_DESCDLEN_MSK, HW_ATL_RDM_DESCDLEN_SHIFT, rx_desc_len); } -void 
rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor) +void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, + u32 descriptor) { - aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor), - rdm_descdreset_msk, rdm_descdreset_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDRESET_ADR(descriptor), + HW_ATL_RDM_DESCDRESET_MSK, + HW_ATL_RDM_DESCDRESET_SHIFT, rx_desc_res); } -void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, - u32 rx_desc_wr_wb_irq_en) +void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 rx_desc_wr_wb_irq_en) { - aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr, - rdm_int_desc_wrb_en_msk, - rdm_int_desc_wrb_en_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_DESC_WRB_EN_ADR, + HW_ATL_RDM_INT_DESC_WRB_EN_MSK, + HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT, rx_desc_wr_wb_irq_en); } -void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca) +void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, + u32 dca) { - aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca), - rdm_dcadhdr_en_msk, - rdm_dcadhdr_en_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADHDR_EN_ADR(dca), + HW_ATL_RDM_DCADHDR_EN_MSK, + HW_ATL_RDM_DCADHDR_EN_SHIFT, rx_head_dca_en); } -void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca) +void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, + u32 dca) { - aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca), - rdm_dcadpay_en_msk, rdm_dcadpay_en_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADPAY_EN_ADR(dca), + HW_ATL_RDM_DCADPAY_EN_MSK, + HW_ATL_RDM_DCADPAY_EN_SHIFT, rx_pld_dca_en); } -void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en) +void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 rdm_intr_moder_en) { - aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr, - rdm_int_rim_en_msk, - rdm_int_rim_en_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_RIM_EN_ADR, + HW_ATL_RDM_INT_RIM_EN_MSK, + HW_ATL_RDM_INT_RIM_EN_SHIFT, rdm_intr_moder_en); } /* reg */ -void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx) +void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, + u32 regidx) { - aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map); + aq_hw_write_reg(aq_hw, HW_ATL_GEN_INTR_MAP_ADR(regidx), gen_intr_map); } -u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw) +u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, gen_intr_stat_adr); + return aq_hw_read_reg(aq_hw, HW_ATL_GEN_INTR_STAT_ADR); } -void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl) +void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl) { - aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl); + aq_hw_write_reg(aq_hw, HW_ATL_INTR_GLB_CTL_ADR, intr_glb_ctl); } -void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle) +void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle) { - aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr); + aq_hw_write_reg(aq_hw, HW_ATL_INTR_THR_ADR(throttle), intr_thr); } -void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, - u32 rx_dma_desc_base_addrlsw, - u32 descriptor) +void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrlsw, + u32 descriptor) { - aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor), + aq_hw_write_reg(aq_hw, 
HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor), rx_dma_desc_base_addrlsw); } -void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, - u32 rx_dma_desc_base_addrmsw, - u32 descriptor) +void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrmsw, + u32 descriptor) { - aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor), + aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor), rx_dma_desc_base_addrmsw); } -u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor) +u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor) { - return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor)); + return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor)); } -void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, - u32 rx_dma_desc_tail_ptr, u32 descriptor) +void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_tail_ptr, + u32 descriptor) { - aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor), + aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor), rx_dma_desc_tail_ptr); } -void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk) +void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, + u32 rx_flr_mcst_flr_msk) { - aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk); + aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_MSK_ADR, + rx_flr_mcst_flr_msk); } -void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr, - u32 filter) +void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr, + u32 filter) { - aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr); + aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_ADR(filter), + rx_flr_mcst_flr); } -void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1) +void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, + u32 rx_flr_rss_control1) { - aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1); + aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_RSS_CONTROL1_ADR, + rx_flr_rss_control1); } -void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2) +void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, + u32 rx_filter_control2) { - aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2); + aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_CONTROL2_ADR, rx_filter_control2); } -void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, - u32 rx_intr_moderation_ctl, - u32 queue) +void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 rx_intr_moderation_ctl, + u32 queue) { - aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue), + aq_hw_write_reg(aq_hw, HW_ATL_RX_INTR_MODERATION_CTL_ADR(queue), rx_intr_moderation_ctl); } -void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl) +void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, + u32 tx_dma_debug_ctl) { - aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl); + aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DEBUG_CTL_ADR, tx_dma_debug_ctl); } -void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, - u32 tx_dma_desc_base_addrlsw, - u32 descriptor) +void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrlsw, + u32 descriptor) { - aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor), + aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor), tx_dma_desc_base_addrlsw); } -void 
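The addresslswset/addressmswset pairs above split a 64-bit DMA base across two 32-bit registers. A hedged bring-up sketch using the renamed helpers; sketch_rx_ring_start() is hypothetical, and the /8 descriptor-count scaling follows the convention of the atlantic B0 init path, noted here as an assumption:

	/* Sketch: point rx ring 'ring' at a DMA-coherent descriptor array
	 * and arm it.  lower/upper_32_bits() come from <linux/kernel.h>.
	 */
	static void sketch_rx_ring_start(struct aq_hw_s *aq_hw, u32 ring,
					 dma_addr_t base, u32 nr_descs)
	{
		hw_atl_reg_rx_dma_desc_base_addresslswset(aq_hw,
							  lower_32_bits(base),
							  ring);
		hw_atl_reg_rx_dma_desc_base_addressmswset(aq_hw,
							  upper_32_bits(base),
							  ring);
		/* length register counts descriptors in units of 8 (assumed) */
		hw_atl_rdm_rx_desc_len_set(aq_hw, nr_descs / 8U, ring);
		hw_atl_rdm_rx_desc_en_set(aq_hw, 1U, ring);
	}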
reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, - u32 tx_dma_desc_base_addrmsw, - u32 descriptor) +void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrmsw, + u32 descriptor) { - aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor), + aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor), tx_dma_desc_base_addrmsw); } -void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, - u32 tx_dma_desc_tail_ptr, u32 descriptor) +void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_tail_ptr, + u32 descriptor) { - aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor), + aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor), tx_dma_desc_tail_ptr); } -void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, - u32 tx_intr_moderation_ctl, - u32 queue) +void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue) { - aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue), + aq_hw_write_reg(aq_hw, HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue), tx_intr_moderation_ctl); } /* RPB: rx packet buffer */ -void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk) +void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk) { - aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr, - rpb_dma_sys_lbk_msk, - rpb_dma_sys_lbk_shift, dma_sys_lbk); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_SYS_LBK_ADR, + HW_ATL_RPB_DMA_SYS_LBK_MSK, + HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk); } -void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, - u32 rx_traf_class_mode) +void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, + u32 rx_traf_class_mode) { - aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr, - rpb_rpf_rx_tc_mode_msk, - rpb_rpf_rx_tc_mode_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR, + HW_ATL_RPB_RPF_RX_TC_MODE_MSK, + HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT, rx_traf_class_mode); } -void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en) +void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en) { - aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk, - rpb_rx_buf_en_shift, rx_buff_en); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR, + HW_ATL_RPB_RX_BUF_EN_MSK, + HW_ATL_RPB_RX_BUF_EN_SHIFT, rx_buff_en); } -void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, - u32 rx_buff_hi_threshold_per_tc, - u32 buffer) +void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_hi_threshold_per_tc, + u32 buffer) { - aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer), - rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBHI_THRESH_ADR(buffer), + HW_ATL_RPB_RXBHI_THRESH_MSK, + HW_ATL_RPB_RXBHI_THRESH_SHIFT, rx_buff_hi_threshold_per_tc); } -void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, - u32 rx_buff_lo_threshold_per_tc, - u32 buffer) +void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_lo_threshold_per_tc, + u32 buffer) { - aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer), - rpb_rxblo_thresh_msk, - rpb_rxblo_thresh_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBLO_THRESH_ADR(buffer), + HW_ATL_RPB_RXBLO_THRESH_MSK, + HW_ATL_RPB_RXBLO_THRESH_SHIFT, rx_buff_lo_threshold_per_tc); } -void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode) +void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode) { - 
aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr, - rpb_rx_fc_mode_msk, - rpb_rx_fc_mode_shift, rx_flow_ctl_mode); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_FC_MODE_ADR, + HW_ATL_RPB_RX_FC_MODE_MSK, + HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode); } -void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, - u32 rx_pkt_buff_size_per_tc, u32 buffer) +void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_pkt_buff_size_per_tc, u32 buffer) { - aq_hw_write_reg_bit(aq_hw, rpb_rxbbuf_size_adr(buffer), - rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer), + HW_ATL_RPB_RXBBUF_SIZE_MSK, + HW_ATL_RPB_RXBBUF_SIZE_SHIFT, rx_pkt_buff_size_per_tc); } -void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, - u32 buffer) +void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, + u32 buffer) { - aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer), - rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBXOFF_EN_ADR(buffer), + HW_ATL_RPB_RXBXOFF_EN_MSK, + HW_ATL_RPB_RXBXOFF_EN_SHIFT, rx_xoff_en_per_tc); } /* rpf */ -void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw, - u32 l2broadcast_count_threshold) +void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_count_threshold) { - aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr, - rpfl2bc_thresh_msk, - rpfl2bc_thresh_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_THRESH_ADR, + HW_ATL_RPFL2BC_THRESH_MSK, + HW_ATL_RPFL2BC_THRESH_SHIFT, l2broadcast_count_threshold); } -void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en) +void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en) { - aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk, - rpfl2bc_en_shift, l2broadcast_en); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_EN_ADR, HW_ATL_RPFL2BC_EN_MSK, + HW_ATL_RPFL2BC_EN_SHIFT, l2broadcast_en); } -void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act) +void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_flr_act) { - aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk, - rpfl2bc_act_shift, l2broadcast_flr_act); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_ACT_ADR, + HW_ATL_RPFL2BC_ACT_MSK, + HW_ATL_RPFL2BC_ACT_SHIFT, l2broadcast_flr_act); } -void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en, - u32 filter) +void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, + u32 l2multicast_flr_en, + u32 filter) { - aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter), - rpfl2mc_enf_msk, - rpfl2mc_enf_shift, l2multicast_flr_en); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ENF_ADR(filter), + HW_ATL_RPFL2MC_ENF_MSK, + HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en); } -void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw, - u32 l2promiscuous_mode_en) +void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw, + u32 l2promiscuous_mode_en) { - aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr, - rpfl2promis_mode_msk, - rpfl2promis_mode_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR, + HW_ATL_RPFL2PROMIS_MODE_MSK, + HW_ATL_RPFL2PROMIS_MODE_SHIFT, l2promiscuous_mode_en); } -void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act, - u32 filter) +void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, + u32 l2unicast_flr_act, + u32 filter) { - aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter), - rpfl2uc_actf_msk, 
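The per-TC hi/lo threshold and xoff knobs above implement classic watermark flow control: pause is asserted once buffer fill crosses the high mark and released below the low mark. A sketch of programming one traffic class; sketch_rx_buf_watermarks() is hypothetical, and both the 32-byte threshold units and the 66%/50% split are assumptions borrowed from the magnitudes the B0 init code uses:

	static void sketch_rx_buf_watermarks(struct aq_hw_s *aq_hw, u32 tc,
					     u32 buf_kb)
	{
		/* thresholds assumed to be in 32-byte units */
		u32 units = buf_kb * (1024U / 32U);

		hw_atl_rpb_rx_pkt_buff_size_per_tc_set(aq_hw, buf_kb, tc);
		hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(aq_hw,
							   units * 66U / 100U,
							   tc);
		hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(aq_hw,
							   units * 50U / 100U,
							   tc);
		hw_atl_rpb_rx_xoff_en_per_tc_set(aq_hw, 1U, tc);
	}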
rpfl2uc_actf_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ACTF_ADR(filter), + HW_ATL_RPFL2UC_ACTF_MSK, HW_ATL_RPFL2UC_ACTF_SHIFT, l2unicast_flr_act); } -void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en, - u32 filter) +void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en, + u32 filter) { - aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter), - rpfl2uc_enf_msk, - rpfl2uc_enf_shift, l2unicast_flr_en); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ENF_ADR(filter), + HW_ATL_RPFL2UC_ENF_MSK, + HW_ATL_RPFL2UC_ENF_SHIFT, l2unicast_flr_en); } -void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw, - u32 l2unicast_dest_addresslsw, - u32 filter) +void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addresslsw, + u32 filter) { - aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter), + aq_hw_write_reg(aq_hw, HW_ATL_RPFL2UC_DAFLSW_ADR(filter), l2unicast_dest_addresslsw); } -void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw, - u32 l2unicast_dest_addressmsw, - u32 filter) +void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addressmsw, + u32 filter) { - aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter), - rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_DAFMSW_ADR(filter), + HW_ATL_RPFL2UC_DAFMSW_MSK, + HW_ATL_RPFL2UC_DAFMSW_SHIFT, l2unicast_dest_addressmsw); } -void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw, - u32 l2_accept_all_mc_packets) +void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw, + u32 l2_accept_all_mc_packets) { - aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr, - rpfl2mc_accept_all_msk, - rpfl2mc_accept_all_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ACCEPT_ALL_ADR, + HW_ATL_RPFL2MC_ACCEPT_ALL_MSK, + HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT, l2_accept_all_mc_packets); } -void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, - u32 user_priority_tc_map, u32 tc) +void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, + u32 user_priority_tc_map, u32 tc) { /* register address for bitfield rx_tc_up{t}[2:0] */ static u32 rpf_rpb_rx_tc_upt_adr[8] = { - 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U, - 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U + 0x000054c4U, 0x000054C4U, 0x000054C4U, 0x000054C4U, + 0x000054c4U, 0x000054C4U, 0x000054C4U, 0x000054C4U }; /* bitmask for bitfield rx_tc_up{t}[2:0] */ @@ -711,273 +750,290 @@ void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, user_priority_tc_map); } -void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr) +void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr) { - aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr, - rpf_rss_key_addr_msk, - rpf_rss_key_addr_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_ADDR_ADR, + HW_ATL_RPF_RSS_KEY_ADDR_MSK, + HW_ATL_RPF_RSS_KEY_ADDR_SHIFT, rss_key_addr); } -void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data) +void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data) { - aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr, + aq_hw_write_reg(aq_hw, HW_ATL_RPF_RSS_KEY_WR_DATA_ADR, rss_key_wr_data); } -u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw) +u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr, - rpf_rss_key_wr_eni_msk, - rpf_rss_key_wr_eni_shift); + return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR, + 
HW_ATL_RPF_RSS_KEY_WR_ENI_MSK, + HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT); } -void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en) +void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en) { - aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr, - rpf_rss_key_wr_eni_msk, - rpf_rss_key_wr_eni_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR, + HW_ATL_RPF_RSS_KEY_WR_ENI_MSK, + HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT, rss_key_wr_en); } -void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr) +void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_addr) { - aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr, - rpf_rss_redir_addr_msk, - rpf_rss_redir_addr_shift, rss_redir_tbl_addr); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_ADDR_ADR, + HW_ATL_RPF_RSS_REDIR_ADDR_MSK, + HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT, + rss_redir_tbl_addr); } -void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw, - u32 rss_redir_tbl_wr_data) +void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_wr_data) { - aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr, - rpf_rss_redir_wr_data_msk, - rpf_rss_redir_wr_data_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR, + HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK, + HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT, rss_redir_tbl_wr_data); } -u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw) +u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr, - rpf_rss_redir_wr_eni_msk, - rpf_rss_redir_wr_eni_shift); + return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR, + HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK, + HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT); } -void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en) +void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en) { - aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr, - rpf_rss_redir_wr_eni_msk, - rpf_rss_redir_wr_eni_shift, rss_redir_wr_en); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR, + HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK, + HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT, rss_redir_wr_en); } -void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk) +void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, + u32 tpo_to_rpf_sys_lbk) { - aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr, - rpf_tpo_rpf_sys_lbk_msk, - rpf_tpo_rpf_sys_lbk_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR, + HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK, + HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT, tpo_to_rpf_sys_lbk); } -void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht) +void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht) { - aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr, - rpf_vl_inner_tpid_msk, - rpf_vl_inner_tpid_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_INNER_TPID_ADR, + HW_ATL_RPF_VL_INNER_TPID_MSK, + HW_ATL_RPF_VL_INNER_TPID_SHIFT, vlan_inner_etht); } -void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht) +void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht) { - aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr, - rpf_vl_outer_tpid_msk, - rpf_vl_outer_tpid_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_OUTER_TPID_ADR, + HW_ATL_RPF_VL_OUTER_TPID_MSK, + HW_ATL_RPF_VL_OUTER_TPID_SHIFT, vlan_outer_etht); } -void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en) +void 
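The rss_key_addr/wr_data/wr_en accessors above are halves of a small indirect-write protocol: stage the word, select the slot, pulse write-enable, then wait for the hardware to latch it. A sketch; sketch_rss_key_word_write() is hypothetical, and the self-clearing polarity of the enable bit is assumed from how the getter/setter pair is used:

	/* Needs <linux/delay.h> and <linux/errno.h>. */
	static int sketch_rss_key_word_write(struct aq_hw_s *aq_hw, u32 slot,
					     u32 word)
	{
		int budget = 1000;

		hw_atl_rpf_rss_key_wr_data_set(aq_hw, word);
		hw_atl_rpf_rss_key_addr_set(aq_hw, slot);
		hw_atl_rpf_rss_key_wr_en_set(aq_hw, 1U);

		/* enable bit assumed to self-clear on completion */
		while (hw_atl_rpf_rss_key_wr_en_get(aq_hw) && --budget)
			udelay(10);

		return budget ? 0 : -ETIME;
	}

The redirection-table trio (redir_tbl_addr/wr_data/wr_en) follows the same stage-select-strobe-poll shape.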
hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, + u32 vlan_prom_mode_en) { - aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr, - rpf_vl_promis_mode_msk, - rpf_vl_promis_mode_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR, + HW_ATL_RPF_VL_PROMIS_MODE_MSK, + HW_ATL_RPF_VL_PROMIS_MODE_SHIFT, vlan_prom_mode_en); } -void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw, - u32 vlan_accept_untagged_packets) +void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw, + u32 vlan_acc_untagged_packets) { - aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr, - rpf_vl_accept_untagged_mode_msk, - rpf_vl_accept_untagged_mode_shift, - vlan_accept_untagged_packets); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR, + HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK, + HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT, + vlan_acc_untagged_packets); } -void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act) +void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, + u32 vlan_untagged_act) { - aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr, - rpf_vl_untagged_act_msk, - rpf_vl_untagged_act_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR, + HW_ATL_RPF_VL_UNTAGGED_ACT_MSK, + HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT, vlan_untagged_act); } -void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter) +void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, + u32 filter) { - aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter), - rpf_vl_en_f_msk, - rpf_vl_en_f_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(filter), + HW_ATL_RPF_VL_EN_F_MSK, + HW_ATL_RPF_VL_EN_F_SHIFT, vlan_flr_en); } -void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter) +void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, + u32 filter) { - aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter), - rpf_vl_act_f_msk, - rpf_vl_act_f_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACT_F_ADR(filter), + HW_ATL_RPF_VL_ACT_F_MSK, + HW_ATL_RPF_VL_ACT_F_SHIFT, vlan_flr_act); } -void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter) +void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, + u32 filter) { - aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter), - rpf_vl_id_f_msk, - rpf_vl_id_f_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ID_F_ADR(filter), + HW_ATL_RPF_VL_ID_F_MSK, + HW_ATL_RPF_VL_ID_F_SHIFT, vlan_id_flr); } -void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter) +void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, + u32 filter) { - aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter), - rpf_et_enf_msk, - rpf_et_enf_shift, etht_flr_en); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter), + HW_ATL_RPF_ET_ENF_MSK, + HW_ATL_RPF_ET_ENF_SHIFT, etht_flr_en); } -void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw, - u32 etht_user_priority_en, u32 filter) +void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority_en, u32 filter) { - aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter), - rpf_et_upfen_msk, rpf_et_upfen_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPFEN_ADR(filter), + HW_ATL_RPF_ET_UPFEN_MSK, HW_ATL_RPF_ET_UPFEN_SHIFT, etht_user_priority_en); } -void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en, - u32 filter) +void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, + u32 etht_rx_queue_en, + u32 
filter) { - aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter), - rpf_et_rxqfen_msk, rpf_et_rxqfen_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter), + HW_ATL_RPF_ET_RXQFEN_MSK, + HW_ATL_RPF_ET_RXQFEN_SHIFT, etht_rx_queue_en); } -void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority, - u32 filter) +void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority, + u32 filter) { - aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter), - rpf_et_upf_msk, - rpf_et_upf_shift, etht_user_priority); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter), + HW_ATL_RPF_ET_UPF_MSK, + HW_ATL_RPF_ET_UPF_SHIFT, etht_user_priority); } -void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue, - u32 filter) +void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue, + u32 filter) { - aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter), - rpf_et_rxqf_msk, - rpf_et_rxqf_shift, etht_rx_queue); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQF_ADR(filter), + HW_ATL_RPF_ET_RXQF_MSK, + HW_ATL_RPF_ET_RXQF_SHIFT, etht_rx_queue); } -void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue, - u32 filter) +void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue, + u32 filter) { - aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter), - rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_MNG_RXQF_ADR(filter), + HW_ATL_RPF_ET_MNG_RXQF_MSK, + HW_ATL_RPF_ET_MNG_RXQF_SHIFT, etht_mgt_queue); } -void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter) +void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, + u32 filter) { - aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter), - rpf_et_actf_msk, - rpf_et_actf_shift, etht_flr_act); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ACTF_ADR(filter), + HW_ATL_RPF_ET_ACTF_MSK, + HW_ATL_RPF_ET_ACTF_SHIFT, etht_flr_act); } -void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter) +void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter) { - aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter), - rpf_et_valf_msk, - rpf_et_valf_shift, etht_flr); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter), + HW_ATL_RPF_ET_VALF_MSK, + HW_ATL_RPF_ET_VALF_SHIFT, etht_flr); } /* RPO: rx packet offload */ -void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, - u32 ipv4header_crc_offload_en) +void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en) { - aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr, - rpo_ipv4chk_en_msk, - rpo_ipv4chk_en_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_IPV4CHK_EN_ADR, + HW_ATL_RPO_IPV4CHK_EN_MSK, + HW_ATL_RPO_IPV4CHK_EN_SHIFT, ipv4header_crc_offload_en); } -void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw, - u32 rx_desc_vlan_stripping, u32 descriptor) +void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw, + u32 rx_desc_vlan_stripping, + u32 descriptor) { - aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor), - rpo_descdvl_strip_msk, - rpo_descdvl_strip_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor), + HW_ATL_RPO_DESCDVL_STRIP_MSK, + HW_ATL_RPO_DESCDVL_STRIP_SHIFT, rx_desc_vlan_stripping); } -void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, - u32 tcp_udp_crc_offload_en) +void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en) { - aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, 
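Taken together, the rpf_etht_* setters program one entry of a small ethertype-match table. A sketch that steers a given ethertype to an rx queue; sketch_etht_steer() is hypothetical, and the action encoding (1 == deliver to host) is an assumption:

	static void sketch_etht_steer(struct aq_hw_s *aq_hw, u32 filter,
				      u32 ethertype, u32 queue)
	{
		hw_atl_rpf_etht_flr_set(aq_hw, ethertype, filter); /* match */
		hw_atl_rpf_etht_rx_queue_set(aq_hw, queue, filter);
		hw_atl_rpf_etht_rx_queue_en_set(aq_hw, 1U, filter);
		hw_atl_rpf_etht_flr_act_set(aq_hw, 1U, filter); /* assumed */
		hw_atl_rpf_etht_flr_en_set(aq_hw, 1U, filter);
	}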
rpol4chk_en_msk, - rpol4chk_en_shift, tcp_udp_crc_offload_en); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPOL4CHK_EN_ADR, + HW_ATL_RPOL4CHK_EN_MSK, + HW_ATL_RPOL4CHK_EN_SHIFT, tcp_udp_crc_offload_en); } -void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en) +void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en) { - aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en); + aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_EN_ADR, lro_en); } -void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw, - u32 lro_patch_optimization_en) +void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw, + u32 lro_patch_optimization_en) { - aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr, - rpo_lro_ptopt_en_msk, - rpo_lro_ptopt_en_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PTOPT_EN_ADR, + HW_ATL_RPO_LRO_PTOPT_EN_MSK, + HW_ATL_RPO_LRO_PTOPT_EN_SHIFT, lro_patch_optimization_en); } -void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, - u32 lro_qsessions_lim) +void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, + u32 lro_qsessions_lim) { - aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr, - rpo_lro_qses_lmt_msk, - rpo_lro_qses_lmt_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_QSES_LMT_ADR, + HW_ATL_RPO_LRO_QSES_LMT_MSK, + HW_ATL_RPO_LRO_QSES_LMT_SHIFT, lro_qsessions_lim); } -void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim) +void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, + u32 lro_total_desc_lim) { - aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr, - rpo_lro_tot_dsc_lmt_msk, - rpo_lro_tot_dsc_lmt_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR, + HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK, + HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT, lro_total_desc_lim); } -void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw, - u32 lro_min_pld_of_first_pkt) +void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lro_min_pld_of_first_pkt) { - aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr, - rpo_lro_pkt_min_msk, - rpo_lro_pkt_min_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PKT_MIN_ADR, + HW_ATL_RPO_LRO_PKT_MIN_MSK, + HW_ATL_RPO_LRO_PKT_MIN_SHIFT, lro_min_pld_of_first_pkt); } -void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim) +void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim) { - aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim); + aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_RSC_MAX_ADR, lro_pkt_lim); } -void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw, - u32 lro_max_number_of_descriptors, - u32 lro) +void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw, + u32 lro_max_number_of_descriptors, + u32 lro) { /* Register address for bitfield lro{L}_des_max[1:0] */ static u32 rpo_lro_ldes_max_adr[32] = { @@ -1017,378 +1073,390 @@ void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw, lro_max_number_of_descriptors); } -void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw, - u32 lro_time_base_divider) +void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw, + u32 lro_time_base_divider) { - aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr, - rpo_lro_tb_div_msk, - rpo_lro_tb_div_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TB_DIV_ADR, + HW_ATL_RPO_LRO_TB_DIV_MSK, + HW_ATL_RPO_LRO_TB_DIV_SHIFT, lro_time_base_divider); } -void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw, - u32 lro_inactive_interval) +void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw, + u32 lro_inactive_interval) { - aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr, - 
rpo_lro_ina_ival_msk, - rpo_lro_ina_ival_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_INA_IVAL_ADR, + HW_ATL_RPO_LRO_INA_IVAL_MSK, + HW_ATL_RPO_LRO_INA_IVAL_SHIFT, lro_inactive_interval); } -void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw, - u32 lro_max_coalescing_interval) +void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw, + u32 lro_max_coal_interval) { - aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr, - rpo_lro_max_ival_msk, - rpo_lro_max_ival_shift, - lro_max_coalescing_interval); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_MAX_IVAL_ADR, + HW_ATL_RPO_LRO_MAX_IVAL_MSK, + HW_ATL_RPO_LRO_MAX_IVAL_SHIFT, + lro_max_coal_interval); } /* rx */ -void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis) +void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis) { - aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr, - rx_reg_res_dsbl_msk, - rx_reg_res_dsbl_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_RX_REG_RES_DSBL_ADR, + HW_ATL_RX_REG_RES_DSBL_MSK, + HW_ATL_RX_REG_RES_DSBL_SHIFT, rx_reg_res_dis); } /* tdm */ -void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) +void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) { - aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca), - tdm_dcadcpuid_msk, - tdm_dcadcpuid_shift, cpuid); + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADCPUID_ADR(dca), + HW_ATL_TDM_DCADCPUID_MSK, + HW_ATL_TDM_DCADCPUID_SHIFT, cpuid); } -void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw, - u32 large_send_offload_en) +void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw, + u32 large_send_offload_en) { - aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en); + aq_hw_write_reg(aq_hw, HW_ATL_TDM_LSO_EN_ADR, large_send_offload_en); } -void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en) +void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en) { - aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk, - tdm_dca_en_shift, tx_dca_en); + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_EN_ADR, HW_ATL_TDM_DCA_EN_MSK, + HW_ATL_TDM_DCA_EN_SHIFT, tx_dca_en); } -void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode) +void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode) { - aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk, - tdm_dca_mode_shift, tx_dca_mode); + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_MODE_ADR, + HW_ATL_TDM_DCA_MODE_MSK, + HW_ATL_TDM_DCA_MODE_SHIFT, tx_dca_mode); } -void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca) +void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, + u32 dca) { - aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca), - tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADDESC_EN_ADR(dca), + HW_ATL_TDM_DCADDESC_EN_MSK, + HW_ATL_TDM_DCADDESC_EN_SHIFT, tx_desc_dca_en); } -void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor) +void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, + u32 descriptor) { - aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor), - tdm_descden_msk, - tdm_descden_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDEN_ADR(descriptor), + HW_ATL_TDM_DESCDEN_MSK, + HW_ATL_TDM_DESCDEN_SHIFT, tx_desc_en); } -u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor) +u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor) { - return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor), - tdm_descdhd_msk, 
tdm_descdhd_shift); + return aq_hw_read_reg_bit(aq_hw, HW_ATL_TDM_DESCDHD_ADR(descriptor), + HW_ATL_TDM_DESCDHD_MSK, + HW_ATL_TDM_DESCDHD_SHIFT); } -void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len, - u32 descriptor) +void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len, + u32 descriptor) { - aq_hw_write_reg_bit(aq_hw, tdm_descdlen_adr(descriptor), - tdm_descdlen_msk, - tdm_descdlen_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDLEN_ADR(descriptor), + HW_ATL_TDM_DESCDLEN_MSK, + HW_ATL_TDM_DESCDLEN_SHIFT, tx_desc_len); } -void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, - u32 tx_desc_wr_wb_irq_en) +void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_irq_en) { - aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr, - tdm_int_desc_wrb_en_msk, - tdm_int_desc_wrb_en_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_DESC_WRB_EN_ADR, + HW_ATL_TDM_INT_DESC_WRB_EN_MSK, + HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT, tx_desc_wr_wb_irq_en); } -void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw, - u32 tx_desc_wr_wb_threshold, - u32 descriptor) +void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_threshold, + u32 descriptor) { - aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor), - tdm_descdwrb_thresh_msk, - tdm_descdwrb_thresh_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor), + HW_ATL_TDM_DESCDWRB_THRESH_MSK, + HW_ATL_TDM_DESCDWRB_THRESH_SHIFT, tx_desc_wr_wb_threshold); } -void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw, - u32 tdm_irq_moderation_en) +void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 tdm_irq_moderation_en) { - aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr, - tdm_int_mod_en_msk, - tdm_int_mod_en_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_MOD_EN_ADR, + HW_ATL_TDM_INT_MOD_EN_MSK, + HW_ATL_TDM_INT_MOD_EN_SHIFT, tdm_irq_moderation_en); } /* thm */ -void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw, - u32 lso_tcp_flag_of_first_pkt) +void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_first_pkt) { - aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr, - thm_lso_tcp_flag_first_msk, - thm_lso_tcp_flag_first_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR, + HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK, + HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT, lso_tcp_flag_of_first_pkt); } -void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw, - u32 lso_tcp_flag_of_last_pkt) +void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_last_pkt) { - aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr, - thm_lso_tcp_flag_last_msk, - thm_lso_tcp_flag_last_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR, + HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK, + HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT, lso_tcp_flag_of_last_pkt); } -void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, - u32 lso_tcp_flag_of_middle_pkt) +void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_middle_pkt) { - aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr, - thm_lso_tcp_flag_mid_msk, - thm_lso_tcp_flag_mid_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_MID_ADR, + HW_ATL_THM_LSO_TCP_FLAG_MID_MSK, + HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT, lso_tcp_flag_of_middle_pkt); } /* TPB: tx packet buffer */ -void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en) +void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s 
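The tx head-pointer getter above is what a completion path polls; the number of newly finished descriptors is the ring-wrapped distance from the software's last-seen position. A minimal sketch, with sketch_tx_done_count() as a hypothetical caller-side helper whose sw_head/ring_size come from the caller's ring state:

	static u32 sketch_tx_done_count(struct aq_hw_s *aq_hw, u32 ring,
					u32 sw_head, u32 ring_size)
	{
		u32 hw_head = hw_atl_tdm_tx_desc_head_ptr_get(aq_hw, ring);

		return (hw_head >= sw_head) ?
			hw_head - sw_head :
			hw_head + ring_size - sw_head;
	}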
*aq_hw, u32 tx_buff_en) { - aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk, - tpb_tx_buf_en_shift, tx_buff_en); + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_BUF_EN_ADR, + HW_ATL_TPB_TX_BUF_EN_MSK, + HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en); } -void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, - u32 tx_buff_hi_threshold_per_tc, +void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_hi_threshold_per_tc, u32 buffer) { - aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer), - tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBHI_THRESH_ADR(buffer), + HW_ATL_TPB_TXBHI_THRESH_MSK, + HW_ATL_TPB_TXBHI_THRESH_SHIFT, tx_buff_hi_threshold_per_tc); } -void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, - u32 tx_buff_lo_threshold_per_tc, +void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_lo_threshold_per_tc, u32 buffer) { - aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer), - tpb_txblo_thresh_msk, tpb_txblo_thresh_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBLO_THRESH_ADR(buffer), + HW_ATL_TPB_TXBLO_THRESH_MSK, + HW_ATL_TPB_TXBLO_THRESH_SHIFT, tx_buff_lo_threshold_per_tc); } -void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en) +void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en) { - aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr, - tpb_dma_sys_lbk_msk, - tpb_dma_sys_lbk_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_SYS_LBK_ADR, + HW_ATL_TPB_DMA_SYS_LBK_MSK, + HW_ATL_TPB_DMA_SYS_LBK_SHIFT, tx_dma_sys_lbk_en); } -void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_buff_size_per_tc, u32 buffer) +void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_buff_size_per_tc, u32 buffer) { - aq_hw_write_reg_bit(aq_hw, tpb_txbbuf_size_adr(buffer), - tpb_txbbuf_size_msk, - tpb_txbbuf_size_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer), + HW_ATL_TPB_TXBBUF_SIZE_MSK, + HW_ATL_TPB_TXBBUF_SIZE_SHIFT, tx_pkt_buff_size_per_tc); } -void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en) +void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en) { - aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr, - tpb_tx_scp_ins_en_msk, - tpb_tx_scp_ins_en_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_SCP_INS_EN_ADR, + HW_ATL_TPB_TX_SCP_INS_EN_MSK, + HW_ATL_TPB_TX_SCP_INS_EN_SHIFT, tx_path_scp_ins_en); } /* TPO: tx packet offload */ -void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, - u32 ipv4header_crc_offload_en) +void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en) { - aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr, - tpo_ipv4chk_en_msk, - tpo_ipv4chk_en_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_IPV4CHK_EN_ADR, + HW_ATL_TPO_IPV4CHK_EN_MSK, + HW_ATL_TPO_IPV4CHK_EN_SHIFT, ipv4header_crc_offload_en); } -void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, - u32 tcp_udp_crc_offload_en) +void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en) { - aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr, - tpol4chk_en_msk, - tpol4chk_en_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPOL4CHK_EN_ADR, + HW_ATL_TPOL4CHK_EN_MSK, + HW_ATL_TPOL4CHK_EN_SHIFT, tcp_udp_crc_offload_en); } -void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en) +void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s 
*aq_hw, + u32 tx_pkt_sys_lbk_en) { - aq_hw_write_reg_bit(aq_hw, tpo_pkt_sys_lbk_adr, - tpo_pkt_sys_lbk_msk, - tpo_pkt_sys_lbk_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_PKT_SYS_LBK_ADR, + HW_ATL_TPO_PKT_SYS_LBK_MSK, + HW_ATL_TPO_PKT_SYS_LBK_SHIFT, tx_pkt_sys_lbk_en); } /* TPS: tx packet scheduler */ -void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_data_arb_mode) +void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_data_arb_mode) { - aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr, - tps_data_tc_arb_mode_msk, - tps_data_tc_arb_mode_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TC_ARB_MODE_ADR, + HW_ATL_TPS_DATA_TC_ARB_MODE_MSK, + HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT, tx_pkt_shed_data_arb_mode); } -void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw, - u32 curr_time_res) +void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw, + u32 curr_time_res) { - aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr, - tps_desc_rate_ta_rst_msk, - tps_desc_rate_ta_rst_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_TA_RST_ADR, + HW_ATL_TPS_DESC_RATE_TA_RST_MSK, + HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT, curr_time_res); } -void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_desc_rate_lim) +void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_rate_lim) { - aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr, - tps_desc_rate_lim_msk, - tps_desc_rate_lim_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_LIM_ADR, + HW_ATL_TPS_DESC_RATE_LIM_MSK, + HW_ATL_TPS_DESC_RATE_LIM_SHIFT, tx_pkt_shed_desc_rate_lim); } -void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_desc_tc_arb_mode) +void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, + u32 arb_mode) { - aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr, - tps_desc_tc_arb_mode_msk, - tps_desc_tc_arb_mode_shift, - tx_pkt_shed_desc_tc_arb_mode); + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TC_ARB_MODE_ADR, + HW_ATL_TPS_DESC_TC_ARB_MODE_MSK, + HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT, + arb_mode); } -void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_desc_tc_max_credit, - u32 tc) +void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, + u32 max_credit, + u32 tc) { - aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc), - tps_desc_tctcredit_max_msk, - tps_desc_tctcredit_max_shift, - tx_pkt_shed_desc_tc_max_credit); + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc), + HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK, + HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT, + max_credit); } -void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_desc_tc_weight, u32 tc) +void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_weight, + u32 tc) { - aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc), - tps_desc_tctweight_msk, - tps_desc_tctweight_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc), + HW_ATL_TPS_DESC_TCTWEIGHT_MSK, + HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT, tx_pkt_shed_desc_tc_weight); } -void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_desc_vm_arb_mode) +void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, + u32 arb_mode) { - aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr, - tps_desc_vm_arb_mode_msk, - tps_desc_vm_arb_mode_shift, - 
tx_pkt_shed_desc_vm_arb_mode); + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_VM_ARB_MODE_ADR, + HW_ATL_TPS_DESC_VM_ARB_MODE_MSK, + HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT, + arb_mode); } -void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_tc_data_max_credit, - u32 tc) +void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, + u32 max_credit, + u32 tc) { - aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc), - tps_data_tctcredit_max_msk, - tps_data_tctcredit_max_shift, - tx_pkt_shed_tc_data_max_credit); + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc), + HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK, + HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT, + max_credit); } -void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_tc_data_weight, u32 tc) +void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_weight, + u32 tc) { - aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc), - tps_data_tctweight_msk, - tps_data_tctweight_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc), + HW_ATL_TPS_DATA_TCTWEIGHT_MSK, + HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT, tx_pkt_shed_tc_data_weight); } /* tx */ -void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis) +void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis) { - aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr, - tx_reg_res_dsbl_msk, - tx_reg_res_dsbl_shift, tx_reg_res_dis); + aq_hw_write_reg_bit(aq_hw, HW_ATL_TX_REG_RES_DSBL_ADR, + HW_ATL_TX_REG_RES_DSBL_MSK, + HW_ATL_TX_REG_RES_DSBL_SHIFT, tx_reg_res_dis); } /* msm */ -u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw) +u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr, - msm_reg_access_busy_msk, - msm_reg_access_busy_shift); + return aq_hw_read_reg_bit(aq_hw, HW_ATL_MSM_REG_ACCESS_BUSY_ADR, + HW_ATL_MSM_REG_ACCESS_BUSY_MSK, + HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT); } -void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw, - u32 reg_addr_for_indirect_addr) +void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw, + u32 reg_addr_for_indirect_addr) { - aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr, - msm_reg_addr_msk, - msm_reg_addr_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_ADDR_ADR, + HW_ATL_MSM_REG_ADDR_MSK, + HW_ATL_MSM_REG_ADDR_SHIFT, reg_addr_for_indirect_addr); } -void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe) +void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe) { - aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr, - msm_reg_rd_strobe_msk, - msm_reg_rd_strobe_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_RD_STROBE_ADR, + HW_ATL_MSM_REG_RD_STROBE_MSK, + HW_ATL_MSM_REG_RD_STROBE_SHIFT, reg_rd_strobe); } -u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw) +u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr); + return aq_hw_read_reg(aq_hw, HW_ATL_MSM_REG_RD_DATA_ADR); } -void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data) +void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data) { - aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data); + aq_hw_write_reg(aq_hw, HW_ATL_MSM_REG_WR_DATA_ADR, reg_wr_data); } -void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe) +void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe) { - aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr, - 
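The scheduler setters above express a credit-based weighted round robin: each traffic class gets a credit ceiling and a weight against which transmitted data is charged. A sketch of an equal-weight setup; sketch_tc_wrr_setup() is hypothetical, the 0xFFF/0x64 magnitudes mirror the B0 init code, and 0 == round-robin for the arb-mode encoding is an assumption:

	static void sketch_tc_wrr_setup(struct aq_hw_s *aq_hw)
	{
		u32 tc;

		hw_atl_tps_tx_pkt_shed_data_arb_mode_set(aq_hw, 0U);
		for (tc = 0U; tc < 8U; tc++) {
			hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(aq_hw,
								      0xFFFU,
								      tc);
			hw_atl_tps_tx_pkt_shed_tc_data_weight_set(aq_hw,
								  0x64U, tc);
		}
	}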
msm_reg_wr_strobe_msk, - msm_reg_wr_strobe_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_WR_STROBE_ADR, + HW_ATL_MSM_REG_WR_STROBE_MSK, + HW_ATL_MSM_REG_WR_STROBE_SHIFT, reg_wr_strobe); } /* pci */ -void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis) +void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis) { - aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr, - pci_reg_res_dsbl_msk, - pci_reg_res_dsbl_shift, + aq_hw_write_reg_bit(aq_hw, HW_ATL_PCI_REG_RES_DSBL_ADR, + HW_ATL_PCI_REG_RES_DSBL_MSK, + HW_ATL_PCI_REG_RES_DSBL_SHIFT, pci_reg_res_dis); } -void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp, - u32 scratch_scp) +void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, + u32 glb_cpu_scratch_scp, + u32 scratch_scp) { - aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp), + aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp), glb_cpu_scratch_scp); } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index ed1085b95adb..dfb426f2dc2c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -21,657 +21,681 @@ struct aq_hw_s; /* global */ /* set global microprocessor semaphore */ -void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, - u32 semaphore); +void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, + u32 semaphore); /* get global microprocessor semaphore */ -u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore); +u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore); /* set global register reset disable */ -void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis); +void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis); /* set soft reset */ -void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res); +void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res); /* get soft reset */ -u32 glb_soft_res_get(struct aq_hw_s *aq_hw); +u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw); /* stats */ -u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw); +u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw); /* get rx dma good octet counter lsw */ -u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); +u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); /* get rx dma good packet counter lsw */ -u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); +u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); /* get tx dma good octet counter lsw */ -u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); +u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); /* get tx dma good packet counter lsw */ -u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); +u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); /* get rx dma good octet counter msw */ -u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); +u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); /* get rx dma good packet counter msw */ -u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); +u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); /* get tx dma good octet counter msw */ -u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); +u32 
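The msm_* accessors above implement an indirect register window: wait until no access is in flight, program the target address, strobe a read, then fetch the data register. A sketch of the read side; sketch_msm_read() is hypothetical, and both the busy polarity (non-zero == in flight, inferred from the register's name) and the strobe-then-poll ordering are assumptions:

	/* Needs <linux/delay.h> and <linux/errno.h>. */
	static int sketch_msm_read(struct aq_hw_s *aq_hw, u32 msm_addr,
				   u32 *val)
	{
		int budget = 100;

		while (hw_atl_msm_reg_access_status_get(aq_hw) && --budget)
			udelay(10);
		if (!budget)
			return -ETIME;

		hw_atl_msm_reg_addr_for_indirect_addr_set(aq_hw, msm_addr);
		hw_atl_msm_reg_rd_strobe_set(aq_hw, 1U);

		budget = 100;
		while (hw_atl_msm_reg_access_status_get(aq_hw) && --budget)
			udelay(10);
		if (!budget)
			return -ETIME;

		*val = hw_atl_msm_reg_rd_data_get(aq_hw);
		return 0;
	}

Writes mirror this with msm_reg_wr_data_set() plus the write strobe.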
hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); /* get tx dma good packet counter msw */ -u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); +u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); /* get msm rx errors counter register */ -u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw); /* get msm rx unicast frames counter register */ -u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw); /* get msm rx multicast frames counter register */ -u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw); /* get msm rx broadcast frames counter register */ -u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw); /* get msm rx broadcast octets counter register 1 */ -u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw); /* get msm rx unicast octets counter register 0 */ -u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw); /* get rx dma statistics counter 7 */ -u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw); /* get msm tx errors counter register */ -u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw); /* get msm tx unicast frames counter register */ -u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw); /* get msm tx multicast frames counter register */ -u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw); /* get msm tx broadcast frames counter register */ -u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw); /* get msm tx multicast octets counter register 1 */ -u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw); /* get msm tx broadcast octets counter register 1 */ -u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw); /* get msm tx unicast octets counter register 0 */ -u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw); /* get global mif identification */ -u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw); /* interrupt */ /* set interrupt auto mask lsw */ -void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw); +void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, + u32 irq_auto_masklsw); /* set interrupt mapping enable rx */ -void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx); +void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, + u32 rx); /* set interrupt mapping enable tx */ -void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx); +void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, + u32 tx); /* set 
interrupt mapping rx */ -void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx); +void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx); /* set interrupt mapping tx */ -void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx); +void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx); /* set interrupt mask clear lsw */ -void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw); +void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, + u32 irq_msk_clearlsw); /* set interrupt mask set lsw */ -void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw); +void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw); /* set interrupt register reset disable */ -void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis); +void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis); /* set interrupt status clear lsw */ -void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw, - u32 irq_status_clearlsw); +void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw, + u32 irq_status_clearlsw); /* get interrupt status lsw */ -u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw); +u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw); /* get reset interrupt */ -u32 itr_res_irq_get(struct aq_hw_s *aq_hw); +u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw); /* set reset interrupt */ -void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq); +void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq); /* rdm */ /* set cpu id */ -void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca); +void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca); /* set rx dca enable */ -void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en); +void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en); /* set rx dca mode */ -void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode); +void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode); /* set rx descriptor data buffer size */ -void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw, - u32 rx_desc_data_buff_size, +void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_data_buff_size, u32 descriptor); /* set rx descriptor dca enable */ -void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, - u32 dca); +void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, + u32 dca); /* set rx descriptor enable */ -void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, - u32 descriptor); +void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, + u32 descriptor); /* set rx descriptor header splitting */ -void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw, - u32 rx_desc_head_splitting, +void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_splitting, u32 descriptor); /* get rx descriptor head pointer */ -u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor); +u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor); /* set rx descriptor length */ -void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, - u32 descriptor); +void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, + u32 descriptor); /* set rx descriptor write-back interrupt enable */ -void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, - u32 rx_desc_wr_wb_irq_en); +void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s 
*aq_hw, + u32 rx_desc_wr_wb_irq_en); /* set rx header dca enable */ -void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, - u32 dca); +void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, + u32 dca); /* set rx payload dca enable */ -void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca); +void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, + u32 dca); /* set rx descriptor header buffer size */ -void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw, - u32 rx_desc_head_buff_size, - u32 descriptor); +void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_buff_size, + u32 descriptor); /* set rx descriptor reset */ -void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, - u32 descriptor); +void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, + u32 descriptor); /* Set RDM Interrupt Moderation Enable */ -void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en); +void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 rdm_intr_moder_en); /* reg */ /* set general interrupt mapping register */ -void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx); +void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, + u32 regidx); /* get general interrupt status register */ -u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw); +u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw); /* set interrupt global control register */ -void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl); +void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl); /* set interrupt throttle register */ -void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle); +void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle); /* set rx dma descriptor base address lsw */ -void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, - u32 rx_dma_desc_base_addrlsw, +void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrlsw, u32 descriptor); /* set rx dma descriptor base address msw */ -void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, - u32 rx_dma_desc_base_addrmsw, +void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrmsw, u32 descriptor); /* get rx dma descriptor status register */ -u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor); +u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor); /* set rx dma descriptor tail pointer register */ -void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, - u32 rx_dma_desc_tail_ptr, +void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_tail_ptr, u32 descriptor); /* set rx filter multicast filter mask register */ -void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, - u32 rx_flr_mcst_flr_msk); +void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, + u32 rx_flr_mcst_flr_msk); /* set rx filter multicast filter register */ -void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr, - u32 filter); +void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr, + u32 filter); /* set rx filter rss control register 1 */ -void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, - u32 rx_flr_rss_control1); +void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, + u32 rx_flr_rss_control1); /* Set RX Filter Control 
Register 2 */ -void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2); +void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2); /* Set RX Interrupt Moderation Control Register */ -void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, - u32 rx_intr_moderation_ctl, +void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 rx_intr_moderation_ctl, u32 queue); /* set tx dma debug control */ -void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl); +void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, + u32 tx_dma_debug_ctl); /* set tx dma descriptor base address lsw */ -void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, - u32 tx_dma_desc_base_addrlsw, +void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrlsw, u32 descriptor); /* set tx dma descriptor base address msw */ -void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, - u32 tx_dma_desc_base_addrmsw, +void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrmsw, u32 descriptor); /* set tx dma descriptor tail pointer register */ -void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, - u32 tx_dma_desc_tail_ptr, - u32 descriptor); +void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_tail_ptr, + u32 descriptor); /* Set TX Interrupt Moderation Control Register */ -void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, - u32 tx_intr_moderation_ctl, - u32 queue); +void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue); /* set global microprocessor scratch pad */ -void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, - u32 glb_cpu_scratch_scp, u32 scratch_scp); +void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, + u32 glb_cpu_scratch_scp, + u32 scratch_scp); /* rpb */ /* set dma system loopback */ -void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk); +void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk); /* set rx traffic class mode */ -void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, - u32 rx_traf_class_mode); +void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, + u32 rx_traf_class_mode); /* set rx buffer enable */ -void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en); +void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en); /* set rx buffer high threshold (per tc) */ -void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, - u32 rx_buff_hi_threshold_per_tc, - u32 buffer); +void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_hi_threshold_per_tc, + u32 buffer); /* set rx buffer low threshold (per tc) */ -void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, - u32 rx_buff_lo_threshold_per_tc, +void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_lo_threshold_per_tc, u32 buffer); /* set rx flow control mode */ -void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode); +void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode); /* set rx packet buffer size (per tc) */ -void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, - u32 rx_pkt_buff_size_per_tc, - u32 buffer); +void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_pkt_buff_size_per_tc, + u32 buffer); /* set rx xoff enable (per tc) */ -void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s 
*aq_hw, u32 rx_xoff_en_per_tc, - u32 buffer); +void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, + u32 buffer); /* rpf */ /* set l2 broadcast count threshold */ -void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw, - u32 l2broadcast_count_threshold); +void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_count_threshold); /* set l2 broadcast enable */ -void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en); +void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en); /* set l2 broadcast filter action */ -void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, - u32 l2broadcast_flr_act); +void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_flr_act); /* set l2 multicast filter enable */ -void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en, - u32 filter); +void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, + u32 l2multicast_flr_en, + u32 filter); /* set l2 promiscuous mode enable */ -void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw, - u32 l2promiscuous_mode_en); +void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw, + u32 l2promiscuous_mode_en); /* set l2 unicast filter action */ -void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act, - u32 filter); +void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, + u32 l2unicast_flr_act, + u32 filter); /* set l2 unicast filter enable */ -void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en, - u32 filter); +void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en, + u32 filter); /* set l2 unicast destination address lsw */ -void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw, - u32 l2unicast_dest_addresslsw, +void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addresslsw, u32 filter); /* set l2 unicast destination address msw */ -void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw, - u32 l2unicast_dest_addressmsw, +void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addressmsw, u32 filter); /* Set L2 Accept all Multicast packets */ -void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw, - u32 l2_accept_all_mc_packets); +void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw, + u32 l2_accept_all_mc_packets); /* set user-priority tc mapping */ -void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, - u32 user_priority_tc_map, u32 tc); +void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, + u32 user_priority_tc_map, u32 tc); /* set rss key address */ -void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr); +void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr); /* set rss key write data */ -void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data); +void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data); /* get rss key write enable */ -u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw); +u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw); /* set rss key write enable */ -void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en); +void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en); /* set rss redirection table address */ -void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, - u32 rss_redir_tbl_addr); +void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, + u32 
rss_redir_tbl_addr); /* set rss redirection table write data */ -void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw, - u32 rss_redir_tbl_wr_data); +void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_wr_data); /* get rss redirection write enable */ -u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw); +u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw); /* set rss redirection write enable */ -void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en); +void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en); /* set tpo to rpf system loopback */ -void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, - u32 tpo_to_rpf_sys_lbk); +void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, + u32 tpo_to_rpf_sys_lbk); /* set vlan inner ethertype */ -void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht); +void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht); /* set vlan outer ethertype */ -void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht); +void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht); /* set vlan promiscuous mode enable */ -void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en); +void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, + u32 vlan_prom_mode_en); /* Set VLAN untagged action */ -void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act); +void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, + u32 vlan_untagged_act); /* Set VLAN accept untagged packets */ -void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw, - u32 vlan_accept_untagged_packets); +void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw, + u32 vlan_acc_untagged_packets); /* Set VLAN filter enable */ -void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter); +void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, + u32 filter); /* Set VLAN Filter Action */ -void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act, - u32 filter); +void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act, + u32 filter); /* Set VLAN ID Filter */ -void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter); +void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, + u32 filter); /* set ethertype filter enable */ -void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter); +void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, + u32 filter); /* set ethertype user-priority enable */ -void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw, - u32 etht_user_priority_en, u32 filter); +void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority_en, + u32 filter); /* set ethertype rx queue enable */ -void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en, - u32 filter); +void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, + u32 etht_rx_queue_en, + u32 filter); /* set ethertype rx queue */ -void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue, - u32 filter); +void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue, + u32 filter); /* set ethertype user-priority */ -void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority, - u32 filter); +void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority, + u32 filter); /* 
set ethertype management queue */ -void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue, - u32 filter); +void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue, + u32 filter); /* set ethertype filter action */ -void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, - u32 filter); +void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, + u32 filter); /* set ethertype filter */ -void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter); +void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter); /* rpo */ /* set ipv4 header checksum offload enable */ -void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, - u32 ipv4header_crc_offload_en); +void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en); /* set rx descriptor vlan stripping */ -void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw, - u32 rx_desc_vlan_stripping, - u32 descriptor); +void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw, + u32 rx_desc_vlan_stripping, + u32 descriptor); /* set tcp/udp checksum offload enable */ -void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, - u32 tcp_udp_crc_offload_en); +void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en); /* Set LRO Patch Optimization Enable. */ -void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw, - u32 lro_patch_optimization_en); +void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw, + u32 lro_patch_optimization_en); /* Set Large Receive Offload Enable */ -void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en); +void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en); /* Set LRO Q Sessions Limit */ -void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim); +void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, + u32 lro_qsessions_lim); /* Set LRO Total Descriptor Limit */ -void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim); +void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, + u32 lro_total_desc_lim); /* Set LRO Min Payload of First Packet */ -void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw, - u32 lro_min_pld_of_first_pkt); +void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lro_min_pld_of_first_pkt); /* Set LRO Packet Limit */ -void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim); +void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim); /* Set LRO Max Number of Descriptors */ -void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw, - u32 lro_max_desc_num, u32 lro); +void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw, + u32 lro_max_desc_num, u32 lro); /* Set LRO Time Base Divider */ -void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw, - u32 lro_time_base_divider); +void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw, + u32 lro_time_base_divider); /*Set LRO Inactive Interval */ -void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw, - u32 lro_inactive_interval); +void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw, + u32 lro_inactive_interval); /*Set LRO Max Coalescing Interval */ -void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw, - u32 lro_max_coalescing_interval); +void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw, + u32 lro_max_coal_interval); /* rx */ /* set rx register reset disable */ -void 
rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis); +void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis); /* tdm */ /* set cpu id */ -void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca); +void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca); /* set large send offload enable */ -void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw, - u32 large_send_offload_en); +void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw, + u32 large_send_offload_en); /* set tx descriptor enable */ -void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor); +void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, + u32 descriptor); /* set tx dca enable */ -void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en); +void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en); /* set tx dca mode */ -void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode); +void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode); /* set tx descriptor dca enable */ -void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca); +void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, + u32 dca); /* get tx descriptor head pointer */ -u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor); +u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor); /* set tx descriptor length */ -void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len, - u32 descriptor); +void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len, + u32 descriptor); /* set tx descriptor write-back interrupt enable */ -void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, - u32 tx_desc_wr_wb_irq_en); +void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_irq_en); /* set tx descriptor write-back threshold */ -void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw, - u32 tx_desc_wr_wb_threshold, +void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_threshold, u32 descriptor); /* Set TDM Interrupt Moderation Enable */ -void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw, - u32 tdm_irq_moderation_en); +void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 tdm_irq_moderation_en); /* thm */ /* set lso tcp flag of first packet */ -void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw, - u32 lso_tcp_flag_of_first_pkt); +void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_first_pkt); /* set lso tcp flag of last packet */ -void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw, - u32 lso_tcp_flag_of_last_pkt); +void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_last_pkt); /* set lso tcp flag of middle packet */ -void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, - u32 lso_tcp_flag_of_middle_pkt); +void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_middle_pkt); /* tpb */ /* set tx buffer enable */ -void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en); +void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en); /* set tx buffer high threshold (per tc) */ -void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, - u32 tx_buff_hi_threshold_per_tc, +void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_hi_threshold_per_tc, u32 buffer); /* set tx buffer low 
threshold (per tc) */ -void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, - u32 tx_buff_lo_threshold_per_tc, +void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_lo_threshold_per_tc, u32 buffer); /* set tx dma system loopback enable */ -void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en); +void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en); /* set tx packet buffer size (per tc) */ -void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_buff_size_per_tc, u32 buffer); +void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_buff_size_per_tc, u32 buffer); /* set tx path pad insert enable */ -void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en); +void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en); /* tpo */ /* set ipv4 header checksum offload enable */ -void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, - u32 ipv4header_crc_offload_en); +void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en); /* set tcp/udp checksum offload enable */ -void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, - u32 tcp_udp_crc_offload_en); +void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en); /* set tx pkt system loopback enable */ -void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en); +void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_sys_lbk_en); /* tps */ /* set tx packet scheduler data arbitration mode */ -void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_data_arb_mode); +void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_data_arb_mode); /* set tx packet scheduler descriptor rate current time reset */ -void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw, - u32 curr_time_res); +void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw, + u32 curr_time_res); /* set tx packet scheduler descriptor rate limit */ -void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_desc_rate_lim); +void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_rate_lim); /* set tx packet scheduler descriptor tc arbitration mode */ -void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_desc_tc_arb_mode); +void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, + u32 arb_mode); /* set tx packet scheduler descriptor tc max credit */ -void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_desc_tc_max_credit, +void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, + u32 max_credit, u32 tc); /* set tx packet scheduler descriptor tc weight */ -void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_desc_tc_weight, +void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_weight, u32 tc); /* set tx packet scheduler descriptor vm arbitration mode */ -void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_desc_vm_arb_mode); +void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, + u32 arb_mode); /* set tx packet scheduler tc data max credit */ -void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, - u32 
tx_pkt_shed_tc_data_max_credit, +void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, + u32 max_credit, u32 tc); /* set tx packet scheduler tc data weight */ -void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, - u32 tx_pkt_shed_tc_data_weight, +void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_weight, u32 tc); /* tx */ /* set tx register reset disable */ -void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis); +void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis); /* msm */ /* get register access status */ -u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw); +u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw); /* set register address for indirect address */ -void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw, - u32 reg_addr_for_indirect_addr); +void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw, + u32 reg_addr_for_indirect_addr); /* set register read strobe */ -void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe); +void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe); /* get register read data */ -u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw); +u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw); /* set register write data */ -void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data); +void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data); /* set register write strobe */ -void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe); +void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe); /* pci */ /* set pci register reset disable */ -void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis); +void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis); #endif /* HW_ATL_LLH_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 93450ec930e8..e0cf70120f1d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -18,91 +18,91 @@ * base address: 0x000003a0 * parameter: semaphore {s} | stride size 0x4 | range [0, 15] */ -#define glb_cpu_sem_adr(semaphore) (0x000003a0u + (semaphore) * 0x4) +#define HW_ATL_GLB_CPU_SEM_ADR(semaphore) (0x000003a0u + (semaphore) * 0x4) /* register address for bitfield rx dma good octet counter lsw [1f:0] */ -#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808 +#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW 0x00006808 /* register address for bitfield rx dma good packet counter lsw [1f:0] */ -#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800 +#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW 0x00006800 /* register address for bitfield tx dma good octet counter lsw [1f:0] */ -#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808 +#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW 0x00008808 /* register address for bitfield tx dma good packet counter lsw [1f:0] */ -#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800 +#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW 0x00008800 /* register address for bitfield rx dma good octet counter msw [3f:20] */ -#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c +#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW 0x0000680c /* register address for bitfield rx dma good packet counter msw [3f:20] */ -#define 
stats_rx_dma_good_pkt_countermsw__adr 0x00006804 +#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW 0x00006804 /* register address for bitfield tx dma good octet counter msw [3f:20] */ -#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c +#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW 0x0000880c /* register address for bitfield tx dma good packet counter msw [3f:20] */ -#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804 +#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW 0x00008804 /* preprocessor definitions for msm rx errors counter register */ -#define mac_msm_rx_errs_cnt_adr 0x00000120u +#define HW_ATL_MAC_MSM_RX_ERRS_CNT_ADR 0x00000120u /* preprocessor definitions for msm rx unicast frames counter register */ -#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u +#define HW_ATL_MAC_MSM_RX_UCST_FRM_CNT_ADR 0x000000e0u /* preprocessor definitions for msm rx multicast frames counter register */ -#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u +#define HW_ATL_MAC_MSM_RX_MCST_FRM_CNT_ADR 0x000000e8u /* preprocessor definitions for msm rx broadcast frames counter register */ -#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u +#define HW_ATL_MAC_MSM_RX_BCST_FRM_CNT_ADR 0x000000f0u /* preprocessor definitions for msm rx broadcast octets counter register 1 */ -#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u +#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER1_ADR 0x000001b0u /* preprocessor definitions for msm rx broadcast octets counter register 2 */ -#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u +#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER2_ADR 0x000001b4u /* preprocessor definitions for msm rx unicast octets counter register 0 */ -#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u +#define HW_ATL_MAC_MSM_RX_UCST_OCTETS_COUNTER0_ADR 0x000001b8u /* preprocessor definitions for rx dma statistics counter 7 */ -#define rx_dma_stat_counter7_adr 0x00006818u +#define HW_ATL_RX_DMA_STAT_COUNTER7_ADR 0x00006818u /* preprocessor definitions for msm tx unicast frames counter register */ -#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u +#define HW_ATL_MAC_MSM_TX_UCST_FRM_CNT_ADR 0x00000108u /* preprocessor definitions for msm tx multicast frames counter register */ -#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u +#define HW_ATL_MAC_MSM_TX_MCST_FRM_CNT_ADR 0x00000110u /* preprocessor definitions for global mif identification */ -#define glb_mif_id_adr 0x0000001cu +#define HW_ATL_GLB_MIF_ID_ADR 0x0000001cu /* register address for bitfield iamr_lsw[1f:0] */ -#define itr_iamrlsw_adr 0x00002090 +#define HW_ATL_ITR_IAMRLSW_ADR 0x00002090 /* register address for bitfield rx dma drop packet counter [1f:0] */ -#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818 +#define HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR 0x00006818 /* register address for bitfield imcr_lsw[1f:0] */ -#define itr_imcrlsw_adr 0x00002070 +#define HW_ATL_ITR_IMCRLSW_ADR 0x00002070 /* register address for bitfield imsr_lsw[1f:0] */ -#define itr_imsrlsw_adr 0x00002060 +#define HW_ATL_ITR_IMSRLSW_ADR 0x00002060 /* register address for bitfield itr_reg_res_dsbl */ -#define itr_reg_res_dsbl_adr 0x00002300 +#define HW_ATL_ITR_REG_RES_DSBL_ADR 0x00002300 /* bitmask for bitfield itr_reg_res_dsbl */ -#define itr_reg_res_dsbl_msk 0x20000000 +#define HW_ATL_ITR_REG_RES_DSBL_MSK 0x20000000 /* lower bit position of bitfield itr_reg_res_dsbl */ -#define itr_reg_res_dsbl_shift 29 +#define HW_ATL_ITR_REG_RES_DSBL_SHIFT 29 /* register address for bitfield iscr_lsw[1f:0] */ -#define itr_iscrlsw_adr 0x00002050 +#define HW_ATL_ITR_ISCRLSW_ADR 
0x00002050 /* register address for bitfield isr_lsw[1f:0] */ -#define itr_isrlsw_adr 0x00002000 +#define HW_ATL_ITR_ISRLSW_ADR 0x00002000 /* register address for bitfield itr_reset */ -#define itr_res_adr 0x00002300 +#define HW_ATL_ITR_RES_ADR 0x00002300 /* bitmask for bitfield itr_reset */ -#define itr_res_msk 0x80000000 +#define HW_ATL_ITR_RES_MSK 0x80000000 /* lower bit position of bitfield itr_reset */ -#define itr_res_shift 31 +#define HW_ATL_ITR_RES_SHIFT 31 /* register address for bitfield dca{d}_cpuid[7:0] */ -#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4) +#define HW_ATL_RDM_DCADCPUID_ADR(dca) (0x00006100 + (dca) * 0x4) /* bitmask for bitfield dca{d}_cpuid[7:0] */ -#define rdm_dcadcpuid_msk 0x000000ff +#define HW_ATL_RDM_DCADCPUID_MSK 0x000000ff /* lower bit position of bitfield dca{d}_cpuid[7:0] */ -#define rdm_dcadcpuid_shift 0 +#define HW_ATL_RDM_DCADCPUID_SHIFT 0 /* register address for bitfield dca_en */ -#define rdm_dca_en_adr 0x00006180 +#define HW_ATL_RDM_DCA_EN_ADR 0x00006180 /* rx dca_en bitfield definitions * preprocessor definitions for the bitfield "dca_en". @@ -110,17 +110,17 @@ */ /* register address for bitfield dca_en */ -#define rdm_dca_en_adr 0x00006180 +#define HW_ATL_RDM_DCA_EN_ADR 0x00006180 /* bitmask for bitfield dca_en */ -#define rdm_dca_en_msk 0x80000000 +#define HW_ATL_RDM_DCA_EN_MSK 0x80000000 /* inverted bitmask for bitfield dca_en */ -#define rdm_dca_en_mskn 0x7fffffff +#define HW_ATL_RDM_DCA_EN_MSKN 0x7fffffff /* lower bit position of bitfield dca_en */ -#define rdm_dca_en_shift 31 +#define HW_ATL_RDM_DCA_EN_SHIFT 31 /* width of bitfield dca_en */ -#define rdm_dca_en_width 1 +#define HW_ATL_RDM_DCA_EN_WIDTH 1 /* default value of bitfield dca_en */ -#define rdm_dca_en_default 0x1 +#define HW_ATL_RDM_DCA_EN_DEFAULT 0x1 /* rx dca_mode[3:0] bitfield definitions * preprocessor definitions for the bitfield "dca_mode[3:0]". @@ -128,17 +128,17 @@ */ /* register address for bitfield dca_mode[3:0] */ -#define rdm_dca_mode_adr 0x00006180 +#define HW_ATL_RDM_DCA_MODE_ADR 0x00006180 /* bitmask for bitfield dca_mode[3:0] */ -#define rdm_dca_mode_msk 0x0000000f +#define HW_ATL_RDM_DCA_MODE_MSK 0x0000000f /* inverted bitmask for bitfield dca_mode[3:0] */ -#define rdm_dca_mode_mskn 0xfffffff0 +#define HW_ATL_RDM_DCA_MODE_MSKN 0xfffffff0 /* lower bit position of bitfield dca_mode[3:0] */ -#define rdm_dca_mode_shift 0 +#define HW_ATL_RDM_DCA_MODE_SHIFT 0 /* width of bitfield dca_mode[3:0] */ -#define rdm_dca_mode_width 4 +#define HW_ATL_RDM_DCA_MODE_WIDTH 4 /* default value of bitfield dca_mode[3:0] */ -#define rdm_dca_mode_default 0x0 +#define HW_ATL_RDM_DCA_MODE_DEFAULT 0x0 /* rx desc{d}_data_size[4:0] bitfield definitions * preprocessor definitions for the bitfield "desc{d}_data_size[4:0]". 
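Every bitfield touched by this rename is described by the same ADR/MSK/MSKN/SHIFT/WIDTH/DEFAULT set, and the accessors in hw_atl_llh.c feed the first three into one read-modify-write helper. A minimal sketch of that idiom, assuming an ioremap()'d BAR in place of the driver's real aq_hw_read_reg()/aq_hw_write_reg() wrappers ("mmio" and the function names are illustrative, not the driver's code):

#include <linux/io.h>
#include <linux/types.h>

/* Read-modify-write of one bitfield; the HW_ATL_*_{ADR,MSK,SHIFT}
 * triples above are its inputs.  The precomputed inverted mask
 * (HW_ATL_*_MSKN) exists so a caller can clear a field without a
 * runtime negation: "reg &= MSKN" is equivalent to "reg &= ~MSK".
 */
static void atl_write_reg_bit(void __iomem *mmio, u32 addr, u32 msk,
                              u32 shift, u32 val)
{
        u32 reg = readl(mmio + addr);

        reg &= ~msk;                    /* clear the field */
        reg |= (val << shift) & msk;    /* insert the new value */
        writel(reg, mmio + addr);
}

/* e.g. program dca_mode back to its documented default (0x0) */
static void example_set_dca_mode(void __iomem *mmio)
{
        atl_write_reg_bit(mmio, HW_ATL_RDM_DCA_MODE_ADR,
                          HW_ATL_RDM_DCA_MODE_MSK,
                          HW_ATL_RDM_DCA_MODE_SHIFT,
                          HW_ATL_RDM_DCA_MODE_DEFAULT);
}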
@@ -147,17 +147,18 @@ */ /* register address for bitfield desc{d}_data_size[4:0] */ -#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20) +#define HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor) \ + (0x00005b18 + (descriptor) * 0x20) /* bitmask for bitfield desc{d}_data_size[4:0] */ -#define rdm_descddata_size_msk 0x0000001f +#define HW_ATL_RDM_DESCDDATA_SIZE_MSK 0x0000001f /* inverted bitmask for bitfield desc{d}_data_size[4:0] */ -#define rdm_descddata_size_mskn 0xffffffe0 +#define HW_ATL_RDM_DESCDDATA_SIZE_MSKN 0xffffffe0 /* lower bit position of bitfield desc{d}_data_size[4:0] */ -#define rdm_descddata_size_shift 0 +#define HW_ATL_RDM_DESCDDATA_SIZE_SHIFT 0 /* width of bitfield desc{d}_data_size[4:0] */ -#define rdm_descddata_size_width 5 +#define HW_ATL_RDM_DESCDDATA_SIZE_WIDTH 5 /* default value of bitfield desc{d}_data_size[4:0] */ -#define rdm_descddata_size_default 0x0 +#define HW_ATL_RDM_DESCDDATA_SIZE_DEFAULT 0x0 /* rx dca{d}_desc_en bitfield definitions * preprocessor definitions for the bitfield "dca{d}_desc_en". @@ -166,17 +167,17 @@ */ /* register address for bitfield dca{d}_desc_en */ -#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4) +#define HW_ATL_RDM_DCADDESC_EN_ADR(dca) (0x00006100 + (dca) * 0x4) /* bitmask for bitfield dca{d}_desc_en */ -#define rdm_dcaddesc_en_msk 0x80000000 +#define HW_ATL_RDM_DCADDESC_EN_MSK 0x80000000 /* inverted bitmask for bitfield dca{d}_desc_en */ -#define rdm_dcaddesc_en_mskn 0x7fffffff +#define HW_ATL_RDM_DCADDESC_EN_MSKN 0x7fffffff /* lower bit position of bitfield dca{d}_desc_en */ -#define rdm_dcaddesc_en_shift 31 +#define HW_ATL_RDM_DCADDESC_EN_SHIFT 31 /* width of bitfield dca{d}_desc_en */ -#define rdm_dcaddesc_en_width 1 +#define HW_ATL_RDM_DCADDESC_EN_WIDTH 1 /* default value of bitfield dca{d}_desc_en */ -#define rdm_dcaddesc_en_default 0x0 +#define HW_ATL_RDM_DCADDESC_EN_DEFAULT 0x0 /* rx desc{d}_en bitfield definitions * preprocessor definitions for the bitfield "desc{d}_en". @@ -185,17 +186,17 @@ */ /* register address for bitfield desc{d}_en */ -#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +#define HW_ATL_RDM_DESCDEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20) /* bitmask for bitfield desc{d}_en */ -#define rdm_descden_msk 0x80000000 +#define HW_ATL_RDM_DESCDEN_MSK 0x80000000 /* inverted bitmask for bitfield desc{d}_en */ -#define rdm_descden_mskn 0x7fffffff +#define HW_ATL_RDM_DESCDEN_MSKN 0x7fffffff /* lower bit position of bitfield desc{d}_en */ -#define rdm_descden_shift 31 +#define HW_ATL_RDM_DESCDEN_SHIFT 31 /* width of bitfield desc{d}_en */ -#define rdm_descden_width 1 +#define HW_ATL_RDM_DESCDEN_WIDTH 1 /* default value of bitfield desc{d}_en */ -#define rdm_descden_default 0x0 +#define HW_ATL_RDM_DESCDEN_DEFAULT 0x0 /* rx desc{d}_hdr_size[4:0] bitfield definitions * preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]". 
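The {descriptor}- and {dca}-parameterized macros encode per-ring register files (stride 0x20 and 0x4 respectively), so one helper serves all 32 instances. A hedged bring-up fragment using the renamed LLH calls declared earlier in this patch; the 1 KiB granule assumed for the 5-bit desc{d}_data_size field is not stated in this header and should be checked against the datasheet:

/* Hypothetical RX ring bring-up fragment, illustrative only. */
static void example_rx_ring_setup(struct aq_hw_s *aq_hw, u32 idx,
                                  u32 buf_bytes)
{
        /* HW_ATL_RDM_DESCDDATA_SIZE_ADR(idx) == 0x5b18 + idx * 0x20;
         * unit assumed to be 1 KiB granules (assumption)
         */
        hw_atl_rdm_rx_desc_data_buff_size_set(aq_hw, buf_bytes / 1024U, idx);

        /* HW_ATL_RDM_DESCDEN_ADR(idx) == 0x5b08 + idx * 0x20, bit 31 */
        hw_atl_rdm_rx_desc_en_set(aq_hw, 1U, idx);
}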
@@ -204,17 +205,18 @@ */ /* register address for bitfield desc{d}_hdr_size[4:0] */ -#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20) +#define HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor) \ + (0x00005b18 + (descriptor) * 0x20) /* bitmask for bitfield desc{d}_hdr_size[4:0] */ -#define rdm_descdhdr_size_msk 0x00001f00 +#define HW_ATL_RDM_DESCDHDR_SIZE_MSK 0x00001f00 /* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */ -#define rdm_descdhdr_size_mskn 0xffffe0ff +#define HW_ATL_RDM_DESCDHDR_SIZE_MSKN 0xffffe0ff /* lower bit position of bitfield desc{d}_hdr_size[4:0] */ -#define rdm_descdhdr_size_shift 8 +#define HW_ATL_RDM_DESCDHDR_SIZE_SHIFT 8 /* width of bitfield desc{d}_hdr_size[4:0] */ -#define rdm_descdhdr_size_width 5 +#define HW_ATL_RDM_DESCDHDR_SIZE_WIDTH 5 /* default value of bitfield desc{d}_hdr_size[4:0] */ -#define rdm_descdhdr_size_default 0x0 +#define HW_ATL_RDM_DESCDHDR_SIZE_DEFAULT 0x0 /* rx desc{d}_hdr_split bitfield definitions * preprocessor definitions for the bitfield "desc{d}_hdr_split". @@ -223,17 +225,18 @@ */ /* register address for bitfield desc{d}_hdr_split */ -#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +#define HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor) \ + (0x00005b08 + (descriptor) * 0x20) /* bitmask for bitfield desc{d}_hdr_split */ -#define rdm_descdhdr_split_msk 0x10000000 +#define HW_ATL_RDM_DESCDHDR_SPLIT_MSK 0x10000000 /* inverted bitmask for bitfield desc{d}_hdr_split */ -#define rdm_descdhdr_split_mskn 0xefffffff +#define HW_ATL_RDM_DESCDHDR_SPLIT_MSKN 0xefffffff /* lower bit position of bitfield desc{d}_hdr_split */ -#define rdm_descdhdr_split_shift 28 +#define HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT 28 /* width of bitfield desc{d}_hdr_split */ -#define rdm_descdhdr_split_width 1 +#define HW_ATL_RDM_DESCDHDR_SPLIT_WIDTH 1 /* default value of bitfield desc{d}_hdr_split */ -#define rdm_descdhdr_split_default 0x0 +#define HW_ATL_RDM_DESCDHDR_SPLIT_DEFAULT 0x0 /* rx desc{d}_hd[c:0] bitfield definitions * preprocessor definitions for the bitfield "desc{d}_hd[c:0]". @@ -242,15 +245,15 @@ */ /* register address for bitfield desc{d}_hd[c:0] */ -#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20) +#define HW_ATL_RDM_DESCDHD_ADR(descriptor) (0x00005b0c + (descriptor) * 0x20) /* bitmask for bitfield desc{d}_hd[c:0] */ -#define rdm_descdhd_msk 0x00001fff +#define HW_ATL_RDM_DESCDHD_MSK 0x00001fff /* inverted bitmask for bitfield desc{d}_hd[c:0] */ -#define rdm_descdhd_mskn 0xffffe000 +#define HW_ATL_RDM_DESCDHD_MSKN 0xffffe000 /* lower bit position of bitfield desc{d}_hd[c:0] */ -#define rdm_descdhd_shift 0 +#define HW_ATL_RDM_DESCDHD_SHIFT 0 /* width of bitfield desc{d}_hd[c:0] */ -#define rdm_descdhd_width 13 +#define HW_ATL_RDM_DESCDHD_WIDTH 13 /* rx desc{d}_len[9:0] bitfield definitions * preprocessor definitions for the bitfield "desc{d}_len[9:0]". 
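The read direction uses the same MSK/SHIFT pair in reverse. A sketch of extracting the 13-bit desc{d}_hd ring head pointer, again with a hypothetical "mmio" base standing in for aq_hw_read_reg():

static u32 example_rx_head_ptr(void __iomem *mmio, u32 idx)
{
        u32 reg = readl(mmio + HW_ATL_RDM_DESCDHD_ADR(idx));

        /* mask first, then shift the field down to bit 0 */
        return (reg & HW_ATL_RDM_DESCDHD_MSK) >> HW_ATL_RDM_DESCDHD_SHIFT;
}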
@@ -259,17 +262,17 @@ */ /* register address for bitfield desc{d}_len[9:0] */ -#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +#define HW_ATL_RDM_DESCDLEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20) /* bitmask for bitfield desc{d}_len[9:0] */ -#define rdm_descdlen_msk 0x00001ff8 +#define HW_ATL_RDM_DESCDLEN_MSK 0x00001ff8 /* inverted bitmask for bitfield desc{d}_len[9:0] */ -#define rdm_descdlen_mskn 0xffffe007 +#define HW_ATL_RDM_DESCDLEN_MSKN 0xffffe007 /* lower bit position of bitfield desc{d}_len[9:0] */ -#define rdm_descdlen_shift 3 +#define HW_ATL_RDM_DESCDLEN_SHIFT 3 /* width of bitfield desc{d}_len[9:0] */ -#define rdm_descdlen_width 10 +#define HW_ATL_RDM_DESCDLEN_WIDTH 10 /* default value of bitfield desc{d}_len[9:0] */ -#define rdm_descdlen_default 0x0 +#define HW_ATL_RDM_DESCDLEN_DEFAULT 0x0 /* rx desc{d}_reset bitfield definitions * preprocessor definitions for the bitfield "desc{d}_reset". @@ -278,17 +281,17 @@ */ /* register address for bitfield desc{d}_reset */ -#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +#define HW_ATL_RDM_DESCDRESET_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20) /* bitmask for bitfield desc{d}_reset */ -#define rdm_descdreset_msk 0x02000000 +#define HW_ATL_RDM_DESCDRESET_MSK 0x02000000 /* inverted bitmask for bitfield desc{d}_reset */ -#define rdm_descdreset_mskn 0xfdffffff +#define HW_ATL_RDM_DESCDRESET_MSKN 0xfdffffff /* lower bit position of bitfield desc{d}_reset */ -#define rdm_descdreset_shift 25 +#define HW_ATL_RDM_DESCDRESET_SHIFT 25 /* width of bitfield desc{d}_reset */ -#define rdm_descdreset_width 1 +#define HW_ATL_RDM_DESCDRESET_WIDTH 1 /* default value of bitfield desc{d}_reset */ -#define rdm_descdreset_default 0x0 +#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0 /* rx int_desc_wrb_en bitfield definitions * preprocessor definitions for the bitfield "int_desc_wrb_en". @@ -296,17 +299,17 @@ */ /* register address for bitfield int_desc_wrb_en */ -#define rdm_int_desc_wrb_en_adr 0x00005a30 +#define HW_ATL_RDM_INT_DESC_WRB_EN_ADR 0x00005a30 /* bitmask for bitfield int_desc_wrb_en */ -#define rdm_int_desc_wrb_en_msk 0x00000004 +#define HW_ATL_RDM_INT_DESC_WRB_EN_MSK 0x00000004 /* inverted bitmask for bitfield int_desc_wrb_en */ -#define rdm_int_desc_wrb_en_mskn 0xfffffffb +#define HW_ATL_RDM_INT_DESC_WRB_EN_MSKN 0xfffffffb /* lower bit position of bitfield int_desc_wrb_en */ -#define rdm_int_desc_wrb_en_shift 2 +#define HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT 2 /* width of bitfield int_desc_wrb_en */ -#define rdm_int_desc_wrb_en_width 1 +#define HW_ATL_RDM_INT_DESC_WRB_EN_WIDTH 1 /* default value of bitfield int_desc_wrb_en */ -#define rdm_int_desc_wrb_en_default 0x0 +#define HW_ATL_RDM_INT_DESC_WRB_EN_DEFAULT 0x0 /* rx dca{d}_hdr_en bitfield definitions * preprocessor definitions for the bitfield "dca{d}_hdr_en". 
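desc{d}_len occupying bits 12:3 means the low three bits of the descriptor count are not representable, which suggests the hardware counts RX ring length in units of eight descriptors. Treat that unit as an inference from the mask, not a documented fact:

static void example_rx_ring_len(struct aq_hw_s *aq_hw, u32 ring_size,
                                u32 idx)
{
        /* HW_ATL_RDM_DESCDLEN_MSK is 0x00001ff8 with shift 3, so the
         * programmed value is presumably size / 8 (assumption).
         */
        hw_atl_rdm_rx_desc_len_set(aq_hw, ring_size / 8U, idx);
}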
@@ -315,17 +318,17 @@ */ /* register address for bitfield dca{d}_hdr_en */ -#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4) +#define HW_ATL_RDM_DCADHDR_EN_ADR(dca) (0x00006100 + (dca) * 0x4) /* bitmask for bitfield dca{d}_hdr_en */ -#define rdm_dcadhdr_en_msk 0x40000000 +#define HW_ATL_RDM_DCADHDR_EN_MSK 0x40000000 /* inverted bitmask for bitfield dca{d}_hdr_en */ -#define rdm_dcadhdr_en_mskn 0xbfffffff +#define HW_ATL_RDM_DCADHDR_EN_MSKN 0xbfffffff /* lower bit position of bitfield dca{d}_hdr_en */ -#define rdm_dcadhdr_en_shift 30 +#define HW_ATL_RDM_DCADHDR_EN_SHIFT 30 /* width of bitfield dca{d}_hdr_en */ -#define rdm_dcadhdr_en_width 1 +#define HW_ATL_RDM_DCADHDR_EN_WIDTH 1 /* default value of bitfield dca{d}_hdr_en */ -#define rdm_dcadhdr_en_default 0x0 +#define HW_ATL_RDM_DCADHDR_EN_DEFAULT 0x0 /* rx dca{d}_pay_en bitfield definitions * preprocessor definitions for the bitfield "dca{d}_pay_en". @@ -334,17 +337,17 @@ */ /* register address for bitfield dca{d}_pay_en */ -#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4) +#define HW_ATL_RDM_DCADPAY_EN_ADR(dca) (0x00006100 + (dca) * 0x4) /* bitmask for bitfield dca{d}_pay_en */ -#define rdm_dcadpay_en_msk 0x20000000 +#define HW_ATL_RDM_DCADPAY_EN_MSK 0x20000000 /* inverted bitmask for bitfield dca{d}_pay_en */ -#define rdm_dcadpay_en_mskn 0xdfffffff +#define HW_ATL_RDM_DCADPAY_EN_MSKN 0xdfffffff /* lower bit position of bitfield dca{d}_pay_en */ -#define rdm_dcadpay_en_shift 29 +#define HW_ATL_RDM_DCADPAY_EN_SHIFT 29 /* width of bitfield dca{d}_pay_en */ -#define rdm_dcadpay_en_width 1 +#define HW_ATL_RDM_DCADPAY_EN_WIDTH 1 /* default value of bitfield dca{d}_pay_en */ -#define rdm_dcadpay_en_default 0x0 +#define HW_ATL_RDM_DCADPAY_EN_DEFAULT 0x0 /* RX rdm_int_rim_en Bitfield Definitions * Preprocessor definitions for the bitfield "rdm_int_rim_en". 
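All four per-ring DCA fields (cpuid at bits 7:0, pay_en at bit 29, hdr_en at bit 30, desc_en at bit 31) share the one register at 0x6100 + dca * 0x4, so a full DCA binding for a ring is four bitfield writes through the renamed helpers. Illustrative only:

static void example_rx_dca_bind(struct aq_hw_s *aq_hw, u32 idx, u32 cpu)
{
        hw_atl_rdm_cpu_id_set(aq_hw, cpu, idx);         /* dca{d}_cpuid    */
        hw_atl_rdm_rx_desc_dca_en_set(aq_hw, 1U, idx);  /* bit 31          */
        hw_atl_rdm_rx_head_dca_en_set(aq_hw, 1U, idx);  /* bit 30, header  */
        hw_atl_rdm_rx_pld_dca_en_set(aq_hw, 1U, idx);   /* bit 29, payload */
}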
@@ -352,51 +355,51 @@ */ /* Register address for bitfield rdm_int_rim_en */ -#define rdm_int_rim_en_adr 0x00005A30 +#define HW_ATL_RDM_INT_RIM_EN_ADR 0x00005A30 /* Bitmask for bitfield rdm_int_rim_en */ -#define rdm_int_rim_en_msk 0x00000008 +#define HW_ATL_RDM_INT_RIM_EN_MSK 0x00000008 /* Inverted bitmask for bitfield rdm_int_rim_en */ -#define rdm_int_rim_en_mskn 0xFFFFFFF7 +#define HW_ATL_RDM_INT_RIM_EN_MSKN 0xFFFFFFF7 /* Lower bit position of bitfield rdm_int_rim_en */ -#define rdm_int_rim_en_shift 3 +#define HW_ATL_RDM_INT_RIM_EN_SHIFT 3 /* Width of bitfield rdm_int_rim_en */ -#define rdm_int_rim_en_width 1 +#define HW_ATL_RDM_INT_RIM_EN_WIDTH 1 /* Default value of bitfield rdm_int_rim_en */ -#define rdm_int_rim_en_default 0x0 +#define HW_ATL_RDM_INT_RIM_EN_DEFAULT 0x0 /* general interrupt mapping register definitions * preprocessor definitions for general interrupt mapping register * base address: 0x00002180 * parameter: regidx {f} | stride size 0x4 | range [0, 3] */ -#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4) +#define HW_ATL_GEN_INTR_MAP_ADR(regidx) (0x00002180u + (regidx) * 0x4) /* general interrupt status register definitions * preprocessor definitions for general interrupt status register * address: 0x000021A0 */ -#define gen_intr_stat_adr 0x000021A4U +#define HW_ATL_GEN_INTR_STAT_ADR 0x000021A4U /* interrupt global control register definitions * preprocessor definitions for interrupt global control register * address: 0x00002300 */ -#define intr_glb_ctl_adr 0x00002300u +#define HW_ATL_INTR_GLB_CTL_ADR 0x00002300u /* interrupt throttle register definitions * preprocessor definitions for interrupt throttle register * base address: 0x00002800 * parameter: throttle {t} | stride size 0x4 | range [0, 31] */ -#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4) +#define HW_ATL_INTR_THR_ADR(throttle) (0x00002800u + (throttle) * 0x4) /* rx dma descriptor base address lsw definitions * preprocessor definitions for rx dma descriptor base address lsw * base address: 0x00005b00 * parameter: descriptor {d} | stride size 0x20 | range [0, 31] */ -#define rx_dma_desc_base_addrlsw_adr(descriptor) \ +#define HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \ (0x00005b00u + (descriptor) * 0x20) /* rx dma descriptor base address msw definitions @@ -404,7 +407,7 @@ * base address: 0x00005b04 * parameter: descriptor {d} | stride size 0x20 | range [0, 31] */ -#define rx_dma_desc_base_addrmsw_adr(descriptor) \ +#define HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \ (0x00005b04u + (descriptor) * 0x20) /* rx dma descriptor status register definitions @@ -412,46 +415,48 @@ * base address: 0x00005b14 * parameter: descriptor {d} | stride size 0x20 | range [0, 31] */ -#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20) +#define HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor) \ + (0x00005b14u + (descriptor) * 0x20) /* rx dma descriptor tail pointer register definitions * preprocessor definitions for rx dma descriptor tail pointer register * base address: 0x00005b10 * parameter: descriptor {d} | stride size 0x20 | range [0, 31] */ -#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20) +#define HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor) \ + (0x00005b10u + (descriptor) * 0x20) /* rx interrupt moderation control register definitions * Preprocessor definitions for RX Interrupt Moderation Control Register * Base Address: 0x00005A40 * Parameter: RIM {R} | stride size 0x4 | range [0, 31] */ -#define rx_intr_moderation_ctl_adr(rim) 
(0x00005A40u + (rim) * 0x4) +#define HW_ATL_RX_INTR_MODERATION_CTL_ADR(rim) (0x00005A40u + (rim) * 0x4) /* rx filter multicast filter mask register definitions * preprocessor definitions for rx filter multicast filter mask register * address: 0x00005270 */ -#define rx_flr_mcst_flr_msk_adr 0x00005270u +#define HW_ATL_RX_FLR_MCST_FLR_MSK_ADR 0x00005270u /* rx filter multicast filter register definitions * preprocessor definitions for rx filter multicast filter register * base address: 0x00005250 * parameter: filter {f} | stride size 0x4 | range [0, 7] */ -#define rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4) +#define HW_ATL_RX_FLR_MCST_FLR_ADR(filter) (0x00005250u + (filter) * 0x4) /* RX Filter RSS Control Register 1 Definitions * Preprocessor definitions for RX Filter RSS Control Register 1 * Address: 0x000054C0 */ -#define rx_flr_rss_control1_adr 0x000054C0u +#define HW_ATL_RX_FLR_RSS_CONTROL1_ADR 0x000054C0u /* RX Filter Control Register 2 Definitions * Preprocessor definitions for RX Filter Control Register 2 * Address: 0x00005104 */ -#define rx_flr_control2_adr 0x00005104u +#define HW_ATL_RX_FLR_CONTROL2_ADR 0x00005104u /* tx tx dma debug control [1f:0] bitfield definitions * preprocessor definitions for the bitfield "tx dma debug control [1f:0]". @@ -459,24 +464,24 @@ */ /* register address for bitfield tx dma debug control [1f:0] */ -#define tdm_tx_dma_debug_ctl_adr 0x00008920 +#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_ADR 0x00008920 /* bitmask for bitfield tx dma debug control [1f:0] */ -#define tdm_tx_dma_debug_ctl_msk 0xffffffff +#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSK 0xffffffff /* inverted bitmask for bitfield tx dma debug control [1f:0] */ -#define tdm_tx_dma_debug_ctl_mskn 0x00000000 +#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSKN 0x00000000 /* lower bit position of bitfield tx dma debug control [1f:0] */ -#define tdm_tx_dma_debug_ctl_shift 0 +#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_SHIFT 0 /* width of bitfield tx dma debug control [1f:0] */ -#define tdm_tx_dma_debug_ctl_width 32 +#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_WIDTH 32 /* default value of bitfield tx dma debug control [1f:0] */ -#define tdm_tx_dma_debug_ctl_default 0x0 +#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_DEFAULT 0x0 /* tx dma descriptor base address lsw definitions * preprocessor definitions for tx dma descriptor base address lsw * base address: 0x00007c00 * parameter: descriptor {d} | stride size 0x40 | range [0, 31] */ -#define tx_dma_desc_base_addrlsw_adr(descriptor) \ +#define HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \ (0x00007c00u + (descriptor) * 0x40) /* tx dma descriptor tail pointer register definitions @@ -484,7 +489,8 @@ * base address: 0x00007c10 * parameter: descriptor {d} | stride size 0x40 | range [0, 31] */ -#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40) +#define HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor) \ + (0x00007c10u + (descriptor) * 0x40) /* rx dma_sys_loopback bitfield definitions * preprocessor definitions for the bitfield "dma_sys_loopback". 
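There is one RX interrupt moderation control word per queue (0x5A40 + queue * 0x4, queues 0-31, per the RIM parameter above). Its internal layout (an enable flag plus delay fields) is not spelled out in this header, so the sketch below treats the value as an opaque, precomputed word:

static void example_rx_coalescing(struct aq_hw_s *aq_hw, u32 moder_ctl)
{
        u32 q;

        for (q = 0U; q < 32U; q++)      /* parameter range [0, 31] above */
                hw_atl_reg_rx_intr_moder_ctrl_set(aq_hw, moder_ctl, q);
}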
@@ -492,17 +498,17 @@ */ /* register address for bitfield dma_sys_loopback */ -#define rpb_dma_sys_lbk_adr 0x00005000 +#define HW_ATL_RPB_DMA_SYS_LBK_ADR 0x00005000 /* bitmask for bitfield dma_sys_loopback */ -#define rpb_dma_sys_lbk_msk 0x00000040 +#define HW_ATL_RPB_DMA_SYS_LBK_MSK 0x00000040 /* inverted bitmask for bitfield dma_sys_loopback */ -#define rpb_dma_sys_lbk_mskn 0xffffffbf +#define HW_ATL_RPB_DMA_SYS_LBK_MSKN 0xffffffbf /* lower bit position of bitfield dma_sys_loopback */ -#define rpb_dma_sys_lbk_shift 6 +#define HW_ATL_RPB_DMA_SYS_LBK_SHIFT 6 /* width of bitfield dma_sys_loopback */ -#define rpb_dma_sys_lbk_width 1 +#define HW_ATL_RPB_DMA_SYS_LBK_WIDTH 1 /* default value of bitfield dma_sys_loopback */ -#define rpb_dma_sys_lbk_default 0x0 +#define HW_ATL_RPB_DMA_SYS_LBK_DEFAULT 0x0 /* rx rx_tc_mode bitfield definitions * preprocessor definitions for the bitfield "rx_tc_mode". @@ -510,17 +516,17 @@ */ /* register address for bitfield rx_tc_mode */ -#define rpb_rpf_rx_tc_mode_adr 0x00005700 +#define HW_ATL_RPB_RPF_RX_TC_MODE_ADR 0x00005700 /* bitmask for bitfield rx_tc_mode */ -#define rpb_rpf_rx_tc_mode_msk 0x00000100 +#define HW_ATL_RPB_RPF_RX_TC_MODE_MSK 0x00000100 /* inverted bitmask for bitfield rx_tc_mode */ -#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff +#define HW_ATL_RPB_RPF_RX_TC_MODE_MSKN 0xfffffeff /* lower bit position of bitfield rx_tc_mode */ -#define rpb_rpf_rx_tc_mode_shift 8 +#define HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT 8 /* width of bitfield rx_tc_mode */ -#define rpb_rpf_rx_tc_mode_width 1 +#define HW_ATL_RPB_RPF_RX_TC_MODE_WIDTH 1 /* default value of bitfield rx_tc_mode */ -#define rpb_rpf_rx_tc_mode_default 0x0 +#define HW_ATL_RPB_RPF_RX_TC_MODE_DEFAULT 0x0 /* rx rx_buf_en bitfield definitions * preprocessor definitions for the bitfield "rx_buf_en". @@ -528,17 +534,17 @@ */ /* register address for bitfield rx_buf_en */ -#define rpb_rx_buf_en_adr 0x00005700 +#define HW_ATL_RPB_RX_BUF_EN_ADR 0x00005700 /* bitmask for bitfield rx_buf_en */ -#define rpb_rx_buf_en_msk 0x00000001 +#define HW_ATL_RPB_RX_BUF_EN_MSK 0x00000001 /* inverted bitmask for bitfield rx_buf_en */ -#define rpb_rx_buf_en_mskn 0xfffffffe +#define HW_ATL_RPB_RX_BUF_EN_MSKN 0xfffffffe /* lower bit position of bitfield rx_buf_en */ -#define rpb_rx_buf_en_shift 0 +#define HW_ATL_RPB_RX_BUF_EN_SHIFT 0 /* width of bitfield rx_buf_en */ -#define rpb_rx_buf_en_width 1 +#define HW_ATL_RPB_RX_BUF_EN_WIDTH 1 /* default value of bitfield rx_buf_en */ -#define rpb_rx_buf_en_default 0x0 +#define HW_ATL_RPB_RX_BUF_EN_DEFAULT 0x0 /* rx rx{b}_hi_thresh[d:0] bitfield definitions * preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]". 
@@ -547,17 +553,17 @@ */ /* register address for bitfield rx{b}_hi_thresh[d:0] */ -#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10) +#define HW_ATL_RPB_RXBHI_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10) /* bitmask for bitfield rx{b}_hi_thresh[d:0] */ -#define rpb_rxbhi_thresh_msk 0x3fff0000 +#define HW_ATL_RPB_RXBHI_THRESH_MSK 0x3fff0000 /* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */ -#define rpb_rxbhi_thresh_mskn 0xc000ffff +#define HW_ATL_RPB_RXBHI_THRESH_MSKN 0xc000ffff /* lower bit position of bitfield rx{b}_hi_thresh[d:0] */ -#define rpb_rxbhi_thresh_shift 16 +#define HW_ATL_RPB_RXBHI_THRESH_SHIFT 16 /* width of bitfield rx{b}_hi_thresh[d:0] */ -#define rpb_rxbhi_thresh_width 14 +#define HW_ATL_RPB_RXBHI_THRESH_WIDTH 14 /* default value of bitfield rx{b}_hi_thresh[d:0] */ -#define rpb_rxbhi_thresh_default 0x0 +#define HW_ATL_RPB_RXBHI_THRESH_DEFAULT 0x0 /* rx rx{b}_lo_thresh[d:0] bitfield definitions * preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]". @@ -566,17 +572,17 @@ */ /* register address for bitfield rx{b}_lo_thresh[d:0] */ -#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10) +#define HW_ATL_RPB_RXBLO_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10) /* bitmask for bitfield rx{b}_lo_thresh[d:0] */ -#define rpb_rxblo_thresh_msk 0x00003fff +#define HW_ATL_RPB_RXBLO_THRESH_MSK 0x00003fff /* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */ -#define rpb_rxblo_thresh_mskn 0xffffc000 +#define HW_ATL_RPB_RXBLO_THRESH_MSKN 0xffffc000 /* lower bit position of bitfield rx{b}_lo_thresh[d:0] */ -#define rpb_rxblo_thresh_shift 0 +#define HW_ATL_RPB_RXBLO_THRESH_SHIFT 0 /* width of bitfield rx{b}_lo_thresh[d:0] */ -#define rpb_rxblo_thresh_width 14 +#define HW_ATL_RPB_RXBLO_THRESH_WIDTH 14 /* default value of bitfield rx{b}_lo_thresh[d:0] */ -#define rpb_rxblo_thresh_default 0x0 +#define HW_ATL_RPB_RXBLO_THRESH_DEFAULT 0x0 /* rx rx_fc_mode[1:0] bitfield definitions * preprocessor definitions for the bitfield "rx_fc_mode[1:0]". @@ -584,17 +590,17 @@ */ /* register address for bitfield rx_fc_mode[1:0] */ -#define rpb_rx_fc_mode_adr 0x00005700 +#define HW_ATL_RPB_RX_FC_MODE_ADR 0x00005700 /* bitmask for bitfield rx_fc_mode[1:0] */ -#define rpb_rx_fc_mode_msk 0x00000030 +#define HW_ATL_RPB_RX_FC_MODE_MSK 0x00000030 /* inverted bitmask for bitfield rx_fc_mode[1:0] */ -#define rpb_rx_fc_mode_mskn 0xffffffcf +#define HW_ATL_RPB_RX_FC_MODE_MSKN 0xffffffcf /* lower bit position of bitfield rx_fc_mode[1:0] */ -#define rpb_rx_fc_mode_shift 4 +#define HW_ATL_RPB_RX_FC_MODE_SHIFT 4 /* width of bitfield rx_fc_mode[1:0] */ -#define rpb_rx_fc_mode_width 2 +#define HW_ATL_RPB_RX_FC_MODE_WIDTH 2 /* default value of bitfield rx_fc_mode[1:0] */ -#define rpb_rx_fc_mode_default 0x0 +#define HW_ATL_RPB_RX_FC_MODE_DEFAULT 0x0 /* rx rx{b}_buf_size[8:0] bitfield definitions * preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]". 
@@ -603,17 +609,17 @@ */ /* register address for bitfield rx{b}_buf_size[8:0] */ -#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10) +#define HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer) (0x00005710 + (buffer) * 0x10) /* bitmask for bitfield rx{b}_buf_size[8:0] */ -#define rpb_rxbbuf_size_msk 0x000001ff +#define HW_ATL_RPB_RXBBUF_SIZE_MSK 0x000001ff /* inverted bitmask for bitfield rx{b}_buf_size[8:0] */ -#define rpb_rxbbuf_size_mskn 0xfffffe00 +#define HW_ATL_RPB_RXBBUF_SIZE_MSKN 0xfffffe00 /* lower bit position of bitfield rx{b}_buf_size[8:0] */ -#define rpb_rxbbuf_size_shift 0 +#define HW_ATL_RPB_RXBBUF_SIZE_SHIFT 0 /* width of bitfield rx{b}_buf_size[8:0] */ -#define rpb_rxbbuf_size_width 9 +#define HW_ATL_RPB_RXBBUF_SIZE_WIDTH 9 /* default value of bitfield rx{b}_buf_size[8:0] */ -#define rpb_rxbbuf_size_default 0x0 +#define HW_ATL_RPB_RXBBUF_SIZE_DEFAULT 0x0 /* rx rx{b}_xoff_en bitfield definitions * preprocessor definitions for the bitfield "rx{b}_xoff_en". @@ -622,17 +628,17 @@ */ /* register address for bitfield rx{b}_xoff_en */ -#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10) +#define HW_ATL_RPB_RXBXOFF_EN_ADR(buffer) (0x00005714 + (buffer) * 0x10) /* bitmask for bitfield rx{b}_xoff_en */ -#define rpb_rxbxoff_en_msk 0x80000000 +#define HW_ATL_RPB_RXBXOFF_EN_MSK 0x80000000 /* inverted bitmask for bitfield rx{b}_xoff_en */ -#define rpb_rxbxoff_en_mskn 0x7fffffff +#define HW_ATL_RPB_RXBXOFF_EN_MSKN 0x7fffffff /* lower bit position of bitfield rx{b}_xoff_en */ -#define rpb_rxbxoff_en_shift 31 +#define HW_ATL_RPB_RXBXOFF_EN_SHIFT 31 /* width of bitfield rx{b}_xoff_en */ -#define rpb_rxbxoff_en_width 1 +#define HW_ATL_RPB_RXBXOFF_EN_WIDTH 1 /* default value of bitfield rx{b}_xoff_en */ -#define rpb_rxbxoff_en_default 0x0 +#define HW_ATL_RPB_RXBXOFF_EN_DEFAULT 0x0 /* rx l2_bc_thresh[f:0] bitfield definitions * preprocessor definitions for the bitfield "l2_bc_thresh[f:0]". @@ -640,17 +646,17 @@ */ /* register address for bitfield l2_bc_thresh[f:0] */ -#define rpfl2bc_thresh_adr 0x00005100 +#define HW_ATL_RPFL2BC_THRESH_ADR 0x00005100 /* bitmask for bitfield l2_bc_thresh[f:0] */ -#define rpfl2bc_thresh_msk 0xffff0000 +#define HW_ATL_RPFL2BC_THRESH_MSK 0xffff0000 /* inverted bitmask for bitfield l2_bc_thresh[f:0] */ -#define rpfl2bc_thresh_mskn 0x0000ffff +#define HW_ATL_RPFL2BC_THRESH_MSKN 0x0000ffff /* lower bit position of bitfield l2_bc_thresh[f:0] */ -#define rpfl2bc_thresh_shift 16 +#define HW_ATL_RPFL2BC_THRESH_SHIFT 16 /* width of bitfield l2_bc_thresh[f:0] */ -#define rpfl2bc_thresh_width 16 +#define HW_ATL_RPFL2BC_THRESH_WIDTH 16 /* default value of bitfield l2_bc_thresh[f:0] */ -#define rpfl2bc_thresh_default 0x0 +#define HW_ATL_RPFL2BC_THRESH_DEFAULT 0x0 /* rx l2_bc_en bitfield definitions * preprocessor definitions for the bitfield "l2_bc_en". 
@@ -658,17 +664,17 @@ */ /* register address for bitfield l2_bc_en */ -#define rpfl2bc_en_adr 0x00005100 +#define HW_ATL_RPFL2BC_EN_ADR 0x00005100 /* bitmask for bitfield l2_bc_en */ -#define rpfl2bc_en_msk 0x00000001 +#define HW_ATL_RPFL2BC_EN_MSK 0x00000001 /* inverted bitmask for bitfield l2_bc_en */ -#define rpfl2bc_en_mskn 0xfffffffe +#define HW_ATL_RPFL2BC_EN_MSKN 0xfffffffe /* lower bit position of bitfield l2_bc_en */ -#define rpfl2bc_en_shift 0 +#define HW_ATL_RPFL2BC_EN_SHIFT 0 /* width of bitfield l2_bc_en */ -#define rpfl2bc_en_width 1 +#define HW_ATL_RPFL2BC_EN_WIDTH 1 /* default value of bitfield l2_bc_en */ -#define rpfl2bc_en_default 0x0 +#define HW_ATL_RPFL2BC_EN_DEFAULT 0x0 /* rx l2_bc_act[2:0] bitfield definitions * preprocessor definitions for the bitfield "l2_bc_act[2:0]". @@ -676,17 +682,17 @@ */ /* register address for bitfield l2_bc_act[2:0] */ -#define rpfl2bc_act_adr 0x00005100 +#define HW_ATL_RPFL2BC_ACT_ADR 0x00005100 /* bitmask for bitfield l2_bc_act[2:0] */ -#define rpfl2bc_act_msk 0x00007000 +#define HW_ATL_RPFL2BC_ACT_MSK 0x00007000 /* inverted bitmask for bitfield l2_bc_act[2:0] */ -#define rpfl2bc_act_mskn 0xffff8fff +#define HW_ATL_RPFL2BC_ACT_MSKN 0xffff8fff /* lower bit position of bitfield l2_bc_act[2:0] */ -#define rpfl2bc_act_shift 12 +#define HW_ATL_RPFL2BC_ACT_SHIFT 12 /* width of bitfield l2_bc_act[2:0] */ -#define rpfl2bc_act_width 3 +#define HW_ATL_RPFL2BC_ACT_WIDTH 3 /* default value of bitfield l2_bc_act[2:0] */ -#define rpfl2bc_act_default 0x0 +#define HW_ATL_RPFL2BC_ACT_DEFAULT 0x0 /* rx l2_mc_en{f} bitfield definitions * preprocessor definitions for the bitfield "l2_mc_en{f}". @@ -695,17 +701,17 @@ */ /* register address for bitfield l2_mc_en{f} */ -#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4) +#define HW_ATL_RPFL2MC_ENF_ADR(filter) (0x00005250 + (filter) * 0x4) /* bitmask for bitfield l2_mc_en{f} */ -#define rpfl2mc_enf_msk 0x80000000 +#define HW_ATL_RPFL2MC_ENF_MSK 0x80000000 /* inverted bitmask for bitfield l2_mc_en{f} */ -#define rpfl2mc_enf_mskn 0x7fffffff +#define HW_ATL_RPFL2MC_ENF_MSKN 0x7fffffff /* lower bit position of bitfield l2_mc_en{f} */ -#define rpfl2mc_enf_shift 31 +#define HW_ATL_RPFL2MC_ENF_SHIFT 31 /* width of bitfield l2_mc_en{f} */ -#define rpfl2mc_enf_width 1 +#define HW_ATL_RPFL2MC_ENF_WIDTH 1 /* default value of bitfield l2_mc_en{f} */ -#define rpfl2mc_enf_default 0x0 +#define HW_ATL_RPFL2MC_ENF_DEFAULT 0x0 /* rx l2_promis_mode bitfield definitions * preprocessor definitions for the bitfield "l2_promis_mode". @@ -713,17 +719,17 @@ */ /* register address for bitfield l2_promis_mode */ -#define rpfl2promis_mode_adr 0x00005100 +#define HW_ATL_RPFL2PROMIS_MODE_ADR 0x00005100 /* bitmask for bitfield l2_promis_mode */ -#define rpfl2promis_mode_msk 0x00000008 +#define HW_ATL_RPFL2PROMIS_MODE_MSK 0x00000008 /* inverted bitmask for bitfield l2_promis_mode */ -#define rpfl2promis_mode_mskn 0xfffffff7 +#define HW_ATL_RPFL2PROMIS_MODE_MSKN 0xfffffff7 /* lower bit position of bitfield l2_promis_mode */ -#define rpfl2promis_mode_shift 3 +#define HW_ATL_RPFL2PROMIS_MODE_SHIFT 3 /* width of bitfield l2_promis_mode */ -#define rpfl2promis_mode_width 1 +#define HW_ATL_RPFL2PROMIS_MODE_WIDTH 1 /* default value of bitfield l2_promis_mode */ -#define rpfl2promis_mode_default 0x0 +#define HW_ATL_RPFL2PROMIS_MODE_DEFAULT 0x0 /* rx l2_uc_act{f}[2:0] bitfield definitions * preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]". 
@@ -732,17 +738,17 @@ */ /* register address for bitfield l2_uc_act{f}[2:0] */ -#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8) +#define HW_ATL_RPFL2UC_ACTF_ADR(filter) (0x00005114 + (filter) * 0x8) /* bitmask for bitfield l2_uc_act{f}[2:0] */ -#define rpfl2uc_actf_msk 0x00070000 +#define HW_ATL_RPFL2UC_ACTF_MSK 0x00070000 /* inverted bitmask for bitfield l2_uc_act{f}[2:0] */ -#define rpfl2uc_actf_mskn 0xfff8ffff +#define HW_ATL_RPFL2UC_ACTF_MSKN 0xfff8ffff /* lower bit position of bitfield l2_uc_act{f}[2:0] */ -#define rpfl2uc_actf_shift 16 +#define HW_ATL_RPFL2UC_ACTF_SHIFT 16 /* width of bitfield l2_uc_act{f}[2:0] */ -#define rpfl2uc_actf_width 3 +#define HW_ATL_RPFL2UC_ACTF_WIDTH 3 /* default value of bitfield l2_uc_act{f}[2:0] */ -#define rpfl2uc_actf_default 0x0 +#define HW_ATL_RPFL2UC_ACTF_DEFAULT 0x0 /* rx l2_uc_en{f} bitfield definitions * preprocessor definitions for the bitfield "l2_uc_en{f}". @@ -751,26 +757,26 @@ */ /* register address for bitfield l2_uc_en{f} */ -#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8) +#define HW_ATL_RPFL2UC_ENF_ADR(filter) (0x00005114 + (filter) * 0x8) /* bitmask for bitfield l2_uc_en{f} */ -#define rpfl2uc_enf_msk 0x80000000 +#define HW_ATL_RPFL2UC_ENF_MSK 0x80000000 /* inverted bitmask for bitfield l2_uc_en{f} */ -#define rpfl2uc_enf_mskn 0x7fffffff +#define HW_ATL_RPFL2UC_ENF_MSKN 0x7fffffff /* lower bit position of bitfield l2_uc_en{f} */ -#define rpfl2uc_enf_shift 31 +#define HW_ATL_RPFL2UC_ENF_SHIFT 31 /* width of bitfield l2_uc_en{f} */ -#define rpfl2uc_enf_width 1 +#define HW_ATL_RPFL2UC_ENF_WIDTH 1 /* default value of bitfield l2_uc_en{f} */ -#define rpfl2uc_enf_default 0x0 +#define HW_ATL_RPFL2UC_ENF_DEFAULT 0x0 /* register address for bitfield l2_uc_da{f}_lsw[1f:0] */ -#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8) +#define HW_ATL_RPFL2UC_DAFLSW_ADR(filter) (0x00005110 + (filter) * 0x8) /* register address for bitfield l2_uc_da{f}_msw[f:0] */ -#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8) +#define HW_ATL_RPFL2UC_DAFMSW_ADR(filter) (0x00005114 + (filter) * 0x8) /* bitmask for bitfield l2_uc_da{f}_msw[f:0] */ -#define rpfl2uc_dafmsw_msk 0x0000ffff +#define HW_ATL_RPFL2UC_DAFMSW_MSK 0x0000ffff /* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */ -#define rpfl2uc_dafmsw_shift 0 +#define HW_ATL_RPFL2UC_DAFMSW_SHIFT 0 /* rx l2_mc_accept_all bitfield definitions * Preprocessor definitions for the bitfield "l2_mc_accept_all". 
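The parameterized macros encode per-instance register strides: unicast filter f keeps the low 32 address bits at 0x5110 + f * 0x8, while the 16-bit high half shares 0x5114 with the filter's enable and action fields. A hedged sketch of programming and enabling filter f, reusing the hypothetical hw_atl_field_set() accessors above and assuming mac[0] is the most significant byte on the wire:

/* Hypothetical sketch: match 'mac' on L2 unicast filter 'f'. */
static void hw_atl_uc_filter_set(void __iomem *base, int f, const u8 mac[6])
{
	u32 lsw = (u32)mac[2] << 24 | (u32)mac[3] << 16 |
		  (u32)mac[4] << 8 | mac[5];
	u32 reg;

	writel(lsw, base + HW_ATL_RPFL2UC_DAFLSW_ADR(f));

	/* l2_uc_da{f}_msw shares its register with en{f} and act{f};
	 * replace only the 16-bit MSW field, then set the enable bit. */
	reg = readl(base + HW_ATL_RPFL2UC_DAFMSW_ADR(f));
	reg &= ~HW_ATL_RPFL2UC_DAFMSW_MSK;
	reg |= ((u32)mac[0] << 8 | mac[1]) << HW_ATL_RPFL2UC_DAFMSW_SHIFT;
	reg |= HW_ATL_RPFL2UC_ENF_MSK;
	writel(reg, base + HW_ATL_RPFL2UC_DAFMSW_ADR(f));
}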
@@ -778,22 +784,22 @@ */ /* Register address for bitfield l2_mc_accept_all */ -#define rpfl2mc_accept_all_adr 0x00005270 +#define HW_ATL_RPFL2MC_ACCEPT_ALL_ADR 0x00005270 /* Bitmask for bitfield l2_mc_accept_all */ -#define rpfl2mc_accept_all_msk 0x00004000 +#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSK 0x00004000 /* Inverted bitmask for bitfield l2_mc_accept_all */ -#define rpfl2mc_accept_all_mskn 0xFFFFBFFF +#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSKN 0xFFFFBFFF /* Lower bit position of bitfield l2_mc_accept_all */ -#define rpfl2mc_accept_all_shift 14 +#define HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT 14 /* Width of bitfield l2_mc_accept_all */ -#define rpfl2mc_accept_all_width 1 +#define HW_ATL_RPFL2MC_ACCEPT_ALL_WIDTH 1 /* Default value of bitfield l2_mc_accept_all */ -#define rpfl2mc_accept_all_default 0x0 +#define HW_ATL_RPFL2MC_ACCEPT_ALL_DEFAULT 0x0 /* width of bitfield rx_tc_up{t}[2:0] */ -#define rpf_rpb_rx_tc_upt_width 3 +#define HW_ATL_RPF_RPB_RX_TC_UPT_WIDTH 3 /* default value of bitfield rx_tc_up{t}[2:0] */ -#define rpf_rpb_rx_tc_upt_default 0x0 +#define HW_ATL_RPF_RPB_RX_TC_UPT_DEFAULT 0x0 /* rx rss_key_addr[4:0] bitfield definitions * preprocessor definitions for the bitfield "rss_key_addr[4:0]". @@ -801,17 +807,17 @@ */ /* register address for bitfield rss_key_addr[4:0] */ -#define rpf_rss_key_addr_adr 0x000054d0 +#define HW_ATL_RPF_RSS_KEY_ADDR_ADR 0x000054d0 /* bitmask for bitfield rss_key_addr[4:0] */ -#define rpf_rss_key_addr_msk 0x0000001f +#define HW_ATL_RPF_RSS_KEY_ADDR_MSK 0x0000001f /* inverted bitmask for bitfield rss_key_addr[4:0] */ -#define rpf_rss_key_addr_mskn 0xffffffe0 +#define HW_ATL_RPF_RSS_KEY_ADDR_MSKN 0xffffffe0 /* lower bit position of bitfield rss_key_addr[4:0] */ -#define rpf_rss_key_addr_shift 0 +#define HW_ATL_RPF_RSS_KEY_ADDR_SHIFT 0 /* width of bitfield rss_key_addr[4:0] */ -#define rpf_rss_key_addr_width 5 +#define HW_ATL_RPF_RSS_KEY_ADDR_WIDTH 5 /* default value of bitfield rss_key_addr[4:0] */ -#define rpf_rss_key_addr_default 0x0 +#define HW_ATL_RPF_RSS_KEY_ADDR_DEFAULT 0x0 /* rx rss_key_wr_data[1f:0] bitfield definitions * preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]". @@ -819,17 +825,17 @@ */ /* register address for bitfield rss_key_wr_data[1f:0] */ -#define rpf_rss_key_wr_data_adr 0x000054d4 +#define HW_ATL_RPF_RSS_KEY_WR_DATA_ADR 0x000054d4 /* bitmask for bitfield rss_key_wr_data[1f:0] */ -#define rpf_rss_key_wr_data_msk 0xffffffff +#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSK 0xffffffff /* inverted bitmask for bitfield rss_key_wr_data[1f:0] */ -#define rpf_rss_key_wr_data_mskn 0x00000000 +#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSKN 0x00000000 /* lower bit position of bitfield rss_key_wr_data[1f:0] */ -#define rpf_rss_key_wr_data_shift 0 +#define HW_ATL_RPF_RSS_KEY_WR_DATA_SHIFT 0 /* width of bitfield rss_key_wr_data[1f:0] */ -#define rpf_rss_key_wr_data_width 32 +#define HW_ATL_RPF_RSS_KEY_WR_DATA_WIDTH 32 /* default value of bitfield rss_key_wr_data[1f:0] */ -#define rpf_rss_key_wr_data_default 0x0 +#define HW_ATL_RPF_RSS_KEY_WR_DATA_DEFAULT 0x0 /* rx rss_key_wr_en_i bitfield definitions * preprocessor definitions for the bitfield "rss_key_wr_en_i". 
@@ -837,17 +843,17 @@ */ /* register address for bitfield rss_key_wr_en_i */ -#define rpf_rss_key_wr_eni_adr 0x000054d0 +#define HW_ATL_RPF_RSS_KEY_WR_ENI_ADR 0x000054d0 /* bitmask for bitfield rss_key_wr_en_i */ -#define rpf_rss_key_wr_eni_msk 0x00000020 +#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSK 0x00000020 /* inverted bitmask for bitfield rss_key_wr_en_i */ -#define rpf_rss_key_wr_eni_mskn 0xffffffdf +#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSKN 0xffffffdf /* lower bit position of bitfield rss_key_wr_en_i */ -#define rpf_rss_key_wr_eni_shift 5 +#define HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT 5 /* width of bitfield rss_key_wr_en_i */ -#define rpf_rss_key_wr_eni_width 1 +#define HW_ATL_RPF_RSS_KEY_WR_ENI_WIDTH 1 /* default value of bitfield rss_key_wr_en_i */ -#define rpf_rss_key_wr_eni_default 0x0 +#define HW_ATL_RPF_RSS_KEY_WR_ENI_DEFAULT 0x0 /* rx rss_redir_addr[3:0] bitfield definitions * preprocessor definitions for the bitfield "rss_redir_addr[3:0]". @@ -855,17 +861,17 @@ */ /* register address for bitfield rss_redir_addr[3:0] */ -#define rpf_rss_redir_addr_adr 0x000054e0 +#define HW_ATL_RPF_RSS_REDIR_ADDR_ADR 0x000054e0 /* bitmask for bitfield rss_redir_addr[3:0] */ -#define rpf_rss_redir_addr_msk 0x0000000f +#define HW_ATL_RPF_RSS_REDIR_ADDR_MSK 0x0000000f /* inverted bitmask for bitfield rss_redir_addr[3:0] */ -#define rpf_rss_redir_addr_mskn 0xfffffff0 +#define HW_ATL_RPF_RSS_REDIR_ADDR_MSKN 0xfffffff0 /* lower bit position of bitfield rss_redir_addr[3:0] */ -#define rpf_rss_redir_addr_shift 0 +#define HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT 0 /* width of bitfield rss_redir_addr[3:0] */ -#define rpf_rss_redir_addr_width 4 +#define HW_ATL_RPF_RSS_REDIR_ADDR_WIDTH 4 /* default value of bitfield rss_redir_addr[3:0] */ -#define rpf_rss_redir_addr_default 0x0 +#define HW_ATL_RPF_RSS_REDIR_ADDR_DEFAULT 0x0 /* rx rss_redir_wr_data[f:0] bitfield definitions * preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]". @@ -873,17 +879,17 @@ */ /* register address for bitfield rss_redir_wr_data[f:0] */ -#define rpf_rss_redir_wr_data_adr 0x000054e4 +#define HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR 0x000054e4 /* bitmask for bitfield rss_redir_wr_data[f:0] */ -#define rpf_rss_redir_wr_data_msk 0x0000ffff +#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK 0x0000ffff /* inverted bitmask for bitfield rss_redir_wr_data[f:0] */ -#define rpf_rss_redir_wr_data_mskn 0xffff0000 +#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSKN 0xffff0000 /* lower bit position of bitfield rss_redir_wr_data[f:0] */ -#define rpf_rss_redir_wr_data_shift 0 +#define HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT 0 /* width of bitfield rss_redir_wr_data[f:0] */ -#define rpf_rss_redir_wr_data_width 16 +#define HW_ATL_RPF_RSS_REDIR_WR_DATA_WIDTH 16 /* default value of bitfield rss_redir_wr_data[f:0] */ -#define rpf_rss_redir_wr_data_default 0x0 +#define HW_ATL_RPF_RSS_REDIR_WR_DATA_DEFAULT 0x0 /* rx rss_redir_wr_en_i bitfield definitions * preprocessor definitions for the bitfield "rss_redir_wr_en_i". 
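Taken together, rss_key_addr, rss_key_wr_data and rss_key_wr_en_i form a small indirect-write port: software loads a key dword, selects an index, raises the write strobe and waits for it to clear. A sketch under those assumptions (the self-clearing strobe and the dword-at-a-time protocol are inferred, not stated by the patch):

#include <linux/delay.h>
#include <linux/errno.h>

/* Hedged sketch: load 'ndwords' dwords of RSS key via the indirect port;
 * hw_atl_field_set() is the hypothetical helper from the earlier sketch. */
static int hw_atl_rss_key_load(void __iomem *base, const u32 *key, int ndwords)
{
	int i, retry;

	for (i = 0; i < ndwords; i++) {
		writel(key[i], base + HW_ATL_RPF_RSS_KEY_WR_DATA_ADR);
		hw_atl_field_set(base, HW_ATL_RPF_RSS_KEY_ADDR_ADR,
				 HW_ATL_RPF_RSS_KEY_ADDR_MSKN,
				 HW_ATL_RPF_RSS_KEY_ADDR_SHIFT, i);
		hw_atl_field_set(base, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
				 HW_ATL_RPF_RSS_KEY_WR_ENI_MSKN,
				 HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT, 1U);

		for (retry = 100; retry; retry--) {	/* wait for strobe */
			if (!(readl(base + HW_ATL_RPF_RSS_KEY_WR_ENI_ADR) &
			      HW_ATL_RPF_RSS_KEY_WR_ENI_MSK))
				break;
			udelay(10);
		}
		if (!retry)
			return -ETIMEDOUT;
	}
	return 0;
}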
@@ -891,17 +897,17 @@ */ /* register address for bitfield rss_redir_wr_en_i */ -#define rpf_rss_redir_wr_eni_adr 0x000054e0 +#define HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR 0x000054e0 /* bitmask for bitfield rss_redir_wr_en_i */ -#define rpf_rss_redir_wr_eni_msk 0x00000010 +#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK 0x00000010 /* inverted bitmask for bitfield rss_redir_wr_en_i */ -#define rpf_rss_redir_wr_eni_mskn 0xffffffef +#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSKN 0xffffffef /* lower bit position of bitfield rss_redir_wr_en_i */ -#define rpf_rss_redir_wr_eni_shift 4 +#define HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT 4 /* width of bitfield rss_redir_wr_en_i */ -#define rpf_rss_redir_wr_eni_width 1 +#define HW_ATL_RPF_RSS_REDIR_WR_ENI_WIDTH 1 /* default value of bitfield rss_redir_wr_en_i */ -#define rpf_rss_redir_wr_eni_default 0x0 +#define HW_ATL_RPF_RSS_REDIR_WR_ENI_DEFAULT 0x0 /* rx tpo_rpf_sys_loopback bitfield definitions * preprocessor definitions for the bitfield "tpo_rpf_sys_loopback". @@ -909,17 +915,17 @@ */ /* register address for bitfield tpo_rpf_sys_loopback */ -#define rpf_tpo_rpf_sys_lbk_adr 0x00005000 +#define HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR 0x00005000 /* bitmask for bitfield tpo_rpf_sys_loopback */ -#define rpf_tpo_rpf_sys_lbk_msk 0x00000100 +#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK 0x00000100 /* inverted bitmask for bitfield tpo_rpf_sys_loopback */ -#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff +#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSKN 0xfffffeff /* lower bit position of bitfield tpo_rpf_sys_loopback */ -#define rpf_tpo_rpf_sys_lbk_shift 8 +#define HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT 8 /* width of bitfield tpo_rpf_sys_loopback */ -#define rpf_tpo_rpf_sys_lbk_width 1 +#define HW_ATL_RPF_TPO_RPF_SYS_LBK_WIDTH 1 /* default value of bitfield tpo_rpf_sys_loopback */ -#define rpf_tpo_rpf_sys_lbk_default 0x0 +#define HW_ATL_RPF_TPO_RPF_SYS_LBK_DEFAULT 0x0 /* rx vl_inner_tpid[f:0] bitfield definitions * preprocessor definitions for the bitfield "vl_inner_tpid[f:0]". @@ -927,17 +933,17 @@ */ /* register address for bitfield vl_inner_tpid[f:0] */ -#define rpf_vl_inner_tpid_adr 0x00005284 +#define HW_ATL_RPF_VL_INNER_TPID_ADR 0x00005284 /* bitmask for bitfield vl_inner_tpid[f:0] */ -#define rpf_vl_inner_tpid_msk 0x0000ffff +#define HW_ATL_RPF_VL_INNER_TPID_MSK 0x0000ffff /* inverted bitmask for bitfield vl_inner_tpid[f:0] */ -#define rpf_vl_inner_tpid_mskn 0xffff0000 +#define HW_ATL_RPF_VL_INNER_TPID_MSKN 0xffff0000 /* lower bit position of bitfield vl_inner_tpid[f:0] */ -#define rpf_vl_inner_tpid_shift 0 +#define HW_ATL_RPF_VL_INNER_TPID_SHIFT 0 /* width of bitfield vl_inner_tpid[f:0] */ -#define rpf_vl_inner_tpid_width 16 +#define HW_ATL_RPF_VL_INNER_TPID_WIDTH 16 /* default value of bitfield vl_inner_tpid[f:0] */ -#define rpf_vl_inner_tpid_default 0x8100 +#define HW_ATL_RPF_VL_INNER_TPID_DEFAULT 0x8100 /* rx vl_outer_tpid[f:0] bitfield definitions * preprocessor definitions for the bitfield "vl_outer_tpid[f:0]". 
@@ -945,17 +951,17 @@ */ /* register address for bitfield vl_outer_tpid[f:0] */ -#define rpf_vl_outer_tpid_adr 0x00005284 +#define HW_ATL_RPF_VL_OUTER_TPID_ADR 0x00005284 /* bitmask for bitfield vl_outer_tpid[f:0] */ -#define rpf_vl_outer_tpid_msk 0xffff0000 +#define HW_ATL_RPF_VL_OUTER_TPID_MSK 0xffff0000 /* inverted bitmask for bitfield vl_outer_tpid[f:0] */ -#define rpf_vl_outer_tpid_mskn 0x0000ffff +#define HW_ATL_RPF_VL_OUTER_TPID_MSKN 0x0000ffff /* lower bit position of bitfield vl_outer_tpid[f:0] */ -#define rpf_vl_outer_tpid_shift 16 +#define HW_ATL_RPF_VL_OUTER_TPID_SHIFT 16 /* width of bitfield vl_outer_tpid[f:0] */ -#define rpf_vl_outer_tpid_width 16 +#define HW_ATL_RPF_VL_OUTER_TPID_WIDTH 16 /* default value of bitfield vl_outer_tpid[f:0] */ -#define rpf_vl_outer_tpid_default 0x88a8 +#define HW_ATL_RPF_VL_OUTER_TPID_DEFAULT 0x88a8 /* rx vl_promis_mode bitfield definitions * preprocessor definitions for the bitfield "vl_promis_mode". @@ -963,17 +969,17 @@ */ /* register address for bitfield vl_promis_mode */ -#define rpf_vl_promis_mode_adr 0x00005280 +#define HW_ATL_RPF_VL_PROMIS_MODE_ADR 0x00005280 /* bitmask for bitfield vl_promis_mode */ -#define rpf_vl_promis_mode_msk 0x00000002 +#define HW_ATL_RPF_VL_PROMIS_MODE_MSK 0x00000002 /* inverted bitmask for bitfield vl_promis_mode */ -#define rpf_vl_promis_mode_mskn 0xfffffffd +#define HW_ATL_RPF_VL_PROMIS_MODE_MSKN 0xfffffffd /* lower bit position of bitfield vl_promis_mode */ -#define rpf_vl_promis_mode_shift 1 +#define HW_ATL_RPF_VL_PROMIS_MODE_SHIFT 1 /* width of bitfield vl_promis_mode */ -#define rpf_vl_promis_mode_width 1 +#define HW_ATL_RPF_VL_PROMIS_MODE_WIDTH 1 /* default value of bitfield vl_promis_mode */ -#define rpf_vl_promis_mode_default 0x0 +#define HW_ATL_RPF_VL_PROMIS_MODE_DEFAULT 0x0 /* RX vl_accept_untagged_mode Bitfield Definitions * Preprocessor definitions for the bitfield "vl_accept_untagged_mode". @@ -981,17 +987,17 @@ */ /* Register address for bitfield vl_accept_untagged_mode */ -#define rpf_vl_accept_untagged_mode_adr 0x00005280 +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR 0x00005280 /* Bitmask for bitfield vl_accept_untagged_mode */ -#define rpf_vl_accept_untagged_mode_msk 0x00000004 +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK 0x00000004 /* Inverted bitmask for bitfield vl_accept_untagged_mode */ -#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSKN 0xFFFFFFFB /* Lower bit position of bitfield vl_accept_untagged_mode */ -#define rpf_vl_accept_untagged_mode_shift 2 +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT 2 /* Width of bitfield vl_accept_untagged_mode */ -#define rpf_vl_accept_untagged_mode_width 1 +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_WIDTH 1 /* Default value of bitfield vl_accept_untagged_mode */ -#define rpf_vl_accept_untagged_mode_default 0x0 +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_DEFAULT 0x0 /* rX vl_untagged_act[2:0] Bitfield Definitions * Preprocessor definitions for the bitfield "vl_untagged_act[2:0]". 
@@ -999,17 +1005,17 @@ */ /* Register address for bitfield vl_untagged_act[2:0] */ -#define rpf_vl_untagged_act_adr 0x00005280 +#define HW_ATL_RPF_VL_UNTAGGED_ACT_ADR 0x00005280 /* Bitmask for bitfield vl_untagged_act[2:0] */ -#define rpf_vl_untagged_act_msk 0x00000038 +#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSK 0x00000038 /* Inverted bitmask for bitfield vl_untagged_act[2:0] */ -#define rpf_vl_untagged_act_mskn 0xFFFFFFC7 +#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSKN 0xFFFFFFC7 /* Lower bit position of bitfield vl_untagged_act[2:0] */ -#define rpf_vl_untagged_act_shift 3 +#define HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT 3 /* Width of bitfield vl_untagged_act[2:0] */ -#define rpf_vl_untagged_act_width 3 +#define HW_ATL_RPF_VL_UNTAGGED_ACT_WIDTH 3 /* Default value of bitfield vl_untagged_act[2:0] */ -#define rpf_vl_untagged_act_default 0x0 +#define HW_ATL_RPF_VL_UNTAGGED_ACT_DEFAULT 0x0 /* RX vl_en{F} Bitfield Definitions * Preprocessor definitions for the bitfield "vl_en{F}". @@ -1018,17 +1024,17 @@ */ /* Register address for bitfield vl_en{F} */ -#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4) +#define HW_ATL_RPF_VL_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4) /* Bitmask for bitfield vl_en{F} */ -#define rpf_vl_en_f_msk 0x80000000 +#define HW_ATL_RPF_VL_EN_F_MSK 0x80000000 /* Inverted bitmask for bitfield vl_en{F} */ -#define rpf_vl_en_f_mskn 0x7FFFFFFF +#define HW_ATL_RPF_VL_EN_F_MSKN 0x7FFFFFFF /* Lower bit position of bitfield vl_en{F} */ -#define rpf_vl_en_f_shift 31 +#define HW_ATL_RPF_VL_EN_F_SHIFT 31 /* Width of bitfield vl_en{F} */ -#define rpf_vl_en_f_width 1 +#define HW_ATL_RPF_VL_EN_F_WIDTH 1 /* Default value of bitfield vl_en{F} */ -#define rpf_vl_en_f_default 0x0 +#define HW_ATL_RPF_VL_EN_F_DEFAULT 0x0 /* RX vl_act{F}[2:0] Bitfield Definitions * Preprocessor definitions for the bitfield "vl_act{F}[2:0]". @@ -1037,17 +1043,17 @@ */ /* Register address for bitfield vl_act{F}[2:0] */ -#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4) +#define HW_ATL_RPF_VL_ACT_F_ADR(filter) (0x00005290 + (filter) * 0x4) /* Bitmask for bitfield vl_act{F}[2:0] */ -#define rpf_vl_act_f_msk 0x00070000 +#define HW_ATL_RPF_VL_ACT_F_MSK 0x00070000 /* Inverted bitmask for bitfield vl_act{F}[2:0] */ -#define rpf_vl_act_f_mskn 0xFFF8FFFF +#define HW_ATL_RPF_VL_ACT_F_MSKN 0xFFF8FFFF /* Lower bit position of bitfield vl_act{F}[2:0] */ -#define rpf_vl_act_f_shift 16 +#define HW_ATL_RPF_VL_ACT_F_SHIFT 16 /* Width of bitfield vl_act{F}[2:0] */ -#define rpf_vl_act_f_width 3 +#define HW_ATL_RPF_VL_ACT_F_WIDTH 3 /* Default value of bitfield vl_act{F}[2:0] */ -#define rpf_vl_act_f_default 0x0 +#define HW_ATL_RPF_VL_ACT_F_DEFAULT 0x0 /* RX vl_id{F}[B:0] Bitfield Definitions * Preprocessor definitions for the bitfield "vl_id{F}[B:0]". 
@@ -1056,17 +1062,17 @@ */ /* Register address for bitfield vl_id{F}[B:0] */ -#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4) +#define HW_ATL_RPF_VL_ID_F_ADR(filter) (0x00005290 + (filter) * 0x4) /* Bitmask for bitfield vl_id{F}[B:0] */ -#define rpf_vl_id_f_msk 0x00000FFF +#define HW_ATL_RPF_VL_ID_F_MSK 0x00000FFF /* Inverted bitmask for bitfield vl_id{F}[B:0] */ -#define rpf_vl_id_f_mskn 0xFFFFF000 +#define HW_ATL_RPF_VL_ID_F_MSKN 0xFFFFF000 /* Lower bit position of bitfield vl_id{F}[B:0] */ -#define rpf_vl_id_f_shift 0 +#define HW_ATL_RPF_VL_ID_F_SHIFT 0 /* Width of bitfield vl_id{F}[B:0] */ -#define rpf_vl_id_f_width 12 +#define HW_ATL_RPF_VL_ID_F_WIDTH 12 /* Default value of bitfield vl_id{F}[B:0] */ -#define rpf_vl_id_f_default 0x0 +#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0 /* RX et_en{F} Bitfield Definitions * Preprocessor definitions for the bitfield "et_en{F}". @@ -1075,17 +1081,17 @@ */ /* Register address for bitfield et_en{F} */ -#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4) +#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4) /* Bitmask for bitfield et_en{F} */ -#define rpf_et_en_f_msk 0x80000000 +#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000 /* Inverted bitmask for bitfield et_en{F} */ -#define rpf_et_en_f_mskn 0x7FFFFFFF +#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF /* Lower bit position of bitfield et_en{F} */ -#define rpf_et_en_f_shift 31 +#define HW_ATL_RPF_ET_EN_F_SHIFT 31 /* Width of bitfield et_en{F} */ -#define rpf_et_en_f_width 1 +#define HW_ATL_RPF_ET_EN_F_WIDTH 1 /* Default value of bitfield et_en{F} */ -#define rpf_et_en_f_default 0x0 +#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0 /* rx et_en{f} bitfield definitions * preprocessor definitions for the bitfield "et_en{f}". @@ -1094,17 +1100,17 @@ */ /* register address for bitfield et_en{f} */ -#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4) +#define HW_ATL_RPF_ET_ENF_ADR(filter) (0x00005300 + (filter) * 0x4) /* bitmask for bitfield et_en{f} */ -#define rpf_et_enf_msk 0x80000000 +#define HW_ATL_RPF_ET_ENF_MSK 0x80000000 /* inverted bitmask for bitfield et_en{f} */ -#define rpf_et_enf_mskn 0x7fffffff +#define HW_ATL_RPF_ET_ENF_MSKN 0x7fffffff /* lower bit position of bitfield et_en{f} */ -#define rpf_et_enf_shift 31 +#define HW_ATL_RPF_ET_ENF_SHIFT 31 /* width of bitfield et_en{f} */ -#define rpf_et_enf_width 1 +#define HW_ATL_RPF_ET_ENF_WIDTH 1 /* default value of bitfield et_en{f} */ -#define rpf_et_enf_default 0x0 +#define HW_ATL_RPF_ET_ENF_DEFAULT 0x0 /* rx et_up{f}_en bitfield definitions * preprocessor definitions for the bitfield "et_up{f}_en". @@ -1113,17 +1119,17 @@ */ /* register address for bitfield et_up{f}_en */ -#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4) +#define HW_ATL_RPF_ET_UPFEN_ADR(filter) (0x00005300 + (filter) * 0x4) /* bitmask for bitfield et_up{f}_en */ -#define rpf_et_upfen_msk 0x40000000 +#define HW_ATL_RPF_ET_UPFEN_MSK 0x40000000 /* inverted bitmask for bitfield et_up{f}_en */ -#define rpf_et_upfen_mskn 0xbfffffff +#define HW_ATL_RPF_ET_UPFEN_MSKN 0xbfffffff /* lower bit position of bitfield et_up{f}_en */ -#define rpf_et_upfen_shift 30 +#define HW_ATL_RPF_ET_UPFEN_SHIFT 30 /* width of bitfield et_up{f}_en */ -#define rpf_et_upfen_width 1 +#define HW_ATL_RPF_ET_UPFEN_WIDTH 1 /* default value of bitfield et_up{f}_en */ -#define rpf_et_upfen_default 0x0 +#define HW_ATL_RPF_ET_UPFEN_DEFAULT 0x0 /* rx et_rxq{f}_en bitfield definitions * preprocessor definitions for the bitfield "et_rxq{f}_en". 
@@ -1132,17 +1138,17 @@ */ /* register address for bitfield et_rxq{f}_en */ -#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4) +#define HW_ATL_RPF_ET_RXQFEN_ADR(filter) (0x00005300 + (filter) * 0x4) /* bitmask for bitfield et_rxq{f}_en */ -#define rpf_et_rxqfen_msk 0x20000000 +#define HW_ATL_RPF_ET_RXQFEN_MSK 0x20000000 /* inverted bitmask for bitfield et_rxq{f}_en */ -#define rpf_et_rxqfen_mskn 0xdfffffff +#define HW_ATL_RPF_ET_RXQFEN_MSKN 0xdfffffff /* lower bit position of bitfield et_rxq{f}_en */ -#define rpf_et_rxqfen_shift 29 +#define HW_ATL_RPF_ET_RXQFEN_SHIFT 29 /* width of bitfield et_rxq{f}_en */ -#define rpf_et_rxqfen_width 1 +#define HW_ATL_RPF_ET_RXQFEN_WIDTH 1 /* default value of bitfield et_rxq{f}_en */ -#define rpf_et_rxqfen_default 0x0 +#define HW_ATL_RPF_ET_RXQFEN_DEFAULT 0x0 /* rx et_up{f}[2:0] bitfield definitions * preprocessor definitions for the bitfield "et_up{f}[2:0]". @@ -1151,17 +1157,17 @@ */ /* register address for bitfield et_up{f}[2:0] */ -#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4) +#define HW_ATL_RPF_ET_UPF_ADR(filter) (0x00005300 + (filter) * 0x4) /* bitmask for bitfield et_up{f}[2:0] */ -#define rpf_et_upf_msk 0x1c000000 +#define HW_ATL_RPF_ET_UPF_MSK 0x1c000000 /* inverted bitmask for bitfield et_up{f}[2:0] */ -#define rpf_et_upf_mskn 0xe3ffffff +#define HW_ATL_RPF_ET_UPF_MSKN 0xe3ffffff /* lower bit position of bitfield et_up{f}[2:0] */ -#define rpf_et_upf_shift 26 +#define HW_ATL_RPF_ET_UPF_SHIFT 26 /* width of bitfield et_up{f}[2:0] */ -#define rpf_et_upf_width 3 +#define HW_ATL_RPF_ET_UPF_WIDTH 3 /* default value of bitfield et_up{f}[2:0] */ -#define rpf_et_upf_default 0x0 +#define HW_ATL_RPF_ET_UPF_DEFAULT 0x0 /* rx et_rxq{f}[4:0] bitfield definitions * preprocessor definitions for the bitfield "et_rxq{f}[4:0]". @@ -1170,17 +1176,17 @@ */ /* register address for bitfield et_rxq{f}[4:0] */ -#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4) +#define HW_ATL_RPF_ET_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4) /* bitmask for bitfield et_rxq{f}[4:0] */ -#define rpf_et_rxqf_msk 0x01f00000 +#define HW_ATL_RPF_ET_RXQF_MSK 0x01f00000 /* inverted bitmask for bitfield et_rxq{f}[4:0] */ -#define rpf_et_rxqf_mskn 0xfe0fffff +#define HW_ATL_RPF_ET_RXQF_MSKN 0xfe0fffff /* lower bit position of bitfield et_rxq{f}[4:0] */ -#define rpf_et_rxqf_shift 20 +#define HW_ATL_RPF_ET_RXQF_SHIFT 20 /* width of bitfield et_rxq{f}[4:0] */ -#define rpf_et_rxqf_width 5 +#define HW_ATL_RPF_ET_RXQF_WIDTH 5 /* default value of bitfield et_rxq{f}[4:0] */ -#define rpf_et_rxqf_default 0x0 +#define HW_ATL_RPF_ET_RXQF_DEFAULT 0x0 /* rx et_mng_rxq{f} bitfield definitions * preprocessor definitions for the bitfield "et_mng_rxq{f}". 
@@ -1189,17 +1195,17 @@ */ /* register address for bitfield et_mng_rxq{f} */ -#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4) +#define HW_ATL_RPF_ET_MNG_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4) /* bitmask for bitfield et_mng_rxq{f} */ -#define rpf_et_mng_rxqf_msk 0x00080000 +#define HW_ATL_RPF_ET_MNG_RXQF_MSK 0x00080000 /* inverted bitmask for bitfield et_mng_rxq{f} */ -#define rpf_et_mng_rxqf_mskn 0xfff7ffff +#define HW_ATL_RPF_ET_MNG_RXQF_MSKN 0xfff7ffff /* lower bit position of bitfield et_mng_rxq{f} */ -#define rpf_et_mng_rxqf_shift 19 +#define HW_ATL_RPF_ET_MNG_RXQF_SHIFT 19 /* width of bitfield et_mng_rxq{f} */ -#define rpf_et_mng_rxqf_width 1 +#define HW_ATL_RPF_ET_MNG_RXQF_WIDTH 1 /* default value of bitfield et_mng_rxq{f} */ -#define rpf_et_mng_rxqf_default 0x0 +#define HW_ATL_RPF_ET_MNG_RXQF_DEFAULT 0x0 /* rx et_act{f}[2:0] bitfield definitions * preprocessor definitions for the bitfield "et_act{f}[2:0]". @@ -1208,17 +1214,17 @@ */ /* register address for bitfield et_act{f}[2:0] */ -#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4) +#define HW_ATL_RPF_ET_ACTF_ADR(filter) (0x00005300 + (filter) * 0x4) /* bitmask for bitfield et_act{f}[2:0] */ -#define rpf_et_actf_msk 0x00070000 +#define HW_ATL_RPF_ET_ACTF_MSK 0x00070000 /* inverted bitmask for bitfield et_act{f}[2:0] */ -#define rpf_et_actf_mskn 0xfff8ffff +#define HW_ATL_RPF_ET_ACTF_MSKN 0xfff8ffff /* lower bit position of bitfield et_act{f}[2:0] */ -#define rpf_et_actf_shift 16 +#define HW_ATL_RPF_ET_ACTF_SHIFT 16 /* width of bitfield et_act{f}[2:0] */ -#define rpf_et_actf_width 3 +#define HW_ATL_RPF_ET_ACTF_WIDTH 3 /* default value of bitfield et_act{f}[2:0] */ -#define rpf_et_actf_default 0x0 +#define HW_ATL_RPF_ET_ACTF_DEFAULT 0x0 /* rx et_val{f}[f:0] bitfield definitions * preprocessor definitions for the bitfield "et_val{f}[f:0]". @@ -1227,17 +1233,17 @@ */ /* register address for bitfield et_val{f}[f:0] */ -#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4) +#define HW_ATL_RPF_ET_VALF_ADR(filter) (0x00005300 + (filter) * 0x4) /* bitmask for bitfield et_val{f}[f:0] */ -#define rpf_et_valf_msk 0x0000ffff +#define HW_ATL_RPF_ET_VALF_MSK 0x0000ffff /* inverted bitmask for bitfield et_val{f}[f:0] */ -#define rpf_et_valf_mskn 0xffff0000 +#define HW_ATL_RPF_ET_VALF_MSKN 0xffff0000 /* lower bit position of bitfield et_val{f}[f:0] */ -#define rpf_et_valf_shift 0 +#define HW_ATL_RPF_ET_VALF_SHIFT 0 /* width of bitfield et_val{f}[f:0] */ -#define rpf_et_valf_width 16 +#define HW_ATL_RPF_ET_VALF_WIDTH 16 /* default value of bitfield et_val{f}[f:0] */ -#define rpf_et_valf_default 0x0 +#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0 /* rx ipv4_chk_en bitfield definitions * preprocessor definitions for the bitfield "ipv4_chk_en". 
@@ -1245,17 +1251,17 @@ */ /* register address for bitfield ipv4_chk_en */ -#define rpo_ipv4chk_en_adr 0x00005580 +#define HW_ATL_RPO_IPV4CHK_EN_ADR 0x00005580 /* bitmask for bitfield ipv4_chk_en */ -#define rpo_ipv4chk_en_msk 0x00000002 +#define HW_ATL_RPO_IPV4CHK_EN_MSK 0x00000002 /* inverted bitmask for bitfield ipv4_chk_en */ -#define rpo_ipv4chk_en_mskn 0xfffffffd +#define HW_ATL_RPO_IPV4CHK_EN_MSKN 0xfffffffd /* lower bit position of bitfield ipv4_chk_en */ -#define rpo_ipv4chk_en_shift 1 +#define HW_ATL_RPO_IPV4CHK_EN_SHIFT 1 /* width of bitfield ipv4_chk_en */ -#define rpo_ipv4chk_en_width 1 +#define HW_ATL_RPO_IPV4CHK_EN_WIDTH 1 /* default value of bitfield ipv4_chk_en */ -#define rpo_ipv4chk_en_default 0x0 +#define HW_ATL_RPO_IPV4CHK_EN_DEFAULT 0x0 /* rx desc{d}_vl_strip bitfield definitions * preprocessor definitions for the bitfield "desc{d}_vl_strip". @@ -1264,17 +1270,18 @@ */ /* register address for bitfield desc{d}_vl_strip */ -#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20) +#define HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor) \ + (0x00005b08 + (descriptor) * 0x20) /* bitmask for bitfield desc{d}_vl_strip */ -#define rpo_descdvl_strip_msk 0x20000000 +#define HW_ATL_RPO_DESCDVL_STRIP_MSK 0x20000000 /* inverted bitmask for bitfield desc{d}_vl_strip */ -#define rpo_descdvl_strip_mskn 0xdfffffff +#define HW_ATL_RPO_DESCDVL_STRIP_MSKN 0xdfffffff /* lower bit position of bitfield desc{d}_vl_strip */ -#define rpo_descdvl_strip_shift 29 +#define HW_ATL_RPO_DESCDVL_STRIP_SHIFT 29 /* width of bitfield desc{d}_vl_strip */ -#define rpo_descdvl_strip_width 1 +#define HW_ATL_RPO_DESCDVL_STRIP_WIDTH 1 /* default value of bitfield desc{d}_vl_strip */ -#define rpo_descdvl_strip_default 0x0 +#define HW_ATL_RPO_DESCDVL_STRIP_DEFAULT 0x0 /* rx l4_chk_en bitfield definitions * preprocessor definitions for the bitfield "l4_chk_en". @@ -1282,17 +1289,17 @@ */ /* register address for bitfield l4_chk_en */ -#define rpol4chk_en_adr 0x00005580 +#define HW_ATL_RPOL4CHK_EN_ADR 0x00005580 /* bitmask for bitfield l4_chk_en */ -#define rpol4chk_en_msk 0x00000001 +#define HW_ATL_RPOL4CHK_EN_MSK 0x00000001 /* inverted bitmask for bitfield l4_chk_en */ -#define rpol4chk_en_mskn 0xfffffffe +#define HW_ATL_RPOL4CHK_EN_MSKN 0xfffffffe /* lower bit position of bitfield l4_chk_en */ -#define rpol4chk_en_shift 0 +#define HW_ATL_RPOL4CHK_EN_SHIFT 0 /* width of bitfield l4_chk_en */ -#define rpol4chk_en_width 1 +#define HW_ATL_RPOL4CHK_EN_WIDTH 1 /* default value of bitfield l4_chk_en */ -#define rpol4chk_en_default 0x0 +#define HW_ATL_RPOL4CHK_EN_DEFAULT 0x0 /* rx reg_res_dsbl bitfield definitions * preprocessor definitions for the bitfield "reg_res_dsbl". 
@@ -1300,17 +1307,17 @@ */ /* register address for bitfield reg_res_dsbl */ -#define rx_reg_res_dsbl_adr 0x00005000 +#define HW_ATL_RX_REG_RES_DSBL_ADR 0x00005000 /* bitmask for bitfield reg_res_dsbl */ -#define rx_reg_res_dsbl_msk 0x20000000 +#define HW_ATL_RX_REG_RES_DSBL_MSK 0x20000000 /* inverted bitmask for bitfield reg_res_dsbl */ -#define rx_reg_res_dsbl_mskn 0xdfffffff +#define HW_ATL_RX_REG_RES_DSBL_MSKN 0xdfffffff /* lower bit position of bitfield reg_res_dsbl */ -#define rx_reg_res_dsbl_shift 29 +#define HW_ATL_RX_REG_RES_DSBL_SHIFT 29 /* width of bitfield reg_res_dsbl */ -#define rx_reg_res_dsbl_width 1 +#define HW_ATL_RX_REG_RES_DSBL_WIDTH 1 /* default value of bitfield reg_res_dsbl */ -#define rx_reg_res_dsbl_default 0x1 +#define HW_ATL_RX_REG_RES_DSBL_DEFAULT 0x1 /* tx dca{d}_cpuid[7:0] bitfield definitions * preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]". @@ -1319,17 +1326,17 @@ */ /* register address for bitfield dca{d}_cpuid[7:0] */ -#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4) +#define HW_ATL_TDM_DCADCPUID_ADR(dca) (0x00008400 + (dca) * 0x4) /* bitmask for bitfield dca{d}_cpuid[7:0] */ -#define tdm_dcadcpuid_msk 0x000000ff +#define HW_ATL_TDM_DCADCPUID_MSK 0x000000ff /* inverted bitmask for bitfield dca{d}_cpuid[7:0] */ -#define tdm_dcadcpuid_mskn 0xffffff00 +#define HW_ATL_TDM_DCADCPUID_MSKN 0xffffff00 /* lower bit position of bitfield dca{d}_cpuid[7:0] */ -#define tdm_dcadcpuid_shift 0 +#define HW_ATL_TDM_DCADCPUID_SHIFT 0 /* width of bitfield dca{d}_cpuid[7:0] */ -#define tdm_dcadcpuid_width 8 +#define HW_ATL_TDM_DCADCPUID_WIDTH 8 /* default value of bitfield dca{d}_cpuid[7:0] */ -#define tdm_dcadcpuid_default 0x0 +#define HW_ATL_TDM_DCADCPUID_DEFAULT 0x0 /* tx lso_en[1f:0] bitfield definitions * preprocessor definitions for the bitfield "lso_en[1f:0]". @@ -1337,17 +1344,17 @@ */ /* register address for bitfield lso_en[1f:0] */ -#define tdm_lso_en_adr 0x00007810 +#define HW_ATL_TDM_LSO_EN_ADR 0x00007810 /* bitmask for bitfield lso_en[1f:0] */ -#define tdm_lso_en_msk 0xffffffff +#define HW_ATL_TDM_LSO_EN_MSK 0xffffffff /* inverted bitmask for bitfield lso_en[1f:0] */ -#define tdm_lso_en_mskn 0x00000000 +#define HW_ATL_TDM_LSO_EN_MSKN 0x00000000 /* lower bit position of bitfield lso_en[1f:0] */ -#define tdm_lso_en_shift 0 +#define HW_ATL_TDM_LSO_EN_SHIFT 0 /* width of bitfield lso_en[1f:0] */ -#define tdm_lso_en_width 32 +#define HW_ATL_TDM_LSO_EN_WIDTH 32 /* default value of bitfield lso_en[1f:0] */ -#define tdm_lso_en_default 0x0 +#define HW_ATL_TDM_LSO_EN_DEFAULT 0x0 /* tx dca_en bitfield definitions * preprocessor definitions for the bitfield "dca_en". @@ -1355,17 +1362,17 @@ */ /* register address for bitfield dca_en */ -#define tdm_dca_en_adr 0x00008480 +#define HW_ATL_TDM_DCA_EN_ADR 0x00008480 /* bitmask for bitfield dca_en */ -#define tdm_dca_en_msk 0x80000000 +#define HW_ATL_TDM_DCA_EN_MSK 0x80000000 /* inverted bitmask for bitfield dca_en */ -#define tdm_dca_en_mskn 0x7fffffff +#define HW_ATL_TDM_DCA_EN_MSKN 0x7fffffff /* lower bit position of bitfield dca_en */ -#define tdm_dca_en_shift 31 +#define HW_ATL_TDM_DCA_EN_SHIFT 31 /* width of bitfield dca_en */ -#define tdm_dca_en_width 1 +#define HW_ATL_TDM_DCA_EN_WIDTH 1 /* default value of bitfield dca_en */ -#define tdm_dca_en_default 0x1 +#define HW_ATL_TDM_DCA_EN_DEFAULT 0x1 /* tx dca_mode[3:0] bitfield definitions * preprocessor definitions for the bitfield "dca_mode[3:0]". 
@@ -1373,17 +1380,17 @@ */ /* register address for bitfield dca_mode[3:0] */ -#define tdm_dca_mode_adr 0x00008480 +#define HW_ATL_TDM_DCA_MODE_ADR 0x00008480 /* bitmask for bitfield dca_mode[3:0] */ -#define tdm_dca_mode_msk 0x0000000f +#define HW_ATL_TDM_DCA_MODE_MSK 0x0000000f /* inverted bitmask for bitfield dca_mode[3:0] */ -#define tdm_dca_mode_mskn 0xfffffff0 +#define HW_ATL_TDM_DCA_MODE_MSKN 0xfffffff0 /* lower bit position of bitfield dca_mode[3:0] */ -#define tdm_dca_mode_shift 0 +#define HW_ATL_TDM_DCA_MODE_SHIFT 0 /* width of bitfield dca_mode[3:0] */ -#define tdm_dca_mode_width 4 +#define HW_ATL_TDM_DCA_MODE_WIDTH 4 /* default value of bitfield dca_mode[3:0] */ -#define tdm_dca_mode_default 0x0 +#define HW_ATL_TDM_DCA_MODE_DEFAULT 0x0 /* tx dca{d}_desc_en bitfield definitions * preprocessor definitions for the bitfield "dca{d}_desc_en". @@ -1392,17 +1399,17 @@ */ /* register address for bitfield dca{d}_desc_en */ -#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4) +#define HW_ATL_TDM_DCADDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4) /* bitmask for bitfield dca{d}_desc_en */ -#define tdm_dcaddesc_en_msk 0x80000000 +#define HW_ATL_TDM_DCADDESC_EN_MSK 0x80000000 /* inverted bitmask for bitfield dca{d}_desc_en */ -#define tdm_dcaddesc_en_mskn 0x7fffffff +#define HW_ATL_TDM_DCADDESC_EN_MSKN 0x7fffffff /* lower bit position of bitfield dca{d}_desc_en */ -#define tdm_dcaddesc_en_shift 31 +#define HW_ATL_TDM_DCADDESC_EN_SHIFT 31 /* width of bitfield dca{d}_desc_en */ -#define tdm_dcaddesc_en_width 1 +#define HW_ATL_TDM_DCADDESC_EN_WIDTH 1 /* default value of bitfield dca{d}_desc_en */ -#define tdm_dcaddesc_en_default 0x0 +#define HW_ATL_TDM_DCADDESC_EN_DEFAULT 0x0 /* tx desc{d}_en bitfield definitions * preprocessor definitions for the bitfield "desc{d}_en". @@ -1411,17 +1418,17 @@ */ /* register address for bitfield desc{d}_en */ -#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40) +#define HW_ATL_TDM_DESCDEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40) /* bitmask for bitfield desc{d}_en */ -#define tdm_descden_msk 0x80000000 +#define HW_ATL_TDM_DESCDEN_MSK 0x80000000 /* inverted bitmask for bitfield desc{d}_en */ -#define tdm_descden_mskn 0x7fffffff +#define HW_ATL_TDM_DESCDEN_MSKN 0x7fffffff /* lower bit position of bitfield desc{d}_en */ -#define tdm_descden_shift 31 +#define HW_ATL_TDM_DESCDEN_SHIFT 31 /* width of bitfield desc{d}_en */ -#define tdm_descden_width 1 +#define HW_ATL_TDM_DESCDEN_WIDTH 1 /* default value of bitfield desc{d}_en */ -#define tdm_descden_default 0x0 +#define HW_ATL_TDM_DESCDEN_DEFAULT 0x0 /* tx desc{d}_hd[c:0] bitfield definitions * preprocessor definitions for the bitfield "desc{d}_hd[c:0]". @@ -1430,15 +1437,15 @@ */ /* register address for bitfield desc{d}_hd[c:0] */ -#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40) +#define HW_ATL_TDM_DESCDHD_ADR(descriptor) (0x00007c0c + (descriptor) * 0x40) /* bitmask for bitfield desc{d}_hd[c:0] */ -#define tdm_descdhd_msk 0x00001fff +#define HW_ATL_TDM_DESCDHD_MSK 0x00001fff /* inverted bitmask for bitfield desc{d}_hd[c:0] */ -#define tdm_descdhd_mskn 0xffffe000 +#define HW_ATL_TDM_DESCDHD_MSKN 0xffffe000 /* lower bit position of bitfield desc{d}_hd[c:0] */ -#define tdm_descdhd_shift 0 +#define HW_ATL_TDM_DESCDHD_SHIFT 0 /* width of bitfield desc{d}_hd[c:0] */ -#define tdm_descdhd_width 13 +#define HW_ATL_TDM_DESCDHD_WIDTH 13 /* tx desc{d}_len[9:0] bitfield definitions * preprocessor definitions for the bitfield "desc{d}_len[9:0]". 
@@ -1447,17 +1454,17 @@ */ /* register address for bitfield desc{d}_len[9:0] */ -#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40) +#define HW_ATL_TDM_DESCDLEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40) /* bitmask for bitfield desc{d}_len[9:0] */ -#define tdm_descdlen_msk 0x00001ff8 +#define HW_ATL_TDM_DESCDLEN_MSK 0x00001ff8 /* inverted bitmask for bitfield desc{d}_len[9:0] */ -#define tdm_descdlen_mskn 0xffffe007 +#define HW_ATL_TDM_DESCDLEN_MSKN 0xffffe007 /* lower bit position of bitfield desc{d}_len[9:0] */ -#define tdm_descdlen_shift 3 +#define HW_ATL_TDM_DESCDLEN_SHIFT 3 /* width of bitfield desc{d}_len[9:0] */ -#define tdm_descdlen_width 10 +#define HW_ATL_TDM_DESCDLEN_WIDTH 10 /* default value of bitfield desc{d}_len[9:0] */ -#define tdm_descdlen_default 0x0 +#define HW_ATL_TDM_DESCDLEN_DEFAULT 0x0 /* tx int_desc_wrb_en bitfield definitions * preprocessor definitions for the bitfield "int_desc_wrb_en". @@ -1465,17 +1472,17 @@ */ /* register address for bitfield int_desc_wrb_en */ -#define tdm_int_desc_wrb_en_adr 0x00007b40 +#define HW_ATL_TDM_INT_DESC_WRB_EN_ADR 0x00007b40 /* bitmask for bitfield int_desc_wrb_en */ -#define tdm_int_desc_wrb_en_msk 0x00000002 +#define HW_ATL_TDM_INT_DESC_WRB_EN_MSK 0x00000002 /* inverted bitmask for bitfield int_desc_wrb_en */ -#define tdm_int_desc_wrb_en_mskn 0xfffffffd +#define HW_ATL_TDM_INT_DESC_WRB_EN_MSKN 0xfffffffd /* lower bit position of bitfield int_desc_wrb_en */ -#define tdm_int_desc_wrb_en_shift 1 +#define HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT 1 /* width of bitfield int_desc_wrb_en */ -#define tdm_int_desc_wrb_en_width 1 +#define HW_ATL_TDM_INT_DESC_WRB_EN_WIDTH 1 /* default value of bitfield int_desc_wrb_en */ -#define tdm_int_desc_wrb_en_default 0x0 +#define HW_ATL_TDM_INT_DESC_WRB_EN_DEFAULT 0x0 /* tx desc{d}_wrb_thresh[6:0] bitfield definitions * preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]". @@ -1484,17 +1491,18 @@ */ /* register address for bitfield desc{d}_wrb_thresh[6:0] */ -#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40) +#define HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor) \ + (0x00007c18 + (descriptor) * 0x40) /* bitmask for bitfield desc{d}_wrb_thresh[6:0] */ -#define tdm_descdwrb_thresh_msk 0x00007f00 +#define HW_ATL_TDM_DESCDWRB_THRESH_MSK 0x00007f00 /* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */ -#define tdm_descdwrb_thresh_mskn 0xffff80ff +#define HW_ATL_TDM_DESCDWRB_THRESH_MSKN 0xffff80ff /* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */ -#define tdm_descdwrb_thresh_shift 8 +#define HW_ATL_TDM_DESCDWRB_THRESH_SHIFT 8 /* width of bitfield desc{d}_wrb_thresh[6:0] */ -#define tdm_descdwrb_thresh_width 7 +#define HW_ATL_TDM_DESCDWRB_THRESH_WIDTH 7 /* default value of bitfield desc{d}_wrb_thresh[6:0] */ -#define tdm_descdwrb_thresh_default 0x0 +#define HW_ATL_TDM_DESCDWRB_THRESH_DEFAULT 0x0 /* tx lso_tcp_flag_first[b:0] bitfield definitions * preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]". 
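TX rings follow the same scheme with a 0x40 stride: ring d's base address, control and tail pointer all derive from one descriptor index. A sketch of a minimal ring bring-up, assuming desc{d}_len counts descriptors in units of eight (the 10-bit field starts at bit 3) and eliding the MSW half of the base address:

#include <linux/dma-mapping.h>

/* Hedged sketch: enable TX ring 'd' of 'size' descriptors at DMA
 * address 'dma' (size assumed to be a multiple of 8). */
static void hw_atl_tx_ring_start(void __iomem *base, int d,
				 dma_addr_t dma, u32 size)
{
	writel(lower_32_bits(dma),
	       base + HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(d));

	hw_atl_field_set(base, HW_ATL_TDM_DESCDLEN_ADR(d),
			 HW_ATL_TDM_DESCDLEN_MSKN,
			 HW_ATL_TDM_DESCDLEN_SHIFT, size / 8U);
	hw_atl_field_set(base, HW_ATL_TDM_DESCDEN_ADR(d),
			 HW_ATL_TDM_DESCDEN_MSKN,
			 HW_ATL_TDM_DESCDEN_SHIFT, 1U);

	/* hardware owns descriptors up to, but not including, tail */
	writel(0, base + HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(d));
}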
@@ -1502,17 +1510,17 @@ */ /* register address for bitfield lso_tcp_flag_first[b:0] */ -#define thm_lso_tcp_flag_first_adr 0x00007820 +#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR 0x00007820 /* bitmask for bitfield lso_tcp_flag_first[b:0] */ -#define thm_lso_tcp_flag_first_msk 0x00000fff +#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK 0x00000fff /* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */ -#define thm_lso_tcp_flag_first_mskn 0xfffff000 +#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSKN 0xfffff000 /* lower bit position of bitfield lso_tcp_flag_first[b:0] */ -#define thm_lso_tcp_flag_first_shift 0 +#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT 0 /* width of bitfield lso_tcp_flag_first[b:0] */ -#define thm_lso_tcp_flag_first_width 12 +#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_WIDTH 12 /* default value of bitfield lso_tcp_flag_first[b:0] */ -#define thm_lso_tcp_flag_first_default 0x0 +#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_DEFAULT 0x0 /* tx lso_tcp_flag_last[b:0] bitfield definitions * preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]". @@ -1520,17 +1528,17 @@ */ /* register address for bitfield lso_tcp_flag_last[b:0] */ -#define thm_lso_tcp_flag_last_adr 0x00007824 +#define HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR 0x00007824 /* bitmask for bitfield lso_tcp_flag_last[b:0] */ -#define thm_lso_tcp_flag_last_msk 0x00000fff +#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK 0x00000fff /* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */ -#define thm_lso_tcp_flag_last_mskn 0xfffff000 +#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSKN 0xfffff000 /* lower bit position of bitfield lso_tcp_flag_last[b:0] */ -#define thm_lso_tcp_flag_last_shift 0 +#define HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT 0 /* width of bitfield lso_tcp_flag_last[b:0] */ -#define thm_lso_tcp_flag_last_width 12 +#define HW_ATL_THM_LSO_TCP_FLAG_LAST_WIDTH 12 /* default value of bitfield lso_tcp_flag_last[b:0] */ -#define thm_lso_tcp_flag_last_default 0x0 +#define HW_ATL_THM_LSO_TCP_FLAG_LAST_DEFAULT 0x0 /* tx lso_tcp_flag_mid[b:0] bitfield definitions * preprocessor definitions for the bitfield "lso_tcp_flag_mid[b:0]". @@ -1538,17 +1546,17 @@ */ /* Register address for bitfield lro_rsc_max[1F:0] */ -#define rpo_lro_rsc_max_adr 0x00005598 +#define HW_ATL_RPO_LRO_RSC_MAX_ADR 0x00005598 /* Bitmask for bitfield lro_rsc_max[1F:0] */ -#define rpo_lro_rsc_max_msk 0xFFFFFFFF +#define HW_ATL_RPO_LRO_RSC_MAX_MSK 0xFFFFFFFF /* Inverted bitmask for bitfield lro_rsc_max[1F:0] */ -#define rpo_lro_rsc_max_mskn 0x00000000 +#define HW_ATL_RPO_LRO_RSC_MAX_MSKN 0x00000000 /* Lower bit position of bitfield lro_rsc_max[1F:0] */ -#define rpo_lro_rsc_max_shift 0 +#define HW_ATL_RPO_LRO_RSC_MAX_SHIFT 0 /* Width of bitfield lro_rsc_max[1F:0] */ -#define rpo_lro_rsc_max_width 32 +#define HW_ATL_RPO_LRO_RSC_MAX_WIDTH 32 /* Default value of bitfield lro_rsc_max[1F:0] */ -#define rpo_lro_rsc_max_default 0x0 +#define HW_ATL_RPO_LRO_RSC_MAX_DEFAULT 0x0 /* RX lro_en[1F:0] Bitfield Definitions * Preprocessor definitions for the bitfield "lro_en[1F:0]". 
@@ -1556,17 +1564,17 @@ */ /* Register address for bitfield lro_en[1F:0] */ -#define rpo_lro_en_adr 0x00005590 +#define HW_ATL_RPO_LRO_EN_ADR 0x00005590 /* Bitmask for bitfield lro_en[1F:0] */ -#define rpo_lro_en_msk 0xFFFFFFFF +#define HW_ATL_RPO_LRO_EN_MSK 0xFFFFFFFF /* Inverted bitmask for bitfield lro_en[1F:0] */ -#define rpo_lro_en_mskn 0x00000000 +#define HW_ATL_RPO_LRO_EN_MSKN 0x00000000 /* Lower bit position of bitfield lro_en[1F:0] */ -#define rpo_lro_en_shift 0 +#define HW_ATL_RPO_LRO_EN_SHIFT 0 /* Width of bitfield lro_en[1F:0] */ -#define rpo_lro_en_width 32 +#define HW_ATL_RPO_LRO_EN_WIDTH 32 /* Default value of bitfield lro_en[1F:0] */ -#define rpo_lro_en_default 0x0 +#define HW_ATL_RPO_LRO_EN_DEFAULT 0x0 /* RX lro_ptopt_en Bitfield Definitions * Preprocessor definitions for the bitfield "lro_ptopt_en". @@ -1574,17 +1582,17 @@ */ /* Register address for bitfield lro_ptopt_en */ -#define rpo_lro_ptopt_en_adr 0x00005594 +#define HW_ATL_RPO_LRO_PTOPT_EN_ADR 0x00005594 /* Bitmask for bitfield lro_ptopt_en */ -#define rpo_lro_ptopt_en_msk 0x00008000 +#define HW_ATL_RPO_LRO_PTOPT_EN_MSK 0x00008000 /* Inverted bitmask for bitfield lro_ptopt_en */ -#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF +#define HW_ATL_RPO_LRO_PTOPT_EN_MSKN 0xFFFF7FFF /* Lower bit position of bitfield lro_ptopt_en */ -#define rpo_lro_ptopt_en_shift 15 +#define HW_ATL_RPO_LRO_PTOPT_EN_SHIFT 15 /* Width of bitfield lro_ptopt_en */ -#define rpo_lro_ptopt_en_width 1 +#define HW_ATL_RPO_LRO_PTOPT_EN_WIDTH 1 /* Default value of bitfield lro_ptopt_en */ -#define rpo_lro_ptopt_en_defalt 0x1 +#define HW_ATL_RPO_LRO_PTOPT_EN_DEFALT 0x1 /* RX lro_q_ses_lmt Bitfield Definitions * Preprocessor definitions for the bitfield "lro_q_ses_lmt". @@ -1592,17 +1600,17 @@ */ /* Register address for bitfield lro_q_ses_lmt */ -#define rpo_lro_qses_lmt_adr 0x00005594 +#define HW_ATL_RPO_LRO_QSES_LMT_ADR 0x00005594 /* Bitmask for bitfield lro_q_ses_lmt */ -#define rpo_lro_qses_lmt_msk 0x00003000 +#define HW_ATL_RPO_LRO_QSES_LMT_MSK 0x00003000 /* Inverted bitmask for bitfield lro_q_ses_lmt */ -#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF +#define HW_ATL_RPO_LRO_QSES_LMT_MSKN 0xFFFFCFFF /* Lower bit position of bitfield lro_q_ses_lmt */ -#define rpo_lro_qses_lmt_shift 12 +#define HW_ATL_RPO_LRO_QSES_LMT_SHIFT 12 /* Width of bitfield lro_q_ses_lmt */ -#define rpo_lro_qses_lmt_width 2 +#define HW_ATL_RPO_LRO_QSES_LMT_WIDTH 2 /* Default value of bitfield lro_q_ses_lmt */ -#define rpo_lro_qses_lmt_default 0x1 +#define HW_ATL_RPO_LRO_QSES_LMT_DEFAULT 0x1 /* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions * Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]". 
@@ -1610,17 +1618,17 @@ */ /* Register address for bitfield lro_tot_dsc_lmt[1:0] */ -#define rpo_lro_tot_dsc_lmt_adr 0x00005594 +#define HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR 0x00005594 /* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */ -#define rpo_lro_tot_dsc_lmt_msk 0x00000060 +#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK 0x00000060 /* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */ -#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F +#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSKN 0xFFFFFF9F /* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */ -#define rpo_lro_tot_dsc_lmt_shift 5 +#define HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT 5 /* Width of bitfield lro_tot_dsc_lmt[1:0] */ -#define rpo_lro_tot_dsc_lmt_width 2 +#define HW_ATL_RPO_LRO_TOT_DSC_LMT_WIDTH 2 /* Default value of bitfield lro_tot_dsc_lmt[1:0] */ -#define rpo_lro_tot_dsc_lmt_defalt 0x1 +#define HW_ATL_RPO_LRO_TOT_DSC_LMT_DEFALT 0x1 /* RX lro_pkt_min[4:0] Bitfield Definitions * Preprocessor definitions for the bitfield "lro_pkt_min[4:0]". @@ -1628,22 +1636,22 @@ */ /* Register address for bitfield lro_pkt_min[4:0] */ -#define rpo_lro_pkt_min_adr 0x00005594 +#define HW_ATL_RPO_LRO_PKT_MIN_ADR 0x00005594 /* Bitmask for bitfield lro_pkt_min[4:0] */ -#define rpo_lro_pkt_min_msk 0x0000001F +#define HW_ATL_RPO_LRO_PKT_MIN_MSK 0x0000001F /* Inverted bitmask for bitfield lro_pkt_min[4:0] */ -#define rpo_lro_pkt_min_mskn 0xFFFFFFE0 +#define HW_ATL_RPO_LRO_PKT_MIN_MSKN 0xFFFFFFE0 /* Lower bit position of bitfield lro_pkt_min[4:0] */ -#define rpo_lro_pkt_min_shift 0 +#define HW_ATL_RPO_LRO_PKT_MIN_SHIFT 0 /* Width of bitfield lro_pkt_min[4:0] */ -#define rpo_lro_pkt_min_width 5 +#define HW_ATL_RPO_LRO_PKT_MIN_WIDTH 5 /* Default value of bitfield lro_pkt_min[4:0] */ -#define rpo_lro_pkt_min_default 0x8 +#define HW_ATL_RPO_LRO_PKT_MIN_DEFAULT 0x8 /* Width of bitfield lro{L}_des_max[1:0] */ -#define rpo_lro_ldes_max_width 2 +#define HW_ATL_RPO_LRO_LDES_MAX_WIDTH 2 /* Default value of bitfield lro{L}_des_max[1:0] */ -#define rpo_lro_ldes_max_default 0x0 +#define HW_ATL_RPO_LRO_LDES_MAX_DEFAULT 0x0 /* RX lro_tb_div[11:0] Bitfield Definitions * Preprocessor definitions for the bitfield "lro_tb_div[11:0]". @@ -1651,17 +1659,17 @@ */ /* Register address for bitfield lro_tb_div[11:0] */ -#define rpo_lro_tb_div_adr 0x00005620 +#define HW_ATL_RPO_LRO_TB_DIV_ADR 0x00005620 /* Bitmask for bitfield lro_tb_div[11:0] */ -#define rpo_lro_tb_div_msk 0xFFF00000 +#define HW_ATL_RPO_LRO_TB_DIV_MSK 0xFFF00000 /* Inverted bitmask for bitfield lro_tb_div[11:0] */ -#define rpo_lro_tb_div_mskn 0x000FFFFF +#define HW_ATL_RPO_LRO_TB_DIV_MSKN 0x000FFFFF /* Lower bit position of bitfield lro_tb_div[11:0] */ -#define rpo_lro_tb_div_shift 20 +#define HW_ATL_RPO_LRO_TB_DIV_SHIFT 20 /* Width of bitfield lro_tb_div[11:0] */ -#define rpo_lro_tb_div_width 12 +#define HW_ATL_RPO_LRO_TB_DIV_WIDTH 12 /* Default value of bitfield lro_tb_div[11:0] */ -#define rpo_lro_tb_div_default 0xC35 +#define HW_ATL_RPO_LRO_TB_DIV_DEFAULT 0xC35 /* RX lro_ina_ival[9:0] Bitfield Definitions * Preprocessor definitions for the bitfield "lro_ina_ival[9:0]". 
@@ -1669,17 +1677,17 @@ */ /* Register address for bitfield lro_ina_ival[9:0] */ -#define rpo_lro_ina_ival_adr 0x00005620 +#define HW_ATL_RPO_LRO_INA_IVAL_ADR 0x00005620 /* Bitmask for bitfield lro_ina_ival[9:0] */ -#define rpo_lro_ina_ival_msk 0x000FFC00 +#define HW_ATL_RPO_LRO_INA_IVAL_MSK 0x000FFC00 /* Inverted bitmask for bitfield lro_ina_ival[9:0] */ -#define rpo_lro_ina_ival_mskn 0xFFF003FF +#define HW_ATL_RPO_LRO_INA_IVAL_MSKN 0xFFF003FF /* Lower bit position of bitfield lro_ina_ival[9:0] */ -#define rpo_lro_ina_ival_shift 10 +#define HW_ATL_RPO_LRO_INA_IVAL_SHIFT 10 /* Width of bitfield lro_ina_ival[9:0] */ -#define rpo_lro_ina_ival_width 10 +#define HW_ATL_RPO_LRO_INA_IVAL_WIDTH 10 /* Default value of bitfield lro_ina_ival[9:0] */ -#define rpo_lro_ina_ival_default 0xA +#define HW_ATL_RPO_LRO_INA_IVAL_DEFAULT 0xA /* RX lro_max_ival[9:0] Bitfield Definitions * Preprocessor definitions for the bitfield "lro_max_ival[9:0]". @@ -1687,17 +1695,17 @@ */ /* Register address for bitfield lro_max_ival[9:0] */ -#define rpo_lro_max_ival_adr 0x00005620 +#define HW_ATL_RPO_LRO_MAX_IVAL_ADR 0x00005620 /* Bitmask for bitfield lro_max_ival[9:0] */ -#define rpo_lro_max_ival_msk 0x000003FF +#define HW_ATL_RPO_LRO_MAX_IVAL_MSK 0x000003FF /* Inverted bitmask for bitfield lro_max_ival[9:0] */ -#define rpo_lro_max_ival_mskn 0xFFFFFC00 +#define HW_ATL_RPO_LRO_MAX_IVAL_MSKN 0xFFFFFC00 /* Lower bit position of bitfield lro_max_ival[9:0] */ -#define rpo_lro_max_ival_shift 0 +#define HW_ATL_RPO_LRO_MAX_IVAL_SHIFT 0 /* Width of bitfield lro_max_ival[9:0] */ -#define rpo_lro_max_ival_width 10 +#define HW_ATL_RPO_LRO_MAX_IVAL_WIDTH 10 /* Default value of bitfield lro_max_ival[9:0] */ -#define rpo_lro_max_ival_default 0x19 +#define HW_ATL_RPO_LRO_MAX_IVAL_DEFAULT 0x19 /* TX dca{D}_cpuid[7:0] Bitfield Definitions * Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]". @@ -1706,17 +1714,17 @@ */ /* Register address for bitfield dca{D}_cpuid[7:0] */ -#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4) +#define HW_ATL_TDM_DCA_DCPUID_ADR(dca) (0x00008400 + (dca) * 0x4) /* Bitmask for bitfield dca{D}_cpuid[7:0] */ -#define tdm_dca_dcpuid_msk 0x000000FF +#define HW_ATL_TDM_DCA_DCPUID_MSK 0x000000FF /* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */ -#define tdm_dca_dcpuid_mskn 0xFFFFFF00 +#define HW_ATL_TDM_DCA_DCPUID_MSKN 0xFFFFFF00 /* Lower bit position of bitfield dca{D}_cpuid[7:0] */ -#define tdm_dca_dcpuid_shift 0 +#define HW_ATL_TDM_DCA_DCPUID_SHIFT 0 /* Width of bitfield dca{D}_cpuid[7:0] */ -#define tdm_dca_dcpuid_width 8 +#define HW_ATL_TDM_DCA_DCPUID_WIDTH 8 /* Default value of bitfield dca{D}_cpuid[7:0] */ -#define tdm_dca_dcpuid_default 0x0 +#define HW_ATL_TDM_DCA_DCPUID_DEFAULT 0x0 /* TX dca{D}_desc_en Bitfield Definitions * Preprocessor definitions for the bitfield "dca{D}_desc_en". 
@@ -1725,17 +1733,17 @@ */ /* Register address for bitfield dca{D}_desc_en */ -#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4) +#define HW_ATL_TDM_DCA_DDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4) /* Bitmask for bitfield dca{D}_desc_en */ -#define tdm_dca_ddesc_en_msk 0x80000000 +#define HW_ATL_TDM_DCA_DDESC_EN_MSK 0x80000000 /* Inverted bitmask for bitfield dca{D}_desc_en */ -#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF +#define HW_ATL_TDM_DCA_DDESC_EN_MSKN 0x7FFFFFFF /* Lower bit position of bitfield dca{D}_desc_en */ -#define tdm_dca_ddesc_en_shift 31 +#define HW_ATL_TDM_DCA_DDESC_EN_SHIFT 31 /* Width of bitfield dca{D}_desc_en */ -#define tdm_dca_ddesc_en_width 1 +#define HW_ATL_TDM_DCA_DDESC_EN_WIDTH 1 /* Default value of bitfield dca{D}_desc_en */ -#define tdm_dca_ddesc_en_default 0x0 +#define HW_ATL_TDM_DCA_DDESC_EN_DEFAULT 0x0 /* TX desc{D}_en Bitfield Definitions * Preprocessor definitions for the bitfield "desc{D}_en". @@ -1744,17 +1752,17 @@ */ /* Register address for bitfield desc{D}_en */ -#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40) +#define HW_ATL_TDM_DESC_DEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40) /* Bitmask for bitfield desc{D}_en */ -#define tdm_desc_den_msk 0x80000000 +#define HW_ATL_TDM_DESC_DEN_MSK 0x80000000 /* Inverted bitmask for bitfield desc{D}_en */ -#define tdm_desc_den_mskn 0x7FFFFFFF +#define HW_ATL_TDM_DESC_DEN_MSKN 0x7FFFFFFF /* Lower bit position of bitfield desc{D}_en */ -#define tdm_desc_den_shift 31 +#define HW_ATL_TDM_DESC_DEN_SHIFT 31 /* Width of bitfield desc{D}_en */ -#define tdm_desc_den_width 1 +#define HW_ATL_TDM_DESC_DEN_WIDTH 1 /* Default value of bitfield desc{D}_en */ -#define tdm_desc_den_default 0x0 +#define HW_ATL_TDM_DESC_DEN_DEFAULT 0x0 /* TX desc{D}_hd[C:0] Bitfield Definitions * Preprocessor definitions for the bitfield "desc{D}_hd[C:0]". @@ -1763,15 +1771,15 @@ */ /* Register address for bitfield desc{D}_hd[C:0] */ -#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40) +#define HW_ATL_TDM_DESC_DHD_ADR(descriptor) (0x00007C0C + (descriptor) * 0x40) /* Bitmask for bitfield desc{D}_hd[C:0] */ -#define tdm_desc_dhd_msk 0x00001FFF +#define HW_ATL_TDM_DESC_DHD_MSK 0x00001FFF /* Inverted bitmask for bitfield desc{D}_hd[C:0] */ -#define tdm_desc_dhd_mskn 0xFFFFE000 +#define HW_ATL_TDM_DESC_DHD_MSKN 0xFFFFE000 /* Lower bit position of bitfield desc{D}_hd[C:0] */ -#define tdm_desc_dhd_shift 0 +#define HW_ATL_TDM_DESC_DHD_SHIFT 0 /* Width of bitfield desc{D}_hd[C:0] */ -#define tdm_desc_dhd_width 13 +#define HW_ATL_TDM_DESC_DHD_WIDTH 13 /* TX desc{D}_len[9:0] Bitfield Definitions * Preprocessor definitions for the bitfield "desc{D}_len[9:0]". 
@@ -1780,17 +1788,17 @@ */ /* Register address for bitfield desc{D}_len[9:0] */ -#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40) +#define HW_ATL_TDM_DESC_DLEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40) /* Bitmask for bitfield desc{D}_len[9:0] */ -#define tdm_desc_dlen_msk 0x00001FF8 +#define HW_ATL_TDM_DESC_DLEN_MSK 0x00001FF8 /* Inverted bitmask for bitfield desc{D}_len[9:0] */ -#define tdm_desc_dlen_mskn 0xFFFFE007 +#define HW_ATL_TDM_DESC_DLEN_MSKN 0xFFFFE007 /* Lower bit position of bitfield desc{D}_len[9:0] */ -#define tdm_desc_dlen_shift 3 +#define HW_ATL_TDM_DESC_DLEN_SHIFT 3 /* Width of bitfield desc{D}_len[9:0] */ -#define tdm_desc_dlen_width 10 +#define HW_ATL_TDM_DESC_DLEN_WIDTH 10 /* Default value of bitfield desc{D}_len[9:0] */ -#define tdm_desc_dlen_default 0x0 +#define HW_ATL_TDM_DESC_DLEN_DEFAULT 0x0 /* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions * Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]". @@ -1799,18 +1807,18 @@ */ /* Register address for bitfield desc{D}_wrb_thresh[6:0] */ -#define tdm_desc_dwrb_thresh_adr(descriptor) \ +#define HW_ATL_TDM_DESC_DWRB_THRESH_ADR(descriptor) \ (0x00007C18 + (descriptor) * 0x40) /* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */ -#define tdm_desc_dwrb_thresh_msk 0x00007F00 +#define HW_ATL_TDM_DESC_DWRB_THRESH_MSK 0x00007F00 /* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */ -#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF +#define HW_ATL_TDM_DESC_DWRB_THRESH_MSKN 0xFFFF80FF /* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */ -#define tdm_desc_dwrb_thresh_shift 8 +#define HW_ATL_TDM_DESC_DWRB_THRESH_SHIFT 8 /* Width of bitfield desc{D}_wrb_thresh[6:0] */ -#define tdm_desc_dwrb_thresh_width 7 +#define HW_ATL_TDM_DESC_DWRB_THRESH_WIDTH 7 /* Default value of bitfield desc{D}_wrb_thresh[6:0] */ -#define tdm_desc_dwrb_thresh_default 0x0 +#define HW_ATL_TDM_DESC_DWRB_THRESH_DEFAULT 0x0 /* TX tdm_int_mod_en Bitfield Definitions * Preprocessor definitions for the bitfield "tdm_int_mod_en". @@ -1818,34 +1826,34 @@ */ /* Register address for bitfield tdm_int_mod_en */ -#define tdm_int_mod_en_adr 0x00007B40 +#define HW_ATL_TDM_INT_MOD_EN_ADR 0x00007B40 /* Bitmask for bitfield tdm_int_mod_en */ -#define tdm_int_mod_en_msk 0x00000010 +#define HW_ATL_TDM_INT_MOD_EN_MSK 0x00000010 /* Inverted bitmask for bitfield tdm_int_mod_en */ -#define tdm_int_mod_en_mskn 0xFFFFFFEF +#define HW_ATL_TDM_INT_MOD_EN_MSKN 0xFFFFFFEF /* Lower bit position of bitfield tdm_int_mod_en */ -#define tdm_int_mod_en_shift 4 +#define HW_ATL_TDM_INT_MOD_EN_SHIFT 4 /* Width of bitfield tdm_int_mod_en */ -#define tdm_int_mod_en_width 1 +#define HW_ATL_TDM_INT_MOD_EN_WIDTH 1 /* Default value of bitfield tdm_int_mod_en */ -#define tdm_int_mod_en_default 0x0 +#define HW_ATL_TDM_INT_MOD_EN_DEFAULT 0x0 /* TX lso_tcp_flag_mid[B:0] Bitfield Definitions * Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]". 
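The rename above is mechanical, but it preserves the six-macro convention that keeps the llh accessor layer uniform: each bitfield gets ADR, MSK, MSKN, SHIFT, WIDTH and DEFAULT, with ADR becoming a parameterized macro for per-descriptor register banks. A minimal sketch of how one such sextet is consumed through the driver's aq_hw_write_reg_bit() read-modify-write helper (used later in this patch); the wrapper name below is illustrative, not taken from the patch:

static void tdm_tx_desc_len_set_sketch(struct aq_hw_s *aq_hw,
				       u32 dlen, u32 descriptor)
{
	/* MSK and SHIFT select the field inside the 32-bit register;
	 * the helper clears the old field and ORs the new value in.
	 */
	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESC_DLEN_ADR(descriptor),
			    HW_ATL_TDM_DESC_DLEN_MSK,
			    HW_ATL_TDM_DESC_DLEN_SHIFT,
			    dlen);
}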
* PORT="pif_thm_lso_tcp_flag_mid_i[11:0]" */ /* register address for bitfield lso_tcp_flag_mid[b:0] */ -#define thm_lso_tcp_flag_mid_adr 0x00007820 +#define HW_ATL_THM_LSO_TCP_FLAG_MID_ADR 0x00007820 /* bitmask for bitfield lso_tcp_flag_mid[b:0] */ -#define thm_lso_tcp_flag_mid_msk 0x0fff0000 +#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSK 0x0fff0000 /* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */ -#define thm_lso_tcp_flag_mid_mskn 0xf000ffff +#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSKN 0xf000ffff /* lower bit position of bitfield lso_tcp_flag_mid[b:0] */ -#define thm_lso_tcp_flag_mid_shift 16 +#define HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT 16 /* width of bitfield lso_tcp_flag_mid[b:0] */ -#define thm_lso_tcp_flag_mid_width 12 +#define HW_ATL_THM_LSO_TCP_FLAG_MID_WIDTH 12 /* default value of bitfield lso_tcp_flag_mid[b:0] */ -#define thm_lso_tcp_flag_mid_default 0x0 +#define HW_ATL_THM_LSO_TCP_FLAG_MID_DEFAULT 0x0 /* tx tx_buf_en bitfield definitions * preprocessor definitions for the bitfield "tx_buf_en". @@ -1853,17 +1861,17 @@ */ /* register address for bitfield tx_buf_en */ -#define tpb_tx_buf_en_adr 0x00007900 +#define HW_ATL_TPB_TX_BUF_EN_ADR 0x00007900 /* bitmask for bitfield tx_buf_en */ -#define tpb_tx_buf_en_msk 0x00000001 +#define HW_ATL_TPB_TX_BUF_EN_MSK 0x00000001 /* inverted bitmask for bitfield tx_buf_en */ -#define tpb_tx_buf_en_mskn 0xfffffffe +#define HW_ATL_TPB_TX_BUF_EN_MSKN 0xfffffffe /* lower bit position of bitfield tx_buf_en */ -#define tpb_tx_buf_en_shift 0 +#define HW_ATL_TPB_TX_BUF_EN_SHIFT 0 /* width of bitfield tx_buf_en */ -#define tpb_tx_buf_en_width 1 +#define HW_ATL_TPB_TX_BUF_EN_WIDTH 1 /* default value of bitfield tx_buf_en */ -#define tpb_tx_buf_en_default 0x0 +#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0 /* tx tx{b}_hi_thresh[c:0] bitfield definitions * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]". @@ -1872,17 +1880,17 @@ */ /* register address for bitfield tx{b}_hi_thresh[c:0] */ -#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10) +#define HW_ATL_TPB_TXBHI_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10) /* bitmask for bitfield tx{b}_hi_thresh[c:0] */ -#define tpb_txbhi_thresh_msk 0x1fff0000 +#define HW_ATL_TPB_TXBHI_THRESH_MSK 0x1fff0000 /* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */ -#define tpb_txbhi_thresh_mskn 0xe000ffff +#define HW_ATL_TPB_TXBHI_THRESH_MSKN 0xe000ffff /* lower bit position of bitfield tx{b}_hi_thresh[c:0] */ -#define tpb_txbhi_thresh_shift 16 +#define HW_ATL_TPB_TXBHI_THRESH_SHIFT 16 /* width of bitfield tx{b}_hi_thresh[c:0] */ -#define tpb_txbhi_thresh_width 13 +#define HW_ATL_TPB_TXBHI_THRESH_WIDTH 13 /* default value of bitfield tx{b}_hi_thresh[c:0] */ -#define tpb_txbhi_thresh_default 0x0 +#define HW_ATL_TPB_TXBHI_THRESH_DEFAULT 0x0 /* tx tx{b}_lo_thresh[c:0] bitfield definitions * preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]". 
@@ -1891,17 +1899,17 @@ */ /* register address for bitfield tx{b}_lo_thresh[c:0] */ -#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10) +#define HW_ATL_TPB_TXBLO_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10) /* bitmask for bitfield tx{b}_lo_thresh[c:0] */ -#define tpb_txblo_thresh_msk 0x00001fff +#define HW_ATL_TPB_TXBLO_THRESH_MSK 0x00001fff /* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */ -#define tpb_txblo_thresh_mskn 0xffffe000 +#define HW_ATL_TPB_TXBLO_THRESH_MSKN 0xffffe000 /* lower bit position of bitfield tx{b}_lo_thresh[c:0] */ -#define tpb_txblo_thresh_shift 0 +#define HW_ATL_TPB_TXBLO_THRESH_SHIFT 0 /* width of bitfield tx{b}_lo_thresh[c:0] */ -#define tpb_txblo_thresh_width 13 +#define HW_ATL_TPB_TXBLO_THRESH_WIDTH 13 /* default value of bitfield tx{b}_lo_thresh[c:0] */ -#define tpb_txblo_thresh_default 0x0 +#define HW_ATL_TPB_TXBLO_THRESH_DEFAULT 0x0 /* tx dma_sys_loopback bitfield definitions * preprocessor definitions for the bitfield "dma_sys_loopback". @@ -1909,17 +1917,17 @@ */ /* register address for bitfield dma_sys_loopback */ -#define tpb_dma_sys_lbk_adr 0x00007000 +#define HW_ATL_TPB_DMA_SYS_LBK_ADR 0x00007000 /* bitmask for bitfield dma_sys_loopback */ -#define tpb_dma_sys_lbk_msk 0x00000040 +#define HW_ATL_TPB_DMA_SYS_LBK_MSK 0x00000040 /* inverted bitmask for bitfield dma_sys_loopback */ -#define tpb_dma_sys_lbk_mskn 0xffffffbf +#define HW_ATL_TPB_DMA_SYS_LBK_MSKN 0xffffffbf /* lower bit position of bitfield dma_sys_loopback */ -#define tpb_dma_sys_lbk_shift 6 +#define HW_ATL_TPB_DMA_SYS_LBK_SHIFT 6 /* width of bitfield dma_sys_loopback */ -#define tpb_dma_sys_lbk_width 1 +#define HW_ATL_TPB_DMA_SYS_LBK_WIDTH 1 /* default value of bitfield dma_sys_loopback */ -#define tpb_dma_sys_lbk_default 0x0 +#define HW_ATL_TPB_DMA_SYS_LBK_DEFAULT 0x0 /* tx tx{b}_buf_size[7:0] bitfield definitions * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]". @@ -1928,17 +1936,17 @@ */ /* register address for bitfield tx{b}_buf_size[7:0] */ -#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10) +#define HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer) (0x00007910 + (buffer) * 0x10) /* bitmask for bitfield tx{b}_buf_size[7:0] */ -#define tpb_txbbuf_size_msk 0x000000ff +#define HW_ATL_TPB_TXBBUF_SIZE_MSK 0x000000ff /* inverted bitmask for bitfield tx{b}_buf_size[7:0] */ -#define tpb_txbbuf_size_mskn 0xffffff00 +#define HW_ATL_TPB_TXBBUF_SIZE_MSKN 0xffffff00 /* lower bit position of bitfield tx{b}_buf_size[7:0] */ -#define tpb_txbbuf_size_shift 0 +#define HW_ATL_TPB_TXBBUF_SIZE_SHIFT 0 /* width of bitfield tx{b}_buf_size[7:0] */ -#define tpb_txbbuf_size_width 8 +#define HW_ATL_TPB_TXBBUF_SIZE_WIDTH 8 /* default value of bitfield tx{b}_buf_size[7:0] */ -#define tpb_txbbuf_size_default 0x0 +#define HW_ATL_TPB_TXBBUF_SIZE_DEFAULT 0x0 /* tx tx_scp_ins_en bitfield definitions * preprocessor definitions for the bitfield "tx_scp_ins_en". 
@@ -1946,17 +1954,17 @@ */ /* register address for bitfield tx_scp_ins_en */ -#define tpb_tx_scp_ins_en_adr 0x00007900 +#define HW_ATL_TPB_TX_SCP_INS_EN_ADR 0x00007900 /* bitmask for bitfield tx_scp_ins_en */ -#define tpb_tx_scp_ins_en_msk 0x00000004 +#define HW_ATL_TPB_TX_SCP_INS_EN_MSK 0x00000004 /* inverted bitmask for bitfield tx_scp_ins_en */ -#define tpb_tx_scp_ins_en_mskn 0xfffffffb +#define HW_ATL_TPB_TX_SCP_INS_EN_MSKN 0xfffffffb /* lower bit position of bitfield tx_scp_ins_en */ -#define tpb_tx_scp_ins_en_shift 2 +#define HW_ATL_TPB_TX_SCP_INS_EN_SHIFT 2 /* width of bitfield tx_scp_ins_en */ -#define tpb_tx_scp_ins_en_width 1 +#define HW_ATL_TPB_TX_SCP_INS_EN_WIDTH 1 /* default value of bitfield tx_scp_ins_en */ -#define tpb_tx_scp_ins_en_default 0x0 +#define HW_ATL_TPB_TX_SCP_INS_EN_DEFAULT 0x0 /* tx ipv4_chk_en bitfield definitions * preprocessor definitions for the bitfield "ipv4_chk_en". @@ -1964,17 +1972,17 @@ */ /* register address for bitfield ipv4_chk_en */ -#define tpo_ipv4chk_en_adr 0x00007800 +#define HW_ATL_TPO_IPV4CHK_EN_ADR 0x00007800 /* bitmask for bitfield ipv4_chk_en */ -#define tpo_ipv4chk_en_msk 0x00000002 +#define HW_ATL_TPO_IPV4CHK_EN_MSK 0x00000002 /* inverted bitmask for bitfield ipv4_chk_en */ -#define tpo_ipv4chk_en_mskn 0xfffffffd +#define HW_ATL_TPO_IPV4CHK_EN_MSKN 0xfffffffd /* lower bit position of bitfield ipv4_chk_en */ -#define tpo_ipv4chk_en_shift 1 +#define HW_ATL_TPO_IPV4CHK_EN_SHIFT 1 /* width of bitfield ipv4_chk_en */ -#define tpo_ipv4chk_en_width 1 +#define HW_ATL_TPO_IPV4CHK_EN_WIDTH 1 /* default value of bitfield ipv4_chk_en */ -#define tpo_ipv4chk_en_default 0x0 +#define HW_ATL_TPO_IPV4CHK_EN_DEFAULT 0x0 /* tx l4_chk_en bitfield definitions * preprocessor definitions for the bitfield "l4_chk_en". @@ -1982,17 +1990,17 @@ */ /* register address for bitfield l4_chk_en */ -#define tpol4chk_en_adr 0x00007800 +#define HW_ATL_TPOL4CHK_EN_ADR 0x00007800 /* bitmask for bitfield l4_chk_en */ -#define tpol4chk_en_msk 0x00000001 +#define HW_ATL_TPOL4CHK_EN_MSK 0x00000001 /* inverted bitmask for bitfield l4_chk_en */ -#define tpol4chk_en_mskn 0xfffffffe +#define HW_ATL_TPOL4CHK_EN_MSKN 0xfffffffe /* lower bit position of bitfield l4_chk_en */ -#define tpol4chk_en_shift 0 +#define HW_ATL_TPOL4CHK_EN_SHIFT 0 /* width of bitfield l4_chk_en */ -#define tpol4chk_en_width 1 +#define HW_ATL_TPOL4CHK_EN_WIDTH 1 /* default value of bitfield l4_chk_en */ -#define tpol4chk_en_default 0x0 +#define HW_ATL_TPOL4CHK_EN_DEFAULT 0x0 /* tx pkt_sys_loopback bitfield definitions * preprocessor definitions for the bitfield "pkt_sys_loopback". @@ -2000,17 +2008,17 @@ */ /* register address for bitfield pkt_sys_loopback */ -#define tpo_pkt_sys_lbk_adr 0x00007000 +#define HW_ATL_TPO_PKT_SYS_LBK_ADR 0x00007000 /* bitmask for bitfield pkt_sys_loopback */ -#define tpo_pkt_sys_lbk_msk 0x00000080 +#define HW_ATL_TPO_PKT_SYS_LBK_MSK 0x00000080 /* inverted bitmask for bitfield pkt_sys_loopback */ -#define tpo_pkt_sys_lbk_mskn 0xffffff7f +#define HW_ATL_TPO_PKT_SYS_LBK_MSKN 0xffffff7f /* lower bit position of bitfield pkt_sys_loopback */ -#define tpo_pkt_sys_lbk_shift 7 +#define HW_ATL_TPO_PKT_SYS_LBK_SHIFT 7 /* width of bitfield pkt_sys_loopback */ -#define tpo_pkt_sys_lbk_width 1 +#define HW_ATL_TPO_PKT_SYS_LBK_WIDTH 1 /* default value of bitfield pkt_sys_loopback */ -#define tpo_pkt_sys_lbk_default 0x0 +#define HW_ATL_TPO_PKT_SYS_LBK_DEFAULT 0x0 /* tx data_tc_arb_mode bitfield definitions * preprocessor definitions for the bitfield "data_tc_arb_mode". 
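Of the six macros, only MSKN has a non-obvious job: it is the bitwise complement of MSK, so several fields that share one register can be cleared with a single AND before the new values are ORed back in. A hedged sketch enabling both Tx checksum offloads defined above, which sit in the same register at 0x7800 (the function name is hypothetical):

static void tpo_tx_csum_enable_sketch(struct aq_hw_s *aq_hw)
{
	u32 val = aq_hw_read_reg(aq_hw, HW_ATL_TPO_IPV4CHK_EN_ADR);

	/* Clear both one-bit fields via the inverted masks. */
	val &= HW_ATL_TPO_IPV4CHK_EN_MSKN & HW_ATL_TPOL4CHK_EN_MSKN;
	/* For one-bit fields the mask itself is the enabled value in place. */
	val |= HW_ATL_TPO_IPV4CHK_EN_MSK | HW_ATL_TPOL4CHK_EN_MSK;
	aq_hw_write_reg(aq_hw, HW_ATL_TPO_IPV4CHK_EN_ADR, val);
}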
@@ -2018,17 +2026,17 @@ */ /* register address for bitfield data_tc_arb_mode */ -#define tps_data_tc_arb_mode_adr 0x00007100 +#define HW_ATL_TPS_DATA_TC_ARB_MODE_ADR 0x00007100 /* bitmask for bitfield data_tc_arb_mode */ -#define tps_data_tc_arb_mode_msk 0x00000001 +#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSK 0x00000001 /* inverted bitmask for bitfield data_tc_arb_mode */ -#define tps_data_tc_arb_mode_mskn 0xfffffffe +#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffe /* lower bit position of bitfield data_tc_arb_mode */ -#define tps_data_tc_arb_mode_shift 0 +#define HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT 0 /* width of bitfield data_tc_arb_mode */ -#define tps_data_tc_arb_mode_width 1 +#define HW_ATL_TPS_DATA_TC_ARB_MODE_WIDTH 1 /* default value of bitfield data_tc_arb_mode */ -#define tps_data_tc_arb_mode_default 0x0 +#define HW_ATL_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0 /* tx desc_rate_ta_rst bitfield definitions * preprocessor definitions for the bitfield "desc_rate_ta_rst". @@ -2036,17 +2044,17 @@ */ /* register address for bitfield desc_rate_ta_rst */ -#define tps_desc_rate_ta_rst_adr 0x00007310 +#define HW_ATL_TPS_DESC_RATE_TA_RST_ADR 0x00007310 /* bitmask for bitfield desc_rate_ta_rst */ -#define tps_desc_rate_ta_rst_msk 0x80000000 +#define HW_ATL_TPS_DESC_RATE_TA_RST_MSK 0x80000000 /* inverted bitmask for bitfield desc_rate_ta_rst */ -#define tps_desc_rate_ta_rst_mskn 0x7fffffff +#define HW_ATL_TPS_DESC_RATE_TA_RST_MSKN 0x7fffffff /* lower bit position of bitfield desc_rate_ta_rst */ -#define tps_desc_rate_ta_rst_shift 31 +#define HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT 31 /* width of bitfield desc_rate_ta_rst */ -#define tps_desc_rate_ta_rst_width 1 +#define HW_ATL_TPS_DESC_RATE_TA_RST_WIDTH 1 /* default value of bitfield desc_rate_ta_rst */ -#define tps_desc_rate_ta_rst_default 0x0 +#define HW_ATL_TPS_DESC_RATE_TA_RST_DEFAULT 0x0 /* tx desc_rate_limit[a:0] bitfield definitions * preprocessor definitions for the bitfield "desc_rate_limit[a:0]". @@ -2054,17 +2062,17 @@ */ /* register address for bitfield desc_rate_limit[a:0] */ -#define tps_desc_rate_lim_adr 0x00007310 +#define HW_ATL_TPS_DESC_RATE_LIM_ADR 0x00007310 /* bitmask for bitfield desc_rate_limit[a:0] */ -#define tps_desc_rate_lim_msk 0x000007ff +#define HW_ATL_TPS_DESC_RATE_LIM_MSK 0x000007ff /* inverted bitmask for bitfield desc_rate_limit[a:0] */ -#define tps_desc_rate_lim_mskn 0xfffff800 +#define HW_ATL_TPS_DESC_RATE_LIM_MSKN 0xfffff800 /* lower bit position of bitfield desc_rate_limit[a:0] */ -#define tps_desc_rate_lim_shift 0 +#define HW_ATL_TPS_DESC_RATE_LIM_SHIFT 0 /* width of bitfield desc_rate_limit[a:0] */ -#define tps_desc_rate_lim_width 11 +#define HW_ATL_TPS_DESC_RATE_LIM_WIDTH 11 /* default value of bitfield desc_rate_limit[a:0] */ -#define tps_desc_rate_lim_default 0x0 +#define HW_ATL_TPS_DESC_RATE_LIM_DEFAULT 0x0 /* tx desc_tc_arb_mode[1:0] bitfield definitions * preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]". 
@@ -2072,17 +2080,17 @@ */ /* register address for bitfield desc_tc_arb_mode[1:0] */ -#define tps_desc_tc_arb_mode_adr 0x00007200 +#define HW_ATL_TPS_DESC_TC_ARB_MODE_ADR 0x00007200 /* bitmask for bitfield desc_tc_arb_mode[1:0] */ -#define tps_desc_tc_arb_mode_msk 0x00000003 +#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSK 0x00000003 /* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */ -#define tps_desc_tc_arb_mode_mskn 0xfffffffc +#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSKN 0xfffffffc /* lower bit position of bitfield desc_tc_arb_mode[1:0] */ -#define tps_desc_tc_arb_mode_shift 0 +#define HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT 0 /* width of bitfield desc_tc_arb_mode[1:0] */ -#define tps_desc_tc_arb_mode_width 2 +#define HW_ATL_TPS_DESC_TC_ARB_MODE_WIDTH 2 /* default value of bitfield desc_tc_arb_mode[1:0] */ -#define tps_desc_tc_arb_mode_default 0x0 +#define HW_ATL_TPS_DESC_TC_ARB_MODE_DEFAULT 0x0 /* tx desc_tc{t}_credit_max[b:0] bitfield definitions * preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]". @@ -2091,17 +2099,17 @@ */ /* register address for bitfield desc_tc{t}_credit_max[b:0] */ -#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4) +#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc) (0x00007210 + (tc) * 0x4) /* bitmask for bitfield desc_tc{t}_credit_max[b:0] */ -#define tps_desc_tctcredit_max_msk 0x0fff0000 +#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK 0x0fff0000 /* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */ -#define tps_desc_tctcredit_max_mskn 0xf000ffff +#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSKN 0xf000ffff /* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */ -#define tps_desc_tctcredit_max_shift 16 +#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT 16 /* width of bitfield desc_tc{t}_credit_max[b:0] */ -#define tps_desc_tctcredit_max_width 12 +#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_WIDTH 12 /* default value of bitfield desc_tc{t}_credit_max[b:0] */ -#define tps_desc_tctcredit_max_default 0x0 +#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_DEFAULT 0x0 /* tx desc_tc{t}_weight[8:0] bitfield definitions * preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]". @@ -2110,17 +2118,17 @@ */ /* register address for bitfield desc_tc{t}_weight[8:0] */ -#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4) +#define HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc) (0x00007210 + (tc) * 0x4) /* bitmask for bitfield desc_tc{t}_weight[8:0] */ -#define tps_desc_tctweight_msk 0x000001ff +#define HW_ATL_TPS_DESC_TCTWEIGHT_MSK 0x000001ff /* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */ -#define tps_desc_tctweight_mskn 0xfffffe00 +#define HW_ATL_TPS_DESC_TCTWEIGHT_MSKN 0xfffffe00 /* lower bit position of bitfield desc_tc{t}_weight[8:0] */ -#define tps_desc_tctweight_shift 0 +#define HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT 0 /* width of bitfield desc_tc{t}_weight[8:0] */ -#define tps_desc_tctweight_width 9 +#define HW_ATL_TPS_DESC_TCTWEIGHT_WIDTH 9 /* default value of bitfield desc_tc{t}_weight[8:0] */ -#define tps_desc_tctweight_default 0x0 +#define HW_ATL_TPS_DESC_TCTWEIGHT_DEFAULT 0x0 /* tx desc_vm_arb_mode bitfield definitions * preprocessor definitions for the bitfield "desc_vm_arb_mode". 
@@ -2128,17 +2136,17 @@ */ /* register address for bitfield desc_vm_arb_mode */ -#define tps_desc_vm_arb_mode_adr 0x00007300 +#define HW_ATL_TPS_DESC_VM_ARB_MODE_ADR 0x00007300 /* bitmask for bitfield desc_vm_arb_mode */ -#define tps_desc_vm_arb_mode_msk 0x00000001 +#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSK 0x00000001 /* inverted bitmask for bitfield desc_vm_arb_mode */ -#define tps_desc_vm_arb_mode_mskn 0xfffffffe +#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSKN 0xfffffffe /* lower bit position of bitfield desc_vm_arb_mode */ -#define tps_desc_vm_arb_mode_shift 0 +#define HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT 0 /* width of bitfield desc_vm_arb_mode */ -#define tps_desc_vm_arb_mode_width 1 +#define HW_ATL_TPS_DESC_VM_ARB_MODE_WIDTH 1 /* default value of bitfield desc_vm_arb_mode */ -#define tps_desc_vm_arb_mode_default 0x0 +#define HW_ATL_TPS_DESC_VM_ARB_MODE_DEFAULT 0x0 /* tx data_tc{t}_credit_max[b:0] bitfield definitions * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]". @@ -2147,17 +2155,17 @@ */ /* register address for bitfield data_tc{t}_credit_max[b:0] */ -#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4) +#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4) /* bitmask for bitfield data_tc{t}_credit_max[b:0] */ -#define tps_data_tctcredit_max_msk 0x0fff0000 +#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK 0x0fff0000 /* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */ -#define tps_data_tctcredit_max_mskn 0xf000ffff +#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSKN 0xf000ffff /* lower bit position of bitfield data_tc{t}_credit_max[b:0] */ -#define tps_data_tctcredit_max_shift 16 +#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT 16 /* width of bitfield data_tc{t}_credit_max[b:0] */ -#define tps_data_tctcredit_max_width 12 +#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_WIDTH 12 /* default value of bitfield data_tc{t}_credit_max[b:0] */ -#define tps_data_tctcredit_max_default 0x0 +#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0 /* tx data_tc{t}_weight[8:0] bitfield definitions * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]". @@ -2166,17 +2174,17 @@ */ /* register address for bitfield data_tc{t}_weight[8:0] */ -#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4) +#define HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4) /* bitmask for bitfield data_tc{t}_weight[8:0] */ -#define tps_data_tctweight_msk 0x000001ff +#define HW_ATL_TPS_DATA_TCTWEIGHT_MSK 0x000001ff /* inverted bitmask for bitfield data_tc{t}_weight[8:0] */ -#define tps_data_tctweight_mskn 0xfffffe00 +#define HW_ATL_TPS_DATA_TCTWEIGHT_MSKN 0xfffffe00 /* lower bit position of bitfield data_tc{t}_weight[8:0] */ -#define tps_data_tctweight_shift 0 +#define HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT 0 /* width of bitfield data_tc{t}_weight[8:0] */ -#define tps_data_tctweight_width 9 +#define HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH 9 /* default value of bitfield data_tc{t}_weight[8:0] */ -#define tps_data_tctweight_default 0x0 +#define HW_ATL_TPS_DATA_TCTWEIGHT_DEFAULT 0x0 /* tx reg_res_dsbl bitfield definitions * preprocessor definitions for the bitfield "reg_res_dsbl". 
@@ -2184,17 +2192,17 @@ */ /* register address for bitfield reg_res_dsbl */ -#define tx_reg_res_dsbl_adr 0x00007000 +#define HW_ATL_TX_REG_RES_DSBL_ADR 0x00007000 /* bitmask for bitfield reg_res_dsbl */ -#define tx_reg_res_dsbl_msk 0x20000000 +#define HW_ATL_TX_REG_RES_DSBL_MSK 0x20000000 /* inverted bitmask for bitfield reg_res_dsbl */ -#define tx_reg_res_dsbl_mskn 0xdfffffff +#define HW_ATL_TX_REG_RES_DSBL_MSKN 0xdfffffff /* lower bit position of bitfield reg_res_dsbl */ -#define tx_reg_res_dsbl_shift 29 +#define HW_ATL_TX_REG_RES_DSBL_SHIFT 29 /* width of bitfield reg_res_dsbl */ -#define tx_reg_res_dsbl_width 1 +#define HW_ATL_TX_REG_RES_DSBL_WIDTH 1 /* default value of bitfield reg_res_dsbl */ -#define tx_reg_res_dsbl_default 0x1 +#define HW_ATL_TX_REG_RES_DSBL_DEFAULT 0x1 /* mac_phy register access busy bitfield definitions * preprocessor definitions for the bitfield "register access busy". @@ -2202,15 +2210,15 @@ */ /* register address for bitfield register access busy */ -#define msm_reg_access_busy_adr 0x00004400 +#define HW_ATL_MSM_REG_ACCESS_BUSY_ADR 0x00004400 /* bitmask for bitfield register access busy */ -#define msm_reg_access_busy_msk 0x00001000 +#define HW_ATL_MSM_REG_ACCESS_BUSY_MSK 0x00001000 /* inverted bitmask for bitfield register access busy */ -#define msm_reg_access_busy_mskn 0xffffefff +#define HW_ATL_MSM_REG_ACCESS_BUSY_MSKN 0xffffefff /* lower bit position of bitfield register access busy */ -#define msm_reg_access_busy_shift 12 +#define HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT 12 /* width of bitfield register access busy */ -#define msm_reg_access_busy_width 1 +#define HW_ATL_MSM_REG_ACCESS_BUSY_WIDTH 1 /* mac_phy msm register address[7:0] bitfield definitions * preprocessor definitions for the bitfield "msm register address[7:0]". @@ -2218,17 +2226,17 @@ */ /* register address for bitfield msm register address[7:0] */ -#define msm_reg_addr_adr 0x00004400 +#define HW_ATL_MSM_REG_ADDR_ADR 0x00004400 /* bitmask for bitfield msm register address[7:0] */ -#define msm_reg_addr_msk 0x000000ff +#define HW_ATL_MSM_REG_ADDR_MSK 0x000000ff /* inverted bitmask for bitfield msm register address[7:0] */ -#define msm_reg_addr_mskn 0xffffff00 +#define HW_ATL_MSM_REG_ADDR_MSKN 0xffffff00 /* lower bit position of bitfield msm register address[7:0] */ -#define msm_reg_addr_shift 0 +#define HW_ATL_MSM_REG_ADDR_SHIFT 0 /* width of bitfield msm register address[7:0] */ -#define msm_reg_addr_width 8 +#define HW_ATL_MSM_REG_ADDR_WIDTH 8 /* default value of bitfield msm register address[7:0] */ -#define msm_reg_addr_default 0x0 +#define HW_ATL_MSM_REG_ADDR_DEFAULT 0x0 /* mac_phy register read strobe bitfield definitions * preprocessor definitions for the bitfield "register read strobe". 
@@ -2236,17 +2244,17 @@ */ /* register address for bitfield register read strobe */ -#define msm_reg_rd_strobe_adr 0x00004400 +#define HW_ATL_MSM_REG_RD_STROBE_ADR 0x00004400 /* bitmask for bitfield register read strobe */ -#define msm_reg_rd_strobe_msk 0x00000200 +#define HW_ATL_MSM_REG_RD_STROBE_MSK 0x00000200 /* inverted bitmask for bitfield register read strobe */ -#define msm_reg_rd_strobe_mskn 0xfffffdff +#define HW_ATL_MSM_REG_RD_STROBE_MSKN 0xfffffdff /* lower bit position of bitfield register read strobe */ -#define msm_reg_rd_strobe_shift 9 +#define HW_ATL_MSM_REG_RD_STROBE_SHIFT 9 /* width of bitfield register read strobe */ -#define msm_reg_rd_strobe_width 1 +#define HW_ATL_MSM_REG_RD_STROBE_WIDTH 1 /* default value of bitfield register read strobe */ -#define msm_reg_rd_strobe_default 0x0 +#define HW_ATL_MSM_REG_RD_STROBE_DEFAULT 0x0 /* mac_phy msm register read data[31:0] bitfield definitions * preprocessor definitions for the bitfield "msm register read data[31:0]". @@ -2254,15 +2262,15 @@ */ /* register address for bitfield msm register read data[31:0] */ -#define msm_reg_rd_data_adr 0x00004408 +#define HW_ATL_MSM_REG_RD_DATA_ADR 0x00004408 /* bitmask for bitfield msm register read data[31:0] */ -#define msm_reg_rd_data_msk 0xffffffff +#define HW_ATL_MSM_REG_RD_DATA_MSK 0xffffffff /* inverted bitmask for bitfield msm register read data[31:0] */ -#define msm_reg_rd_data_mskn 0x00000000 +#define HW_ATL_MSM_REG_RD_DATA_MSKN 0x00000000 /* lower bit position of bitfield msm register read data[31:0] */ -#define msm_reg_rd_data_shift 0 +#define HW_ATL_MSM_REG_RD_DATA_SHIFT 0 /* width of bitfield msm register read data[31:0] */ -#define msm_reg_rd_data_width 32 +#define HW_ATL_MSM_REG_RD_DATA_WIDTH 32 /* mac_phy msm register write data[31:0] bitfield definitions * preprocessor definitions for the bitfield "msm register write data[31:0]". @@ -2270,17 +2278,17 @@ */ /* register address for bitfield msm register write data[31:0] */ -#define msm_reg_wr_data_adr 0x00004404 +#define HW_ATL_MSM_REG_WR_DATA_ADR 0x00004404 /* bitmask for bitfield msm register write data[31:0] */ -#define msm_reg_wr_data_msk 0xffffffff +#define HW_ATL_MSM_REG_WR_DATA_MSK 0xffffffff /* inverted bitmask for bitfield msm register write data[31:0] */ -#define msm_reg_wr_data_mskn 0x00000000 +#define HW_ATL_MSM_REG_WR_DATA_MSKN 0x00000000 /* lower bit position of bitfield msm register write data[31:0] */ -#define msm_reg_wr_data_shift 0 +#define HW_ATL_MSM_REG_WR_DATA_SHIFT 0 /* width of bitfield msm register write data[31:0] */ -#define msm_reg_wr_data_width 32 +#define HW_ATL_MSM_REG_WR_DATA_WIDTH 32 /* default value of bitfield msm register write data[31:0] */ -#define msm_reg_wr_data_default 0x0 +#define HW_ATL_MSM_REG_WR_DATA_DEFAULT 0x0 /* mac_phy register write strobe bitfield definitions * preprocessor definitions for the bitfield "register write strobe". 
@@ -2288,17 +2296,17 @@ */ /* register address for bitfield register write strobe */ -#define msm_reg_wr_strobe_adr 0x00004400 +#define HW_ATL_MSM_REG_WR_STROBE_ADR 0x00004400 /* bitmask for bitfield register write strobe */ -#define msm_reg_wr_strobe_msk 0x00000100 +#define HW_ATL_MSM_REG_WR_STROBE_MSK 0x00000100 /* inverted bitmask for bitfield register write strobe */ -#define msm_reg_wr_strobe_mskn 0xfffffeff +#define HW_ATL_MSM_REG_WR_STROBE_MSKN 0xfffffeff /* lower bit position of bitfield register write strobe */ -#define msm_reg_wr_strobe_shift 8 +#define HW_ATL_MSM_REG_WR_STROBE_SHIFT 8 /* width of bitfield register write strobe */ -#define msm_reg_wr_strobe_width 1 +#define HW_ATL_MSM_REG_WR_STROBE_WIDTH 1 /* default value of bitfield register write strobe */ -#define msm_reg_wr_strobe_default 0x0 +#define HW_ATL_MSM_REG_WR_STROBE_DEFAULT 0x0 /* mif soft reset bitfield definitions * preprocessor definitions for the bitfield "soft reset". @@ -2306,17 +2314,17 @@ */ /* register address for bitfield soft reset */ -#define glb_soft_res_adr 0x00000000 +#define HW_ATL_GLB_SOFT_RES_ADR 0x00000000 /* bitmask for bitfield soft reset */ -#define glb_soft_res_msk 0x00008000 +#define HW_ATL_GLB_SOFT_RES_MSK 0x00008000 /* inverted bitmask for bitfield soft reset */ -#define glb_soft_res_mskn 0xffff7fff +#define HW_ATL_GLB_SOFT_RES_MSKN 0xffff7fff /* lower bit position of bitfield soft reset */ -#define glb_soft_res_shift 15 +#define HW_ATL_GLB_SOFT_RES_SHIFT 15 /* width of bitfield soft reset */ -#define glb_soft_res_width 1 +#define HW_ATL_GLB_SOFT_RES_WIDTH 1 /* default value of bitfield soft reset */ -#define glb_soft_res_default 0x0 +#define HW_ATL_GLB_SOFT_RES_DEFAULT 0x0 /* mif register reset disable bitfield definitions * preprocessor definitions for the bitfield "register reset disable". 
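Read together, the MSM definitions renamed above encode an indirect access port rather than plain registers: software latches an MSM-internal address, pulses a read or write strobe, polls the busy flag, and only then samples the data register. A sketch of the read side under that assumption; the function name is illustrative, and AQ_HW_WAIT_FOR is the driver's existing poll macro, which stores -ETIME into an in-scope err on timeout just as in hw_atl_utils_fw_downld_dwords() further down:

static int msm_reg_read_sketch(struct aq_hw_s *self, u8 msm_addr, u32 *val)
{
	int err = 0;

	/* Latch the MSM-internal register address. */
	aq_hw_write_reg_bit(self, HW_ATL_MSM_REG_ADDR_ADR,
			    HW_ATL_MSM_REG_ADDR_MSK,
			    HW_ATL_MSM_REG_ADDR_SHIFT, msm_addr);
	/* Pulse the read strobe to start the transaction. */
	aq_hw_write_reg_bit(self, HW_ATL_MSM_REG_RD_STROBE_ADR,
			    HW_ATL_MSM_REG_RD_STROBE_MSK,
			    HW_ATL_MSM_REG_RD_STROBE_SHIFT, 1U);
	/* Poll the busy bit every 10us, up to 1000 tries. */
	AQ_HW_WAIT_FOR(!(aq_hw_read_reg(self, HW_ATL_MSM_REG_ACCESS_BUSY_ADR) &
			 HW_ATL_MSM_REG_ACCESS_BUSY_MSK), 10U, 1000U);
	if (err < 0)
		return err;

	*val = aq_hw_read_reg(self, HW_ATL_MSM_REG_RD_DATA_ADR);
	return 0;
}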
@@ -2324,27 +2332,27 @@ */ /* register address for bitfield register reset disable */ -#define glb_reg_res_dis_adr 0x00000000 +#define HW_ATL_GLB_REG_RES_DIS_ADR 0x00000000 /* bitmask for bitfield register reset disable */ -#define glb_reg_res_dis_msk 0x00004000 +#define HW_ATL_GLB_REG_RES_DIS_MSK 0x00004000 /* inverted bitmask for bitfield register reset disable */ -#define glb_reg_res_dis_mskn 0xffffbfff +#define HW_ATL_GLB_REG_RES_DIS_MSKN 0xffffbfff /* lower bit position of bitfield register reset disable */ -#define glb_reg_res_dis_shift 14 +#define HW_ATL_GLB_REG_RES_DIS_SHIFT 14 /* width of bitfield register reset disable */ -#define glb_reg_res_dis_width 1 +#define HW_ATL_GLB_REG_RES_DIS_WIDTH 1 /* default value of bitfield register reset disable */ -#define glb_reg_res_dis_default 0x1 +#define HW_ATL_GLB_REG_RES_DIS_DEFAULT 0x1 /* tx dma debug control definitions */ -#define tx_dma_debug_ctl_adr 0x00008920u +#define HW_ATL_TX_DMA_DEBUG_CTL_ADR 0x00008920u /* tx dma descriptor base address msw definitions */ -#define tx_dma_desc_base_addrmsw_adr(descriptor) \ +#define HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \ (0x00007c04u + (descriptor) * 0x40) /* tx dma total request limit */ -#define tx_dma_total_req_limit_adr 0x00007b20u +#define HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR 0x00007b20u /* tx interrupt moderation control register definitions * Preprocessor definitions for TX Interrupt Moderation Control Register @@ -2352,7 +2360,7 @@ * Parameter: queue {Q} | stride size 0x4 | range [0, 31] */ -#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4) +#define HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue) (0x00008980u + (queue) * 0x4) /* pcie reg_res_dsbl bitfield definitions * preprocessor definitions for the bitfield "reg_res_dsbl". @@ -2360,22 +2368,23 @@ */ /* register address for bitfield reg_res_dsbl */ -#define pci_reg_res_dsbl_adr 0x00001000 +#define HW_ATL_PCI_REG_RES_DSBL_ADR 0x00001000 /* bitmask for bitfield reg_res_dsbl */ -#define pci_reg_res_dsbl_msk 0x20000000 +#define HW_ATL_PCI_REG_RES_DSBL_MSK 0x20000000 /* inverted bitmask for bitfield reg_res_dsbl */ -#define pci_reg_res_dsbl_mskn 0xdfffffff +#define HW_ATL_PCI_REG_RES_DSBL_MSKN 0xdfffffff /* lower bit position of bitfield reg_res_dsbl */ -#define pci_reg_res_dsbl_shift 29 +#define HW_ATL_PCI_REG_RES_DSBL_SHIFT 29 /* width of bitfield reg_res_dsbl */ -#define pci_reg_res_dsbl_width 1 +#define HW_ATL_PCI_REG_RES_DSBL_WIDTH 1 /* default value of bitfield reg_res_dsbl */ -#define pci_reg_res_dsbl_default 0x1 +#define HW_ATL_PCI_REG_RES_DSBL_DEFAULT 0x1 /* PCI core control register */ -#define pci_reg_control6_adr 0x1014u +#define HW_ATL_PCI_REG_CONTROL6_ADR 0x1014u /* global microprocessor scratch pad definitions */ -#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) +#define HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp) \ + (0x00000300u + (scratch_scp) * 0x4) #endif /* HW_ATL_LLH_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index f2ce12ed4218..967f0fd07fcf 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -11,41 +11,244 @@ * abstraction layer. 
*/ -#include "../aq_hw.h" +#include "../aq_nic.h" #include "../aq_hw_utils.h" -#include "../aq_pci_func.h" -#include "../aq_ring.h" -#include "../aq_vec.h" #include "hw_atl_utils.h" #include "hw_atl_llh.h" +#include "hw_atl_llh_internal.h" #include <linux/random.h> #define HW_ATL_UCP_0X370_REG 0x0370U #define HW_ATL_FW_SM_RAM 0x2U +#define HW_ATL_MPI_FW_VERSION 0x18 #define HW_ATL_MPI_CONTROL_ADR 0x0368U #define HW_ATL_MPI_STATE_ADR 0x036CU #define HW_ATL_MPI_STATE_MSK 0x00FFU #define HW_ATL_MPI_STATE_SHIFT 0U -#define HW_ATL_MPI_SPEED_MSK 0xFFFFU +#define HW_ATL_MPI_SPEED_MSK 0xFFFF0000U #define HW_ATL_MPI_SPEED_SHIFT 16U -static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, - u32 *p, u32 cnt) +#define HW_ATL_MPI_DAISY_CHAIN_STATUS 0x704 +#define HW_ATL_MPI_BOOT_EXIT_CODE 0x388 + +#define HW_ATL_MAC_PHY_CONTROL 0x4000 +#define HW_ATL_MAC_PHY_MPI_RESET_BIT 0x1D + +#define HW_ATL_FW_VER_1X 0x01050006U +#define HW_ATL_FW_VER_2X 0x02000000U +#define HW_ATL_FW_VER_3X 0x03000000U + +#define FORCE_FLASHLESS 0 + +static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual); + +int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) { int err = 0; - AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self, - HW_ATL_FW_SM_RAM) == 1U, - 1U, 10000U); + err = hw_atl_utils_soft_reset(self); + if (err) + return err; + + hw_atl_utils_hw_chip_features_init(self, + &self->chip_features); + + hw_atl_utils_get_fw_version(self, &self->fw_ver_actual); + + if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, + self->fw_ver_actual) == 0) { + *fw_ops = &aq_fw_1x_ops; + } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X, + self->fw_ver_actual) == 0) { + *fw_ops = &aq_fw_2x_ops; + } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X, + self->fw_ver_actual) == 0) { + *fw_ops = &aq_fw_2x_ops; + } else { + aq_pr_err("Bad FW version detected: %x\n", + self->fw_ver_actual); + return -EOPNOTSUPP; + } + self->aq_fw_ops = *fw_ops; + err = self->aq_fw_ops->init(self); + return err; +} + +static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self) +{ + int k = 0; + u32 gsr; + + aq_hw_write_reg(self, 0x404, 0x40e1); + AQ_HW_SLEEP(50); + + /* Cleanup SPI */ + aq_hw_write_reg(self, 0x534, 0xA0); + aq_hw_write_reg(self, 0x100, 0x9F); + aq_hw_write_reg(self, 0x100, 0x809F); + + gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR); + aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000); + + /* Kickstart MAC */ + aq_hw_write_reg(self, 0x404, 0x80e0); + aq_hw_write_reg(self, 0x32a8, 0x0); + aq_hw_write_reg(self, 0x520, 0x1); + AQ_HW_SLEEP(10); + aq_hw_write_reg(self, 0x404, 0x180e0); + + for (k = 0; k < 1000; k++) { + u32 flb_status = aq_hw_read_reg(self, + HW_ATL_MPI_DAISY_CHAIN_STATUS); + + flb_status = flb_status & 0x10; + if (flb_status) + break; + AQ_HW_SLEEP(10); + } + if (k == 1000) { + aq_pr_err("MAC kickstart failed\n"); + return -EIO; + } + + /* FW reset */ + aq_hw_write_reg(self, 0x404, 0x80e0); + AQ_HW_SLEEP(50); + aq_hw_write_reg(self, 0x3a0, 0x1); + + /* Kickstart PHY - skipped */ + + /* Global software reset*/ + hw_atl_rx_rx_reg_res_dis_set(self, 0U); + hw_atl_tx_tx_reg_res_dis_set(self, 0U); + aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL, + BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT), + HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0); + gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR); + aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000); + + for (k = 0; k < 1000; k++) { + u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION); + + if (fw_state) + break; + 
AQ_HW_SLEEP(10); + } + if (k == 1000) { + aq_pr_err("FW kickstart failed\n"); + return -EIO; + } + + return 0; +} + +static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) +{ + u32 gsr, rbl_status; + int k; + + aq_hw_write_reg(self, 0x404, 0x40e1); + aq_hw_write_reg(self, 0x3a0, 0x1); + aq_hw_write_reg(self, 0x32a8, 0x0); + + /* Alter RBL status */ + aq_hw_write_reg(self, 0x388, 0xDEAD); + + /* Global software reset*/ + hw_atl_rx_rx_reg_res_dis_set(self, 0U); + hw_atl_tx_tx_reg_res_dis_set(self, 0U); + aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL, + BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT), + HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0); + gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR); + aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, + (gsr & 0xFFFFBFFF) | 0x8000); + + if (FORCE_FLASHLESS) + aq_hw_write_reg(self, 0x534, 0x0); + + aq_hw_write_reg(self, 0x404, 0x40e0); + + /* Wait for RBL boot */ + for (k = 0; k < 1000; k++) { + rbl_status = aq_hw_read_reg(self, 0x388) & 0xFFFF; + if (rbl_status && rbl_status != 0xDEAD) + break; + AQ_HW_SLEEP(10); + } + if (!rbl_status || rbl_status == 0xDEAD) { + aq_pr_err("RBL Restart failed"); + return -EIO; + } + + /* Restore NVR */ + if (FORCE_FLASHLESS) + aq_hw_write_reg(self, 0x534, 0xA0); + + if (rbl_status == 0xF1A7) { + aq_pr_err("No FW detected. Dynamic FW load not implemented\n"); + return -ENOTSUPP; + } + + for (k = 0; k < 1000; k++) { + u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION); + + if (fw_state) + break; + AQ_HW_SLEEP(10); + } + if (k == 1000) { + aq_pr_err("FW kickstart failed\n"); + return -EIO; + } + + return 0; +} + +int hw_atl_utils_soft_reset(struct aq_hw_s *self) +{ + int k; + u32 boot_exit_code = 0; + + for (k = 0; k < 1000; ++k) { + u32 flb_status = aq_hw_read_reg(self, + HW_ATL_MPI_DAISY_CHAIN_STATUS); + boot_exit_code = aq_hw_read_reg(self, + HW_ATL_MPI_BOOT_EXIT_CODE); + if (flb_status != 0x06000000 || boot_exit_code != 0) + break; + } + + if (k == 1000) { + aq_pr_err("Neither RBL nor FLB firmware started\n"); + return -EOPNOTSUPP; + } + + self->rbl_enabled = (boot_exit_code != 0); + + if (self->rbl_enabled) + return hw_atl_utils_soft_reset_rbl(self); + else + return hw_atl_utils_soft_reset_flb(self); +} + +int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, + u32 *p, u32 cnt) +{ + int err = 0; + + AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self, + HW_ATL_FW_SM_RAM) == 1U, + 1U, 10000U); if (err < 0) { bool is_locked; - reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); - is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); + hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); if (!is_locked) { err = -ETIME; goto err_exit; @@ -66,7 +269,7 @@ static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, *(p++) = aq_hw_read_reg(self, 0x0000020CU); } - reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); err_exit: return err; @@ -78,7 +281,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, int err = 0; bool is_locked; - is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); + is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); if (!is_locked) { err = -ETIME; goto err_exit; @@ -97,7 +300,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, } } - reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); err_exit: return err; @@ -119,7 +322,7 @@ err_exit: } 
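hw_atl_utils_initfw() above chooses between aq_fw_1x_ops and aq_fw_2x_ops by calling hw_atl_utils_ver_match(), whose body lies outside these hunks. Since 1.x/2.x/3.x firmware is told apart by the HW_ATL_FW_VER_* constants and 3.x deliberately reuses the 2.x ops, the check presumably keys on the major-version byte, with the minor part as a floor; a hedged reconstruction, not quoted from the patch:

static int hw_atl_utils_ver_match_sketch(u32 ver_expected, u32 ver_actual)
{
	const u32 dw_major_mask = 0xff000000U;
	const u32 dw_minor_mask = 0x00ffffffU;

	/* Major versions must match exactly. */
	if (dw_major_mask & (ver_expected ^ ver_actual))
		return -EOPNOTSUPP;
	/* The running minor/build part must be at least the baseline. */
	if ((dw_minor_mask & ver_actual) < (dw_minor_mask & ver_expected))
		return -EOPNOTSUPP;
	return 0;	/* 0 means "matches", as the == 0 checks above expect */
}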
static int hw_atl_utils_init_ucp(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps) + const struct aq_hw_caps_s *aq_hw_caps) { int err = 0; @@ -133,20 +336,12 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self, aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370); } - reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U); + hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U); /* check 10 times by 1ms */ - AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr = + AQ_HW_WAIT_FOR(0U != (self->mbox_addr = aq_hw_read_reg(self, 0x360U)), 1000U, 10U); - err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected, - aq_hw_read_reg(self, 0x18U)); - - if (err < 0) - pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n", - AQ_CFG_DRV_NAME, - aq_hw_caps->fw_ver_expected, - aq_hw_read_reg(self, 0x18U)); return err; } @@ -174,14 +369,14 @@ static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) err = -1; goto err_exit; } - err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr, - (u32 *)(void *)&PHAL_ATLANTIC->rpc, + err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, + (u32 *)(void *)&self->rpc, (rpc_size + sizeof(u32) - sizeof(u8)) / sizeof(u32)); if (err < 0) goto err_exit; - sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid); + sw.tid = 0xFFFFU & (++self->rpc_tid); sw.len = (u16)rpc_size; aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val); @@ -199,7 +394,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, do { sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR); - PHAL_ATLANTIC->rpc_tid = sw.tid; + self->rpc_tid = sw.tid; AQ_HW_WAIT_FOR(sw.tid == (fw.val = @@ -221,9 +416,9 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, if (fw.len) { err = hw_atl_utils_fw_downld_dwords(self, - PHAL_ATLANTIC->rpc_addr, + self->rpc_addr, (u32 *)(void *) - &PHAL_ATLANTIC->rpc, + &self->rpc, (fw.len + sizeof(u32) - sizeof(u8)) / sizeof(u32)); @@ -231,19 +426,18 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, goto err_exit; } - *rpc = &PHAL_ATLANTIC->rpc; + *rpc = &self->rpc; } err_exit: return err; } -static int hw_atl_utils_mpi_create(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps) +static int hw_atl_utils_mpi_create(struct aq_hw_s *self) { int err = 0; - err = hw_atl_utils_init_ucp(self, aq_hw_caps); + err = hw_atl_utils_init_ucp(self, self->aq_nic_cfg->aq_hw_caps); if (err < 0) goto err_exit; @@ -259,7 +453,7 @@ int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, struct hw_aq_atl_utils_mbox_header *pmbox) { return hw_atl_utils_fw_downld_dwords(self, - PHAL_ATLANTIC->mbox_addr, + self->mbox_addr, (u32 *)(void *)pmbox, sizeof(*pmbox) / sizeof(u32)); } @@ -270,7 +464,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, int err = 0; err = hw_atl_utils_fw_downld_dwords(self, - PHAL_ATLANTIC->mbox_addr, + self->mbox_addr, (u32 *)(void *)pmbox, sizeof(*pmbox) / sizeof(u32)); if (err < 0) @@ -281,27 +475,27 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, self->aq_nic_cfg->mtu : 1514U; pmbox->stats.ubrc = pmbox->stats.uprc * mtu; pmbox->stats.ubtc = pmbox->stats.uptc * mtu; - pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc); + pmbox->stats.dpc = atomic_read(&self->dpc); } else { - pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self); + pmbox->stats.dpc = hw_atl_reg_rx_dma_stat_counter7get(self); } err_exit:; } -int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed, - enum hal_atl_utils_fw_state_e state) +int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed) { - u32 ucp_0x368 = 
0; + u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); - ucp_0x368 = (speed << HW_ATL_MPI_SPEED_SHIFT) | state; - aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ucp_0x368); + val = (val & HW_ATL_MPI_STATE_MSK) | (speed << HW_ATL_MPI_SPEED_SHIFT); + aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val); return 0; } void hw_atl_utils_mpi_set(struct aq_hw_s *self, - enum hal_atl_utils_fw_state_e state, u32 speed) + enum hal_atl_utils_fw_state_e state, + u32 speed) { int err = 0; u32 transaction_id = 0; @@ -320,11 +514,22 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self, goto err_exit; } - err = hw_atl_utils_mpi_set_speed(self, speed, state); + aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, + (speed << HW_ATL_MPI_SPEED_SHIFT) | state); err_exit:; } +static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state) +{ + u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); + + val = state | (val & HW_ATL_MPI_SPEED_MSK); + aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val); + return 0; +} + int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) { u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR); @@ -365,7 +570,6 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) } int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps, u8 *mac) { int err = 0; @@ -373,15 +577,6 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, u32 l = 0U; u32 mac_addr[2]; - self->mmio = aq_pci_func_get_mmio(self->aq_pci_func); - - hw_atl_utils_hw_chip_features_init(self, - &PHAL_ATLANTIC_A0->chip_features); - - err = hw_atl_utils_mpi_create(self, aq_hw_caps); - if (err < 0) - goto err_exit; - if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) { unsigned int rnd = 0; unsigned int ucp_0x370 = 0; @@ -396,7 +591,7 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, aq_hw_read_reg(self, 0x00000374U) + (40U * 4U), mac_addr, - AQ_DIMOF(mac_addr)); + ARRAY_SIZE(mac_addr)); if (err < 0) { mac_addr[0] = 0U; mac_addr[1] = 0U; @@ -427,7 +622,6 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, mac[0] = (u8)(0xFFU & h); } -err_exit: return err; } @@ -465,7 +659,7 @@ unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps) void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p) { u32 chip_features = 0U; - u32 val = reg_glb_mif_id_get(self); + u32 val = hw_atl_reg_glb_mif_id_get(self); u32 mif_rev = val & 0xFFU; if ((3U & mif_rev) == 1U) { @@ -500,13 +694,13 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self, int hw_atl_utils_update_stats(struct aq_hw_s *self) { - struct hw_atl_s *hw_self = PHAL_ATLANTIC; struct hw_aq_atl_utils_mbox mbox; hw_atl_utils_mpi_read_stats(self, &mbox); -#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \ - mbox.stats._N_ - hw_self->last_stats._N_) +#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \ + mbox.stats._N_ - self->last_stats._N_) + if (self->aq_link_status.mbps) { AQ_SDELTA(uprc); AQ_SDELTA(mprc); @@ -527,19 +721,19 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self) AQ_SDELTA(dpc); } #undef AQ_SDELTA - hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self); - hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self); - hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self); - hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self); + self->curr_stats.dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counterlsw_get(self); + self->curr_stats.dma_pkt_tc = 
hw_atl_stats_tx_dma_good_pkt_counterlsw_get(self); + self->curr_stats.dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counterlsw_get(self); + self->curr_stats.dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counterlsw_get(self); - memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats)); + memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats)); return 0; } struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self) { - return &PHAL_ATLANTIC->curr_stats; + return &self->curr_stats; } static const u32 hw_atl_utils_hw_mac_regs[] = { @@ -568,14 +762,14 @@ static const u32 hw_atl_utils_hw_mac_regs[] = { }; int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps, + const struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff) { unsigned int i = 0U; for (i = 0; i < aq_hw_caps->mac_regs_count; i++) regs_buff[i] = aq_hw_read_reg(self, - hw_atl_utils_hw_mac_regs[i]); + hw_atl_utils_hw_mac_regs[i]); return 0; } @@ -584,3 +778,13 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version) *fw_version = aq_hw_read_reg(self, 0x18U); return 0; } + +const struct aq_fw_ops aq_fw_1x_ops = { + .init = hw_atl_utils_mpi_create, + .reset = NULL, + .get_mac_permanent = hw_atl_utils_get_mac_permanent, + .set_link_speed = hw_atl_utils_mpi_set_speed, + .set_state = hw_atl_utils_mpi_set_state, + .update_link_status = hw_atl_utils_mpi_get_link_status, + .update_stats = hw_atl_utils_update_stats, +}; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index 21aeca6908d3..2c690947910a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -14,10 +14,39 @@ #ifndef HW_ATL_UTILS_H #define HW_ATL_UTILS_H -#include "../aq_common.h" - #define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); } +/* Hardware tx descriptor */ +struct __packed hw_atl_txd_s { + u64 buf_addr; + u32 ctl; + u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */ +}; + +/* Hardware tx context descriptor */ +struct __packed hw_atl_txc_s { + u32 rsvd; + u32 len; + u32 ctl; + u32 len2; +}; + +/* Hardware rx descriptor */ +struct __packed hw_atl_rxd_s { + u64 buf_addr; + u64 hdr_addr; +}; + +/* Hardware rx descriptor writeback */ +struct __packed hw_atl_rxd_wb_s { + u32 type; + u32 rss_hash; + u16 status; + u16 pkt_len; + u16 next_desc_ptr; + u16 vlan; +}; + struct __packed hw_atl_stats_s { u32 uprc; u32 mprc; @@ -126,26 +155,6 @@ struct __packed hw_aq_atl_utils_mbox { struct hw_atl_stats_s stats; }; -struct __packed hw_atl_s { - struct aq_hw_s base; - struct hw_atl_stats_s last_stats; - struct aq_stats_s curr_stats; - u64 speed; - unsigned int chip_features; - u32 fw_ver_actual; - atomic_t dpc; - u32 mbox_addr; - u32 rpc_addr; - u32 rpc_tid; - struct hw_aq_atl_utils_fw_rpc rpc; -}; - -#define SELF ((struct hw_atl_s *)self) - -#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self))) -#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self))) -#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self))) - #define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U #define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U #define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U @@ -154,7 +163,7 @@ struct __packed hw_atl_s { #define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U #define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \ - PHAL_ATLANTIC->chip_features) + self->chip_features) enum hal_atl_utils_fw_state_e { MPI_DEINIT = 0, 
@@ -171,6 +180,73 @@ enum hal_atl_utils_fw_state_e { #define HAL_ATLANTIC_RATE_100M BIT(5) #define HAL_ATLANTIC_RATE_INVALID BIT(6) +enum hw_atl_fw2x_rate { + FW2X_RATE_100M = 0x20, + FW2X_RATE_1G = 0x100, + FW2X_RATE_2G5 = 0x200, + FW2X_RATE_5G = 0x400, + FW2X_RATE_10G = 0x800, +}; + +enum hw_atl_fw2x_caps_lo { + CAPS_LO_10BASET_HD = 0x00, + CAPS_LO_10BASET_FD, + CAPS_LO_100BASETX_HD, + CAPS_LO_100BASET4_HD, + CAPS_LO_100BASET2_HD, + CAPS_LO_100BASETX_FD, + CAPS_LO_100BASET2_FD, + CAPS_LO_1000BASET_HD, + CAPS_LO_1000BASET_FD, + CAPS_LO_2P5GBASET_FD, + CAPS_LO_5GBASET_FD, + CAPS_LO_10GBASET_FD, +}; + +enum hw_atl_fw2x_caps_hi { + CAPS_HI_RESERVED1 = 0x00, + CAPS_HI_10BASET_EEE, + CAPS_HI_RESERVED2, + CAPS_HI_PAUSE, + CAPS_HI_ASYMMETRIC_PAUSE, + CAPS_HI_100BASETX_EEE, + CAPS_HI_RESERVED3, + CAPS_HI_RESERVED4, + CAPS_HI_1000BASET_FD_EEE, + CAPS_HI_2P5GBASET_FD_EEE, + CAPS_HI_5GBASET_FD_EEE, + CAPS_HI_10GBASET_FD_EEE, + CAPS_HI_RESERVED5, + CAPS_HI_RESERVED6, + CAPS_HI_RESERVED7, + CAPS_HI_RESERVED8, + CAPS_HI_RESERVED9, + CAPS_HI_CABLE_DIAG, + CAPS_HI_TEMPERATURE, + CAPS_HI_DOWNSHIFT, + CAPS_HI_PTP_AVB_EN, + CAPS_HI_MEDIA_DETECT, + CAPS_HI_LINK_DROP, + CAPS_HI_SLEEP_PROXY, + CAPS_HI_WOL, + CAPS_HI_MAC_STOP, + CAPS_HI_EXT_LOOPBACK, + CAPS_HI_INT_LOOPBACK, + CAPS_HI_EFUSE_AGENT, + CAPS_HI_WOL_TIMER, + CAPS_HI_STATISTICS, + CAPS_HI_TRANSACTION_ID, +}; + +struct aq_hw_s; +struct aq_fw_ops; +struct aq_hw_caps_s; +struct aq_hw_link_status_s; + +int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops); + +int hw_atl_utils_soft_reset(struct aq_hw_s *self); + void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, @@ -183,19 +259,15 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state, u32 speed); -int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed, - enum hal_atl_utils_fw_state_e state); - int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self); int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps, u8 *mac); unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps); int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps, + const struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); int hw_atl_utils_hw_set_power(struct aq_hw_s *self, @@ -208,5 +280,10 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); int hw_atl_utils_update_stats(struct aq_hw_s *self); struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self); +int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, + u32 *p, u32 cnt); + +extern const struct aq_fw_ops aq_fw_1x_ops; +extern const struct aq_fw_ops aq_fw_2x_ops; #endif /* HW_ATL_UTILS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c new file mode 100644 index 000000000000..8cfce95c82fc --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -0,0 +1,184 @@ +/* + * aQuantia Corporation Network Driver + * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for + * Atlantic hardware abstraction layer. 
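With aq_fw_1x_ops defined in hw_atl_utils.c and aq_fw_2x_ops declared extern above, callers stop poking MPI registers directly and funnel every firmware service through the vtable that hw_atl_utils_initfw() stored in self->aq_fw_ops. A hypothetical call site, not part of this patch, showing the dispatch:

static int query_link_sketch(struct aq_hw_s *self)
{
	/* Works for 1.x and 2.x/3.x firmware alike. */
	int err = self->aq_fw_ops->update_link_status(self);

	if (err < 0)
		return err;
	/* The op fills self->aq_link_status as a side effect. */
	return self->aq_link_status.mbps;
}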
+ */ + +#include "../aq_hw.h" +#include "../aq_hw_utils.h" +#include "../aq_pci_func.h" +#include "../aq_ring.h" +#include "../aq_vec.h" +#include "hw_atl_utils.h" +#include "hw_atl_llh.h" + +#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364 +#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360 + +#define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368 +#define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C + +#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370 +#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374 + +static int aq_fw2x_init(struct aq_hw_s *self) +{ + int err = 0; + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(0U != (self->mbox_addr = + aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)), + 1000U, 10U); + return err; +} + +static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed) +{ + enum hw_atl_fw2x_rate rate = 0; + + if (speed & AQ_NIC_RATE_10G) + rate |= FW2X_RATE_10G; + + if (speed & AQ_NIC_RATE_5G) + rate |= FW2X_RATE_5G; + + if (speed & AQ_NIC_RATE_5GSR) + rate |= FW2X_RATE_5G; + + if (speed & AQ_NIC_RATE_2GS) + rate |= FW2X_RATE_2G5; + + if (speed & AQ_NIC_RATE_1G) + rate |= FW2X_RATE_1G; + + if (speed & AQ_NIC_RATE_100M) + rate |= FW2X_RATE_100M; + + return rate; +} + +static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed) +{ + u32 val = link_speed_mask_2fw2x_ratemask(speed); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, val); + + return 0; +} + +static int aq_fw2x_set_state(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state) +{ + /* No explicit state in 2x fw */ + return 0; +} + +static int aq_fw2x_update_link_status(struct aq_hw_s *self) +{ + u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR); + u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G | + FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G); + struct aq_hw_link_status_s *link_status = &self->aq_link_status; + + if (speed) { + if (speed & FW2X_RATE_10G) + link_status->mbps = 10000; + else if (speed & FW2X_RATE_5G) + link_status->mbps = 5000; + else if (speed & FW2X_RATE_2G5) + link_status->mbps = 2500; + else if (speed & FW2X_RATE_1G) + link_status->mbps = 1000; + else if (speed & FW2X_RATE_100M) + link_status->mbps = 100; + else + link_status->mbps = 10000; + } else { + link_status->mbps = 0; + } + + return 0; +} + +int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) +{ + int err = 0; + u32 h = 0U; + u32 l = 0U; + u32 mac_addr[2] = { 0 }; + u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR); + + if (efuse_addr != 0) { + err = hw_atl_utils_fw_downld_dwords(self, + efuse_addr + (40U * 4U), + mac_addr, + ARRAY_SIZE(mac_addr)); + if (err) + return err; + mac_addr[0] = __swab32(mac_addr[0]); + mac_addr[1] = __swab32(mac_addr[1]); + } + + ether_addr_copy(mac, (u8 *)mac_addr); + + if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) { + unsigned int rnd = 0; + + get_random_bytes(&rnd, sizeof(unsigned int)); + + l = 0xE3000000U + | (0xFFFFU & rnd) + | (0x00 << 16); + h = 0x8001300EU; + + mac[5] = (u8)(0xFFU & l); + l >>= 8; + mac[4] = (u8)(0xFFU & l); + l >>= 8; + mac[3] = (u8)(0xFFU & l); + l >>= 8; + mac[2] = (u8)(0xFFU & l); + mac[1] = (u8)(0xFFU & h); + h >>= 8; + mac[0] = (u8)(0xFFU & h); + } + return err; +} + +static int aq_fw2x_update_stats(struct aq_hw_s *self) +{ + int err = 0; + u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS); + + /* Toggle statistics bit for FW to update */ + mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS); + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + /* Wait FW 
to report back */ + AQ_HW_WAIT_FOR(orig_stats_val != + (aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & + BIT(CAPS_HI_STATISTICS)), + 1U, 10000U); + if (err) + return err; + + return hw_atl_utils_update_stats(self); +} + +const struct aq_fw_ops aq_fw_2x_ops = { + .init = aq_fw2x_init, + .reset = NULL, + .get_mac_permanent = aq_fw2x_get_mac_permanent, + .set_link_speed = aq_fw2x_set_link_speed, + .set_state = aq_fw2x_set_state, + .update_link_status = aq_fw2x_update_link_status, + .update_stats = aq_fw2x_update_stats, +}; diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h index 9009f2651e70..5265b937677b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/ver.h +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h @@ -10,9 +10,9 @@ #ifndef VER_H #define VER_H -#define NIC_MAJOR_DRIVER_VERSION 1 -#define NIC_MINOR_DRIVER_VERSION 6 -#define NIC_BUILD_DRIVER_VERSION 13 +#define NIC_MAJOR_DRIVER_VERSION 2 +#define NIC_MINOR_DRIVER_VERSION 0 +#define NIC_BUILD_DRIVER_VERSION 2 #define NIC_REVISION_DRIVER_VERSION 0 #define AQ_CFG_DRV_VERSION_SUFFIX "-kern" diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index d9346e2ac720..14a59e51db67 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1716,7 +1716,6 @@ static int bcm_enet_probe(struct platform_device *pdev) struct bcm63xx_enet_platform_data *pd; struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; struct mii_bus *bus; - const char *clk_name; int i, ret; if (!bcm_enet_shared_base[0]) @@ -1751,20 +1750,8 @@ static int bcm_enet_probe(struct platform_device *pdev) dev->irq = priv->irq = res_irq->start; priv->irq_rx = res_irq_rx->start; priv->irq_tx = res_irq_tx->start; - priv->mac_id = pdev->id; - /* get rx & tx dma channel id for this mac */ - if (priv->mac_id == 0) { - priv->rx_chan = 0; - priv->tx_chan = 1; - clk_name = "enet0"; - } else { - priv->rx_chan = 2; - priv->tx_chan = 3; - clk_name = "enet1"; - } - - priv->mac_clk = devm_clk_get(&pdev->dev, clk_name); + priv->mac_clk = devm_clk_get(&pdev->dev, "enet"); if (IS_ERR(priv->mac_clk)) { ret = PTR_ERR(priv->mac_clk); goto out; @@ -1795,9 +1782,11 @@ static int bcm_enet_probe(struct platform_device *pdev) priv->dma_chan_width = pd->dma_chan_width; priv->dma_has_sram = pd->dma_has_sram; priv->dma_desc_shift = pd->dma_desc_shift; + priv->rx_chan = pd->rx_chan; + priv->tx_chan = pd->tx_chan; } - if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { + if (priv->has_phy && !priv->use_external_mii) { /* using internal PHY, enable clock */ priv->phy_clk = devm_clk_get(&pdev->dev, "ephy"); if (IS_ERR(priv->phy_clk)) { @@ -1828,7 +1817,7 @@ static int bcm_enet_probe(struct platform_device *pdev) bus->priv = priv; bus->read = bcm_enet_mdio_read_phylib; bus->write = bcm_enet_mdio_write_phylib; - sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id); + sprintf(bus->id, "%s-%d", pdev->name, pdev->id); /* only probe bus where we think the PHY is, because * the mdio read operation return 0 instead of 0xffff @@ -2139,27 +2128,25 @@ static int bcm_enetsw_open(struct net_device *dev) /* allocate rx dma ring */ size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); + p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); if (!p) { dev_err(kdev, "cannot allocate rx ring %u\n", size); ret = -ENOMEM; goto out_freeirq_tx; } - memset(p, 0, 
size); priv->rx_desc_alloc_size = size; priv->rx_desc_cpu = p; /* allocate tx dma ring */ size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); + p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); if (!p) { dev_err(kdev, "cannot allocate tx ring\n"); ret = -ENOMEM; goto out_free_rx_ring; } - memset(p, 0, size); priv->tx_desc_alloc_size = size; priv->tx_desc_cpu = p; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h index 5a66728d4776..1d3c917eb830 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h @@ -193,9 +193,6 @@ struct bcm_enet_mib_counters { struct bcm_enet_priv { - /* mac id (from platform device id) */ - int mac_id; - /* base remapped address of device */ void __iomem *base; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 087f01b4dc3a..f15a8fc6dfc9 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1216,18 +1216,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, goto out; } - /* The Ethernet switch we are interfaced with needs packets to be at - * least 64 bytes (including FCS) otherwise they will be discarded when - * they enter the switch port logic. When Broadcom tags are enabled, we - * need to make sure that packets are at least 68 bytes - * (including FCS and tag) because the length verification is done after - * the Broadcom tag is stripped off the ingress packet. - */ - if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) { - ret = NETDEV_TX_OK; - goto out; - } - /* Insert TSB and checksum infos */ if (priv->tsb_en) { skb = bcm_sysport_insert_tsb(skb, dev); diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 1d96cd594ade..8eef9fb6b1fe 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -128,8 +128,6 @@ bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring, dma_desc->ctl1 = cpu_to_le32(ctl1); } -#define ENET_BRCM_TAG_LEN 4 - static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, struct bgmac_dma_ring *ring, struct sk_buff *skb) @@ -142,18 +140,6 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, u32 flags; int i; - /* The Ethernet switch we are interfaced with needs packets to be at - * least 64 bytes (including FCS) otherwise they will be discarded when - * they enter the switch port logic. When Broadcom tags are enabled, we - * need to make sure that packets are at least 68 bytes - * (including FCS and tag) because the length verification is done after - * the Broadcom tag is stripped off the ingress packet. 
- */ - if (netdev_uses_dsa(net_dev)) { - if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) - goto err_stats; - } - if (skb->len > BGMAC_DESC_CTL1_LEN) { netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len); goto err_drop; @@ -240,7 +226,6 @@ err_dma_head: err_drop: dev_kfree_skb(skb); -err_stats: net_dev->stats.tx_dropped++; net_dev->stats.tx_errors++; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 7919f6112ecf..5e34b34f7740 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -5818,8 +5818,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) struct l2_fhdr *rx_hdr; int ret = -ENODEV; struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi; - struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; - struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnx2_tx_ring_info *txr; + struct bnx2_rx_ring_info *rxr; tx_napi = bnapi; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 8ae269ec17a1..d7c98e807ca8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -738,8 +738,9 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum); break; default: - WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", - be16_to_cpu(skb->protocol)); + netdev_WARN_ONCE(bp->dev, + "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", + be16_to_cpu(skb->protocol)); } } #endif @@ -2482,8 +2483,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) */ if (bp->dev->features & NETIF_F_LRO) fp->mode = TPA_MODE_LRO; - else if (bp->dev->features & NETIF_F_GRO && - bnx2x_mtu_allows_gro(bp->dev->mtu)) + else if (bp->dev->features & NETIF_F_GRO_HW) fp->mode = TPA_MODE_GRO; else fp->mode = TPA_MODE_DISABLED; @@ -4874,6 +4874,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu) */ dev->mtu = new_mtu; + if (!bnx2x_mtu_allows_gro(new_mtu)) + dev->features &= ~NETIF_F_GRO_HW; + if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); @@ -4903,10 +4906,13 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev, } /* TPA requires Rx CSUM offloading */ - if (!(features & NETIF_F_RXCSUM)) { + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu)) + features &= ~NETIF_F_GRO_HW; + if (features & NETIF_F_GRO_HW) features &= ~NETIF_F_LRO; - features &= ~NETIF_F_GRO; - } return features; } @@ -4933,13 +4939,8 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features) } } - /* if GRO is changed while LRO is enabled, don't force a reload */ - if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO)) - changes &= ~NETIF_F_GRO; - - /* if GRO is changed while HW TPA is off, don't force a reload */ - if ((changes & NETIF_F_GRO) && bp->disable_tpa) - changes &= ~NETIF_F_GRO; + /* Don't care about GRO changes */ + changes &= ~NETIF_F_GRO; if (changes) bnx2x_reload = true; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index ddd5d3ebd201..7b08323e3f3d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12409,8 +12409,8 @@ static int bnx2x_init_bp(struct bnx2x *bp) /* Set TPA flags */ if (bp->disable_tpa) { - bp->dev->hw_features &= ~NETIF_F_LRO; - 
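/* The bnx2x_fix_features() hunk above is the heart of this series' GRO_HW
 * conversion: hardware GRO becomes an ordinary netdev feature whose
 * dependencies are resolved, in order, in ndo_fix_features instead of being
 * rejected or forcing a driver reload. Below is a compilable toy of that
 * resolution order; the F_* macros and the mtu_allows_gro flag are invented
 * stand-ins for the NETIF_F_* bits and bnx2x_mtu_allows_gro().
 */

#include <stdio.h>

#define F_RXCSUM (1u << 0)
#define F_LRO    (1u << 1)
#define F_GRO    (1u << 2)
#define F_GRO_HW (1u << 3)

/* Order matters: an earlier rule may clear a bit that a later rule tests. */
static unsigned int fix_features(unsigned int f, int mtu_allows_gro)
{
	if (!(f & F_RXCSUM))                /* TPA requires RX checksum */
		f &= ~F_LRO;
	if (!(f & F_GRO) || !mtu_allows_gro)
		f &= ~F_GRO_HW;             /* GRO_HW is an assist to GRO */
	if (f & F_GRO_HW)
		f &= ~F_LRO;                /* GRO_HW and LRO are exclusive */
	return f;
}

int main(void)
{
	unsigned int f = fix_features(F_RXCSUM | F_LRO | F_GRO | F_GRO_HW, 1);

	printf("LRO=%d GRO_HW=%d\n", !!(f & F_LRO), !!(f & F_GRO_HW));
	return 0;
}

/* Expected output: "LRO=0 GRO_HW=1" - when both are requested, the hardware
 * path wins and LRO is quietly dropped, mirroring the hunks above and the
 * matching bnxt_fix_features() changes further down in this patch.
 */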
bp->dev->features &= ~NETIF_F_LRO; + bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); + bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); } if (CHIP_IS_E1(bp)) @@ -13282,7 +13282,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | - NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | + NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; if (!chip_is_e1x) { dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | @@ -13318,6 +13318,8 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; dev->features |= NETIF_F_HIGHDMA; + if (dev->features & NETIF_F_LRO) + dev->features &= ~NETIF_F_GRO_HW; /* Add Loopback capability to the device */ dev->hw_features |= NETIF_F_LOOPBACK; diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index 59c8ec9c1cad..7c560d545c03 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_BNXT) += bnxt_en.o -bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o +bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 61ca4eb7c6fa..1500243b9886 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. 
* * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2017 Broadcom Limited + * Copyright (c) 2016-2018 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -107,6 +107,7 @@ enum board_idx { BCM57416_NPAR, BCM57452, BCM57454, + BCM5745x_NPAR, BCM58802, BCM58804, BCM58808, @@ -147,6 +148,7 @@ static const struct { [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, + [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, @@ -156,6 +158,8 @@ static const struct { }; static const struct pci_device_id bnxt_pci_tbl[] = { + { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR }, { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR }, { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, @@ -209,6 +213,7 @@ MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); static const u16 bnxt_vf_req_snif[] = { HWRM_FUNC_CFG, + HWRM_FUNC_VF_CFG, HWRM_PORT_PHY_QCFG, HWRM_CFA_L2_FILTER_ALLOC, }; @@ -1510,7 +1515,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, (struct rx_tpa_start_cmp_ext *)rxcmp1); *event |= BNXT_RX_EVENT; - goto next_rx_no_prod; + goto next_rx_no_prod_no_len; } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons, @@ -1526,7 +1531,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, rc = 1; } *event |= BNXT_RX_EVENT; - goto next_rx_no_prod; + goto next_rx_no_prod_no_len; } cons = rxcmp->rx_cmp_opaque; @@ -1644,7 +1649,10 @@ next_rx: rxr->rx_prod = NEXT_RX(prod); rxr->rx_next_cons = NEXT_RX(cons); -next_rx_no_prod: + cpr->rx_packets += 1; + cpr->rx_bytes += len; + +next_rx_no_prod_no_len: *raw_cons = tmp_raw_cons; return rc; @@ -1706,12 +1714,16 @@ static int bnxt_async_event_process(struct bnxt *bp, if (BNXT_VF(bp)) goto async_event_process_exit; - if (data1 & 0x20000) { + + /* print unsupported speed warning in forced speed mode only */ + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && + (data1 & 0x20000)) { u16 fw_speed = link_info->force_link_speed; u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); - netdev_warn(bp->dev, "Link speed %d no longer supported\n", - speed); + if (speed != SPEED_UNKNOWN) + netdev_warn(bp->dev, "Link speed %d no longer supported\n", + speed); } set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); /* fall thru */ @@ -1798,6 +1810,7 @@ static irqreturn_t bnxt_msix(int irq, void *dev_instance) struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; u32 cons = RING_CMP(cpr->cp_raw_cons); + cpr->event_ctr++; prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); napi_schedule(&bnapi->napi); return IRQ_HANDLED; @@ -2021,6 +2034,15 @@ static int bnxt_poll(struct napi_struct *napi, int budget) break; } } + if (bp->flags & BNXT_FLAG_DIM) { + struct net_dim_sample dim_sample; + + net_dim_sample(cpr->event_ctr, + cpr->rx_packets, + cpr->rx_bytes, + 
&dim_sample); + net_dim(&cpr->dim, dim_sample); + } mmiowb(); return work_done; } @@ -2243,6 +2265,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp) if (rxr->xdp_prog) bpf_prog_put(rxr->xdp_prog); + if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) + xdp_rxq_info_unreg(&rxr->xdp_rxq); + kfree(rxr->rx_tpa); rxr->rx_tpa = NULL; @@ -2276,6 +2301,10 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) ring = &rxr->rx_ring_struct; + rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i); + if (rc < 0) + return rc; + rc = bnxt_alloc_ring(bp, ring); if (rc) return rc; @@ -2606,6 +2635,8 @@ static void bnxt_init_cp_rings(struct bnxt *bp) struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; ring->fw_ring_id = INVALID_HW_RING_ID; + cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; + cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; } } @@ -2751,7 +2782,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp) return; if (bp->dev->features & NETIF_F_LRO) bp->flags |= BNXT_FLAG_LRO; - if (bp->dev->features & NETIF_F_GRO) + else if (bp->dev->features & NETIF_F_GRO_HW) bp->flags |= BNXT_FLAG_GRO; } @@ -2830,6 +2861,9 @@ void bnxt_set_ring_params(struct bnxt *bp) bp->cp_ring_mask = bp->cp_bit - 1; } +/* Changing allocation mode of RX rings. + * TODO: Update when extending xdp_rxq_info to support allocation modes. + */ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) { if (page_mode) { @@ -2839,10 +2873,10 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); bp->flags &= ~BNXT_FLAG_AGG_RINGS; bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; - bp->dev->hw_features &= ~NETIF_F_LRO; - bp->dev->features &= ~NETIF_F_LRO; bp->rx_dir = DMA_BIDIRECTIONAL; bp->rx_skb_func = bnxt_rx_page_skb; + /* Disable LRO or GRO_HW */ + netdev_update_features(bp->dev); } else { bp->dev->max_mtu = bp->max_mtu; bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; @@ -4469,6 +4503,42 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) } } +static int bnxt_hwrm_get_rings(struct bnxt *bp) +{ + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + struct hwrm_func_qcfg_input req = {0}; + int rc; + + if (bp->hwrm_spec_code < 0x10601) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); + req.fid = cpu_to_le16(0xffff); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + mutex_unlock(&bp->hwrm_cmd_lock); + return -EIO; + } + + hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); + if (bp->flags & BNXT_FLAG_NEW_RM) { + u16 cp, stats; + + hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); + hw_resc->resv_hw_ring_grps = + le32_to_cpu(resp->alloc_hw_ring_grps); + hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); + cp = le16_to_cpu(resp->alloc_cmpl_rings); + stats = le16_to_cpu(resp->alloc_stat_ctx); + cp = min_t(u16, cp, stats); + hw_resc->resv_cp_rings = cp; + } + mutex_unlock(&bp->hwrm_cmd_lock); + return 0; +} + /* Caller must hold bp->hwrm_cmd_lock */ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) { @@ -4488,55 +4558,283 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) return rc; } -static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings) +static int +bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, + int ring_grps, int cp_rings, int vnics) { struct hwrm_func_cfg_input req = {0}; + u32 enables = 0; int rc; - if (bp->hwrm_spec_code < 
0x10601) + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(0xffff); + enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; + req.num_tx_rings = cpu_to_le16(tx_rings); + if (bp->flags & BNXT_FLAG_NEW_RM) { + enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; + enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + enables |= ring_grps ? + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; + enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; + + req.num_rx_rings = cpu_to_le16(rx_rings); + req.num_hw_ring_grps = cpu_to_le16(ring_grps); + req.num_cmpl_rings = cpu_to_le16(cp_rings); + req.num_stat_ctxs = req.num_cmpl_rings; + req.num_vnics = cpu_to_le16(vnics); + } + if (!enables) return 0; - if (BNXT_VF(bp)) + req.enables = cpu_to_le32(enables); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + return -ENOMEM; + + if (bp->hwrm_spec_code < 0x10601) + bp->hw_resc.resv_tx_rings = tx_rings; + + rc = bnxt_hwrm_get_rings(bp); + return rc; +} + +static int +bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, + int ring_grps, int cp_rings, int vnics) +{ + struct hwrm_func_vf_cfg_input req = {0}; + u32 enables = 0; + int rc; + + if (!(bp->flags & BNXT_FLAG_NEW_RM)) { + bp->hw_resc.resv_tx_rings = tx_rings; return 0; + } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS); - req.num_tx_rings = cpu_to_le16(*tx_rings); + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); + enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; + enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; + enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; + enables |= vnics ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; + + req.num_tx_rings = cpu_to_le16(tx_rings); + req.num_rx_rings = cpu_to_le16(rx_rings); + req.num_hw_ring_grps = cpu_to_le16(ring_grps); + req.num_cmpl_rings = cpu_to_le16(cp_rings); + req.num_stat_ctxs = req.num_cmpl_rings; + req.num_vnics = cpu_to_le16(vnics); + + req.enables = cpu_to_le32(enables); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) + return -ENOMEM; + + rc = bnxt_hwrm_get_rings(bp); + return rc; +} + +static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, + int cp, int vnic) +{ + if (BNXT_PF(bp)) + return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic); + else + return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic); +} + +static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, + bool shared); + +static int __bnxt_reserve_rings(struct bnxt *bp) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + int tx = bp->tx_nr_rings; + int rx = bp->rx_nr_rings; + int cp = bp->cp_nr_rings; + int grp, rx_rings, rc; + bool sh = false; + int vnic = 1; + + if (bp->hwrm_spec_code < 0x10601) + return 0; + + if (bp->flags & BNXT_FLAG_SHARED_RINGS) + sh = true; + if (bp->flags & BNXT_FLAG_RFS) + vnic = rx + 1; + if (bp->flags & BNXT_FLAG_AGG_RINGS) + rx <<= 1; + + grp = bp->rx_nr_rings; + if (tx == hw_resc->resv_tx_rings && + (!(bp->flags & BNXT_FLAG_NEW_RM) || + (rx == hw_resc->resv_rx_rings && + grp == hw_resc->resv_hw_ring_grps && + cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics))) + return 0; + + rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic); + if (rc) return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings); - mutex_unlock(&bp->hwrm_cmd_lock); - if (!rc) - bp->tx_reserved_rings = *tx_rings; + tx = hw_resc->resv_tx_rings; + if (bp->flags & BNXT_FLAG_NEW_RM) { + rx = hw_resc->resv_rx_rings; + cp = hw_resc->resv_cp_rings; + grp = hw_resc->resv_hw_ring_grps; + vnic = hw_resc->resv_vnics; + } + + rx_rings = rx; + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + if (rx >= 2) { + rx_rings = rx >> 1; + } else { + if (netif_running(bp->dev)) + return -ENOMEM; + + bp->flags &= ~BNXT_FLAG_AGG_RINGS; + bp->flags |= BNXT_FLAG_NO_AGG_RINGS; + bp->dev->hw_features &= ~NETIF_F_LRO; + bp->dev->features &= ~NETIF_F_LRO; + bnxt_set_ring_params(bp); + } + } + rx_rings = min_t(int, rx_rings, grp); + rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); + if (bp->flags & BNXT_FLAG_AGG_RINGS) + rx = rx_rings << 1; + cp = sh ? 
max_t(int, tx, rx_rings) : tx + rx_rings; + bp->tx_nr_rings = tx; + bp->rx_nr_rings = rx_rings; + bp->cp_nr_rings = cp; + + if (!tx || !rx || !cp || !grp || !vnic) + return -ENOMEM; + return rc; } -static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings) +static bool bnxt_need_reserve_rings(struct bnxt *bp) { - struct hwrm_func_cfg_input req = {0}; + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + int rx = bp->rx_nr_rings; + int vnic = 1; + + if (bp->hwrm_spec_code < 0x10601) + return false; + + if (hw_resc->resv_tx_rings != bp->tx_nr_rings) + return true; + + if (bp->flags & BNXT_FLAG_RFS) + vnic = rx + 1; + if (bp->flags & BNXT_FLAG_AGG_RINGS) + rx <<= 1; + if ((bp->flags & BNXT_FLAG_NEW_RM) && + (hw_resc->resv_rx_rings != rx || + hw_resc->resv_cp_rings != bp->cp_nr_rings || + hw_resc->resv_vnics != vnic)) + return true; + return false; +} + +static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, + int ring_grps, int cp_rings) +{ + struct hwrm_func_vf_cfg_input req = {0}; + u32 flags, enables; int rc; - if (bp->hwrm_spec_code < 0x10801) + if (!(bp->flags & BNXT_FLAG_NEW_RM)) return 0; - if (BNXT_VF(bp)) - return 0; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); + flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | + FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | + FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | + FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | + FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | + FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; + enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS | + FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | + FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | + FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS | + FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS; + + req.flags = cpu_to_le32(flags); + req.enables = cpu_to_le32(enables); + req.num_tx_rings = cpu_to_le16(tx_rings); + req.num_rx_rings = cpu_to_le16(rx_rings); + req.num_cmpl_rings = cpu_to_le16(cp_rings); + req.num_hw_ring_grps = cpu_to_le16(ring_grps); + req.num_stat_ctxs = cpu_to_le16(cp_rings); + req.num_vnics = cpu_to_le16(1); + if (bp->flags & BNXT_FLAG_RFS) + req.num_vnics = cpu_to_le16(rx_rings + 1); + rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + return -ENOMEM; + return 0; +} + +static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, + int ring_grps, int cp_rings) +{ + struct hwrm_func_cfg_input req = {0}; + u32 flags, enables; + int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(0xffff); - req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS); + flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; + enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS; req.num_tx_rings = cpu_to_le16(tx_rings); + if (bp->flags & BNXT_FLAG_NEW_RM) { + flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | + FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | + FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | + FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | + FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; + enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | + FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_VNICS; + req.num_rx_rings = cpu_to_le16(rx_rings); + req.num_cmpl_rings = cpu_to_le16(cp_rings); + req.num_hw_ring_grps = cpu_to_le16(ring_grps); + req.num_stat_ctxs = cpu_to_le16(cp_rings); + req.num_vnics = cpu_to_le16(1); + if (bp->flags & BNXT_FLAG_RFS) + req.num_vnics = 
cpu_to_le16(rx_rings + 1); + } + req.flags = cpu_to_le32(flags); + req.enables = cpu_to_le32(enables); rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return -ENOMEM; return 0; } +static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, + int ring_grps, int cp_rings) +{ + if (bp->hwrm_spec_code < 0x10801) + return 0; + + if (BNXT_PF(bp)) + return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, + ring_grps, cp_rings); + + return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings); +} + static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) { @@ -4579,6 +4877,36 @@ static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, req->flags = cpu_to_le16(flags); } +int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) +{ + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_coal coal; + unsigned int grp_idx; + + /* Tick values in micro seconds. + * 1 coal_buf x bufs_per_record = 1 completion record. + */ + memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); + + coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; + coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; + + if (!bnapi->rx_ring) + return -ENODEV; + + bnxt_hwrm_cmd_hdr_init(bp, &req_rx, + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); + + bnxt_hwrm_set_coal_params(&coal, &req_rx); + + grp_idx = bnapi->index; + req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); + + return hwrm_send_message(bp, &req_rx, sizeof(req_rx), + HWRM_CMD_TIMEOUT); +} + int bnxt_hwrm_set_coal(struct bnxt *bp) { int i, rc = 0; @@ -4732,11 +5060,60 @@ func_qcfg_exit: return rc; } -static int bnxt_hwrm_func_qcaps(struct bnxt *bp) +static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp) +{ + struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_resource_qcaps_input req = {0}; + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1); + req.fid = cpu_to_le16(0xffff); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + rc = -EIO; + goto hwrm_func_resc_qcaps_exit; + } + + hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); + hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); + hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); + hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); + hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); + hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); + hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); + hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); + hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); + hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); + hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); + hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); + hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); + hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); + hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); + hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); + + if (BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + + pf->vf_resv_strategy = + le16_to_cpu(resp->vf_reservation_strategy); + if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL) + pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; + } 
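/* The reservation machinery introduced above (__bnxt_reserve_rings(),
 * bnxt_need_reserve_rings(), and the FUNC_RESOURCE_QCAPS min/max parsing in
 * this function) hinges on one piece of arithmetic: how many rings and VNICs
 * a given channel configuration really needs. The sketch below is a
 * compilable distillation of that arithmetic with invented names; the real
 * code also trims the result against ring groups and the firmware's min/max
 * caps, which this sketch omits.
 */

#include <stdio.h>
#include <stdbool.h>

struct need { int tx, rx, cp, vnic; };

/* Mirrors the rules in bnxt_need_reserve_rings(): RFS wants one VNIC per RX
 * ring plus the default VNIC, aggregation rings double the RX count, and
 * completion rings are shared (max of tx/rx) or dedicated (tx + rx).
 */
static struct need rings_needed(int tx, int rx, bool shared, bool rfs, bool agg)
{
	struct need n;

	n.vnic = rfs ? rx + 1 : 1;
	n.cp = shared ? (tx > rx ? tx : rx) : tx + rx;
	n.rx = agg ? rx * 2 : rx;
	n.tx = tx;
	return n;
}

int main(void)
{
	/* 8 shared channels with RFS and aggregation rings enabled */
	struct need n = rings_needed(8, 8, true, true, true);

	printf("tx=%d rx=%d cp=%d vnic=%d\n", n.tx, n.rx, n.cp, n.vnic);
	return 0;	/* prints tx=8 rx=16 cp=8 vnic=9 */
}

/* Only when these computed needs differ from the resv_* values cached from
 * HWRM_FUNC_QCFG does the driver issue a new FUNC_CFG/FUNC_VF_CFG request.
 */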
+hwrm_func_resc_qcaps_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) { int rc = 0; struct hwrm_func_qcaps_input req = {0}; struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + u32 flags; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); req.fid = cpu_to_le16(0xffff); @@ -4746,16 +5123,27 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) if (rc) goto hwrm_func_qcaps_exit; - if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)) + flags = le32_to_cpu(resp->flags); + if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) bp->flags |= BNXT_FLAG_ROCEV1_CAP; - if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)) + if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) bp->flags |= BNXT_FLAG_ROCEV2_CAP; bp->tx_push_thresh = 0; - if (resp->flags & - cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)) + if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; + hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); + hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); + hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); + hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); + hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); + if (!hw_resc->max_hw_ring_grps) + hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; + hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); + hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); + hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); + if (BNXT_PF(bp)) { struct bnxt_pf_info *pf = &bp->pf; @@ -4763,16 +5151,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) pf->port_id = le16_to_cpu(resp->port_id); bp->dev->dev_port = pf->port_id; memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); - pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); - pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); - pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); - pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); - pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); - if (!pf->max_hw_ring_grps) - pf->max_hw_ring_grps = pf->max_tx_rings; - pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); - pf->max_vnics = le16_to_cpu(resp->max_vnics); - pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); pf->first_vf_id = le16_to_cpu(resp->first_vf_id); pf->max_vfs = le16_to_cpu(resp->max_vfs); pf->max_encap_records = le32_to_cpu(resp->max_encap_records); @@ -4781,26 +5159,13 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); - if (resp->flags & - cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)) + if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) bp->flags |= BNXT_FLAG_WOL_CAP; } else { #ifdef CONFIG_BNXT_SRIOV struct bnxt_vf_info *vf = &bp->vf; vf->fw_fid = le16_to_cpu(resp->fid); - - vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); - vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); - vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); - vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); - vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); - if (!vf->max_hw_ring_grps) - vf->max_hw_ring_grps = vf->max_tx_rings; - vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); - vf->max_vnics = le16_to_cpu(resp->max_vnics); - 
vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); - memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); #endif } @@ -4810,6 +5175,21 @@ hwrm_func_qcaps_exit: return rc; } +static int bnxt_hwrm_func_qcaps(struct bnxt *bp) +{ + int rc; + + rc = __bnxt_hwrm_func_qcaps(bp); + if (rc) + return rc; + if (bp->hwrm_spec_code >= 0x10803) { + rc = bnxt_hwrm_func_resc_qcaps(bp); + if (!rc) + bp->flags |= BNXT_FLAG_NEW_RM; + } + return 0; +} + static int bnxt_hwrm_func_reset(struct bnxt *bp) { struct hwrm_func_reset_input req = {0}; @@ -4879,23 +5259,24 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); - bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 | - resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd; - if (resp->hwrm_intf_maj < 1) { + bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | + resp->hwrm_intf_min_8b << 8 | + resp->hwrm_intf_upd_8b; + if (resp->hwrm_intf_maj_8b < 1) { netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", - resp->hwrm_intf_maj, resp->hwrm_intf_min, - resp->hwrm_intf_upd); + resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, + resp->hwrm_intf_upd_8b); netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); } snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d", - resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld, - resp->hwrm_fw_rsvd); + resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, + resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b); bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); if (!bp->hwrm_cmd_timeout) bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; - if (resp->hwrm_intf_maj >= 1) + if (resp->hwrm_intf_maj_8b >= 1) bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); bp->chip_num = le16_to_cpu(resp->chip_num); @@ -5031,6 +5412,28 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) return rc; } +static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) +{ + struct hwrm_func_cfg_input req = {0}; + int rc; + + if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(0xffff); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); + req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64; + if (size == 128) + req.cache_linesize = + FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128; + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + rc = -EIO; + return rc; +} + static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; @@ -5166,15 +5569,6 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) rc); goto err_out; } - if (bp->tx_reserved_rings != bp->tx_nr_rings) { - int tx = bp->tx_nr_rings; - - if (bnxt_hwrm_reserve_tx_rings(bp, &tx) || - tx < bp->tx_nr_rings) { - rc = -ENOMEM; - goto err_out; - } - } } rc = bnxt_hwrm_ring_alloc(bp); @@ -5394,79 +5788,45 @@ static int bnxt_setup_int_mode(struct bnxt *bp) #ifdef CONFIG_RFS_ACCEL static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) { -#if defined(CONFIG_BNXT_SRIOV) - if (BNXT_VF(bp)) - return bp->vf.max_rsscos_ctxs; -#endif - return bp->pf.max_rsscos_ctxs; + return bp->hw_resc.max_rsscos_ctxs; } static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) { -#if defined(CONFIG_BNXT_SRIOV) - if (BNXT_VF(bp)) - return bp->vf.max_vnics; -#endif - return bp->pf.max_vnics; + return bp->hw_resc.max_vnics; } #endif unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) { 
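/* The accessor rewrites that follow collapse the duplicated PF/VF resource
 * bookkeeping into the single bnxt_hw_resc pool declared in bnxt.h later in
 * this patch: every resource carries a (min, max, resv) triple and the
 * CONFIG_BNXT_SRIOV ifdefs disappear from the read paths. Below is a small
 * compilable illustration of the shape of that struct and the kind of range
 * check the new FUNC_RESOURCE_QCAPS min/max fields enable; clamp_rings() is
 * an invented helper, not a function from this patch.
 */

#include <stdint.h>
#include <stdio.h>

/* One shared descriptor instead of #ifdef'd bnxt_pf_info/bnxt_vf_info copies */
struct hw_resc {
	uint16_t min_tx_rings, max_tx_rings, resv_tx_rings;
	uint16_t min_rx_rings, max_rx_rings, resv_rx_rings;
};

static uint16_t clamp_rings(const struct hw_resc *r, uint16_t want)
{
	if (want < r->min_tx_rings)
		return r->min_tx_rings;
	if (want > r->max_tx_rings)
		return r->max_tx_rings;
	return want;
}

int main(void)
{
	struct hw_resc r = { .min_tx_rings = 1, .max_tx_rings = 64 };

	/* requests are clamped into the advertised window: prints "1 8 64" */
	printf("%u %u %u\n", clamp_rings(&r, 0), clamp_rings(&r, 8),
	       clamp_rings(&r, 200));
	return 0;
}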
-#if defined(CONFIG_BNXT_SRIOV) - if (BNXT_VF(bp)) - return bp->vf.max_stat_ctxs; -#endif - return bp->pf.max_stat_ctxs; + return bp->hw_resc.max_stat_ctxs; } void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max) { -#if defined(CONFIG_BNXT_SRIOV) - if (BNXT_VF(bp)) - bp->vf.max_stat_ctxs = max; - else -#endif - bp->pf.max_stat_ctxs = max; + bp->hw_resc.max_stat_ctxs = max; } unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) { -#if defined(CONFIG_BNXT_SRIOV) - if (BNXT_VF(bp)) - return bp->vf.max_cp_rings; -#endif - return bp->pf.max_cp_rings; + return bp->hw_resc.max_cp_rings; } void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) { -#if defined(CONFIG_BNXT_SRIOV) - if (BNXT_VF(bp)) - bp->vf.max_cp_rings = max; - else -#endif - bp->pf.max_cp_rings = max; + bp->hw_resc.max_cp_rings = max; } static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) { -#if defined(CONFIG_BNXT_SRIOV) - if (BNXT_VF(bp)) - return min_t(unsigned int, bp->vf.max_irqs, - bp->vf.max_cp_rings); -#endif - return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings); + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); } void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) { -#if defined(CONFIG_BNXT_SRIOV) - if (BNXT_VF(bp)) - bp->vf.max_irqs = max_irqs; - else -#endif - bp->pf.max_irqs = max_irqs; + bp->hw_resc.max_irqs = max_irqs; } static int bnxt_init_msix(struct bnxt *bp) @@ -5567,6 +5927,36 @@ static void bnxt_clear_int_mode(struct bnxt *bp) bp->flags &= ~BNXT_FLAG_USING_MSIX; } +static int bnxt_reserve_rings(struct bnxt *bp) +{ + int orig_cp = bp->hw_resc.resv_cp_rings; + int tcs = netdev_get_num_tc(bp->dev); + int rc; + + if (!bnxt_need_reserve_rings(bp)) + return 0; + + rc = __bnxt_reserve_rings(bp); + if (rc) { + netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc); + return rc; + } + if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) { + bnxt_clear_int_mode(bp); + rc = bnxt_init_int_mode(bp); + if (rc) + return rc; + } + if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) { + netdev_err(bp->dev, "tx ring reservation failure\n"); + netdev_reset_tc(bp->dev); + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + return -ENOMEM; + } + bp->num_stat_ctxs = bp->cp_nr_rings; + return 0; +} + static void bnxt_free_irq(struct bnxt *bp) { struct bnxt_irq *irq; @@ -5692,8 +6082,14 @@ static void bnxt_disable_napi(struct bnxt *bp) if (!bp->bnapi) return; - for (i = 0; i < bp->cp_nr_rings; i++) + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; + + if (bp->bnapi[i]->rx_ring) + cancel_work_sync(&cpr->dim.work); + napi_disable(&bp->bnapi[i]->napi); + } } static void bnxt_enable_napi(struct bnxt *bp) @@ -5701,7 +6097,13 @@ static void bnxt_enable_napi(struct bnxt *bp) int i; for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; bp->bnapi[i]->in_reset = false; + + if (bp->bnapi[i]->rx_ring) { + INIT_WORK(&cpr->dim.work, bnxt_dim_work); + cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; + } napi_enable(&bp->bnapi[i]->napi); } } @@ -6311,6 +6713,10 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_preset_reg_win(bp); netif_carrier_off(bp->dev); if (irq_re_init) { + rc = bnxt_reserve_rings(bp); + if (rc) + return rc; + rc = bnxt_setup_int_mode(bp); if (rc) { netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", @@ -6446,23 +6852,13 @@ static bool 
bnxt_drv_busy(struct bnxt *bp) test_bit(BNXT_STATE_READ_STATS, &bp->state)); } -int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) +static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, + bool link_re_init) { - int rc = 0; - -#ifdef CONFIG_BNXT_SRIOV - if (bp->sriov_cfg) { - rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, - !bp->sriov_cfg, - BNXT_SRIOV_CFG_WAIT_TMO); - if (rc) - netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); - } - /* Close the VF-reps before closing PF */ if (BNXT_PF(bp)) bnxt_vf_reps_close(bp); -#endif + /* Change device state to avoid TX queue wake-ups */ bnxt_tx_disable(bp); @@ -6485,6 +6881,22 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_del_napi(bp); } bnxt_free_mem(bp, irq_re_init); +} + +int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) +{ + int rc = 0; + +#ifdef CONFIG_BNXT_SRIOV + if (bp->sriov_cfg) { + rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, + !bp->sriov_cfg, + BNXT_SRIOV_CFG_WAIT_TMO); + if (rc) + netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); + } +#endif + __bnxt_close_nic(bp, irq_re_init, link_re_init); return rc; } @@ -6764,13 +7176,26 @@ static bool bnxt_rfs_capable(struct bnxt *bp) if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) max_rss_ctxs = max_vnics; if (vnics > max_vnics || vnics > max_rss_ctxs) { - netdev_warn(bp->dev, - "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", - min(max_rss_ctxs - 1, max_vnics - 1)); + if (bp->rx_nr_rings > 1) + netdev_warn(bp->dev, + "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", + min(max_rss_ctxs - 1, max_vnics - 1)); return false; } - return true; + if (!(bp->flags & BNXT_FLAG_NEW_RM)) + return true; + + if (vnics == bp->hw_resc.resv_vnics) + return true; + + bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics); + if (vnics <= bp->hw_resc.resv_vnics) + return true; + + netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); + bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1); + return false; #else return false; #endif @@ -6784,6 +7209,15 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) features &= ~NETIF_F_NTUPLE; + if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) + features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); + + if (!(features & NETIF_F_GRO)) + features &= ~NETIF_F_GRO_HW; + + if (features & NETIF_F_GRO_HW) + features &= ~NETIF_F_LRO; + /* Both CTAG and STAG VLAN acceleration on the RX side have to be * turned on or off together. 
*/ @@ -6817,9 +7251,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) bool update_tpa = false; flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; - if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) + if (features & NETIF_F_GRO_HW) flags |= BNXT_FLAG_GRO; - if (features & NETIF_F_LRO) + else if (features & NETIF_F_LRO) flags |= BNXT_FLAG_LRO; if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) @@ -7096,7 +7530,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, { int max_rx, max_tx, tx_sets = 1; int tx_rings_needed; - int rc; + int rx_rings = rx; + int cp, rc; if (tcs) tx_sets = tcs; @@ -7112,7 +7547,10 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, if (max_tx < tx_rings_needed) return -ENOMEM; - return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed); + if (bp->flags & BNXT_FLAG_AGG_RINGS) + rx_rings <<= 1; + cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; + return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp); } static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) @@ -7346,7 +7784,8 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, { struct bnxt *bp = cb_priv; - if (!bnxt_tc_flower_enabled(bp) || !tc_can_offload(bp->dev)) + if (!bnxt_tc_flower_enabled(bp) || + !tc_cls_can_offload_and_chain0(bp->dev, type_data)) return -EOPNOTSUPP; switch (type) { @@ -7717,12 +8156,8 @@ int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr) switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - /* In SRIOV each PF-pool (PF + child VFs) serves as a - * switching domain, the PF's perm mac-addr can be used - * as the unique parent-id - */ - attr->u.ppid.id_len = ETH_ALEN; - ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr); + attr->u.ppid.id_len = sizeof(bp->switch_id); + memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len); break; default: return -EOPNOTSUPP; @@ -7800,8 +8235,6 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_dcb_free(bp); kfree(bp->edev); bp->edev = NULL; - if (bp->xdp_prog) - bpf_prog_put(bp->xdp_prog); bnxt_cleanup_pci(bp); free_netdev(dev); } @@ -7869,24 +8302,14 @@ static int bnxt_get_max_irq(struct pci_dev *pdev) static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, int *max_cp) { + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int max_ring_grps = 0; -#ifdef CONFIG_BNXT_SRIOV - if (!BNXT_PF(bp)) { - *max_tx = bp->vf.max_tx_rings; - *max_rx = bp->vf.max_rx_rings; - *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings); - *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs); - max_ring_grps = bp->vf.max_hw_ring_grps; - } else -#endif - { - *max_tx = bp->pf.max_tx_rings; - *max_rx = bp->pf.max_rx_rings; - *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings); - *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs); - max_ring_grps = bp->pf.max_hw_ring_grps; - } + *max_tx = hw_resc->max_tx_rings; + *max_rx = hw_resc->max_rx_rings; + *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings); + *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); + max_ring_grps = hw_resc->max_hw_ring_grps; if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { *max_cp -= 1; *max_rx -= 2; @@ -7922,8 +8345,8 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, if (rc) return rc; bp->flags |= BNXT_FLAG_NO_AGG_RINGS; - bp->dev->hw_features &= ~NETIF_F_LRO; - bp->dev->features &= ~NETIF_F_LRO; + bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); + bp->dev->features &= 
~(NETIF_F_LRO | NETIF_F_GRO_HW); bnxt_set_ring_params(bp); } @@ -7951,6 +8374,17 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, return rc; } +/* In initial default shared ring setting, each shared ring must have a + * RX/TX ring pair. + */ +static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) +{ + bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); + bp->rx_nr_rings = bp->cp_nr_rings; + bp->tx_nr_rings_per_tc = bp->cp_nr_rings; + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; +} + static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) { int dflt_rings, max_rx_rings, max_tx_rings, rc; @@ -7966,14 +8400,26 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) return rc; bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); + if (sh) + bnxt_trim_dflt_sh_rings(bp); + else + bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; - rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc); + rc = __bnxt_reserve_rings(bp); if (rc) netdev_warn(bp->dev, "Unable to reserve tx rings\n"); + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + if (sh) + bnxt_trim_dflt_sh_rings(bp); - bp->tx_nr_rings = bp->tx_nr_rings_per_tc; - bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : - bp->tx_nr_rings + bp->rx_nr_rings; + /* Rings may have been trimmed, re-reserve the trimmed rings. */ + if (bnxt_need_reserve_rings(bp)) { + rc = __bnxt_reserve_rings(bp); + if (rc) + netdev_warn(bp->dev, "2nd rings reservation failed.\n"); + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + } bp->num_stat_ctxs = bp->cp_nr_rings; if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { bp->rx_nr_rings++; @@ -7982,11 +8428,23 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) return rc; } -void bnxt_restore_pf_fw_resources(struct bnxt *bp) +int bnxt_restore_pf_fw_resources(struct bnxt *bp) { + int rc; + ASSERT_RTNL(); + if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) + return 0; + bnxt_hwrm_func_qcaps(bp); - bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP); + __bnxt_close_nic(bp, true, false); + bnxt_clear_int_mode(bp); + rc = bnxt_init_int_mode(bp); + if (rc) + dev_close(bp->dev); + else + rc = bnxt_open_nic(bp, true, false); + return rc; } static int bnxt_init_mac_addr(struct bnxt *bp) @@ -8000,7 +8458,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp) struct bnxt_vf_info *vf = &bp->vf; if (is_valid_ether_addr(vf->mac_addr)) { - /* overwrite netdev dev_adr with admin VF MAC */ + /* overwrite netdev dev_addr with admin VF MAC */ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); } else { eth_hw_addr_random(bp->dev); @@ -8106,7 +8564,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; + if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) + dev->hw_features |= NETIF_F_GRO_HW; dev->features |= dev->hw_features | NETIF_F_HIGHDMA; + if (dev->features & NETIF_F_GRO_HW) + dev->features &= ~NETIF_F_LRO; dev->priv_flags |= IFF_UNICAST_FLT; #ifdef CONFIG_BNXT_SRIOV @@ -8208,6 +8670,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) else device_set_wakeup_capable(&pdev->dev, false); + bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); + if (BNXT_PF(bp)) { if (!bnxt_pf_wq) { bnxt_pf_wq = diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h 
b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 5359a1f0045f..1989c470172c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2017 Broadcom Limited + * Copyright (c) 2016-2018 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -12,10 +12,10 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.8.0" +#define DRV_MODULE_VERSION "1.9.0" #define DRV_VER_MAJ 1 -#define DRV_VER_MIN 8 +#define DRV_VER_MIN 9 #define DRV_VER_UPD 0 #include <linux/interrupt.h> @@ -23,6 +23,8 @@ #include <net/devlink.h> #include <net/dst_metadata.h> #include <net/switchdev.h> +#include <net/xdp.h> +#include <linux/net_dim.h> struct tx_bd { __le32 tx_bd_len_flags_type; @@ -607,6 +609,17 @@ struct bnxt_tx_ring_info { struct bnxt_ring_struct tx_ring_struct; }; +struct bnxt_coal { + u16 coal_ticks; + u16 coal_ticks_irq; + u16 coal_bufs; + u16 coal_bufs_irq; + /* RING_IDLE enabled when coal ticks < idle_thresh */ + u16 idle_thresh; + u8 bufs_per_record; + u8 budget; +}; + struct bnxt_tpa_info { void *data; u8 *data_ptr; @@ -664,12 +677,20 @@ struct bnxt_rx_ring_info { struct bnxt_ring_struct rx_ring_struct; struct bnxt_ring_struct rx_agg_ring_struct; + struct xdp_rxq_info xdp_rxq; }; struct bnxt_cp_ring_info { u32 cp_raw_cons; void __iomem *cp_doorbell; + struct bnxt_coal rx_ring_coal; + u64 rx_packets; + u64 rx_bytes; + u64 event_ctr; + + struct net_dim dim; + struct tx_cmp *cp_desc_ring[MAX_CP_PAGES]; dma_addr_t cp_desc_mapping[MAX_CP_PAGES]; @@ -755,19 +776,38 @@ struct bnxt_vnic_info { #define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10 }; -#if defined(CONFIG_BNXT_SRIOV) -struct bnxt_vf_info { - u16 fw_fid; - u8 mac_addr[ETH_ALEN]; +struct bnxt_hw_resc { + u16 min_rsscos_ctxs; u16 max_rsscos_ctxs; + u16 min_cp_rings; u16 max_cp_rings; + u16 resv_cp_rings; + u16 min_tx_rings; u16 max_tx_rings; + u16 resv_tx_rings; + u16 min_rx_rings; u16 max_rx_rings; + u16 resv_rx_rings; + u16 min_hw_ring_grps; u16 max_hw_ring_grps; + u16 resv_hw_ring_grps; + u16 min_l2_ctxs; u16 max_l2_ctxs; - u16 max_irqs; + u16 min_vnics; u16 max_vnics; + u16 resv_vnics; + u16 min_stat_ctxs; u16 max_stat_ctxs; + u16 max_irqs; +}; + +#if defined(CONFIG_BNXT_SRIOV) +struct bnxt_vf_info { + u16 fw_fid; + u8 mac_addr[ETH_ALEN]; /* PF assigned MAC Address */ + u8 vf_mac_addr[ETH_ALEN]; /* VF assigned MAC address, only + * stored by PF. 
+ */ u16 vlan; u32 flags; #define BNXT_VF_QOS 0x1 @@ -788,15 +828,6 @@ struct bnxt_pf_info { u16 fw_fid; u16 port_id; u8 mac_addr[ETH_ALEN]; - u16 max_rsscos_ctxs; - u16 max_cp_rings; - u16 max_tx_rings; /* HW assigned max tx rings for this PF */ - u16 max_rx_rings; /* HW assigned max rx rings for this PF */ - u16 max_hw_ring_grps; - u16 max_irqs; - u16 max_l2_ctxs; - u16 max_vnics; - u16 max_stat_ctxs; u32 first_vf_id; u16 active_vfs; u16 max_vfs; @@ -808,6 +839,9 @@ struct bnxt_pf_info { u32 max_rx_wm_flows; unsigned long *vf_event_bmap; u16 hwrm_cmd_req_pages; + u8 vf_resv_strategy; +#define BNXT_VF_RESV_STRATEGY_MAXIMAL 0 +#define BNXT_VF_RESV_STRATEGY_MINIMAL 1 void *hwrm_cmd_req_addr[4]; dma_addr_t hwrm_cmd_req_dma_addr[4]; struct bnxt_vf_info *vf; @@ -944,17 +978,6 @@ struct bnxt_test_info { #define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 #define BNXT_CAG_REG_BASE 0x300000 -struct bnxt_coal { - u16 coal_ticks; - u16 coal_ticks_irq; - u16 coal_bufs; - u16 coal_bufs_irq; - /* RING_IDLE enabled when coal ticks < idle_thresh */ - u16 idle_thresh; - u8 bufs_per_record; - u8 budget; -}; - struct bnxt_tc_flow_stats { u64 packets; u64 bytes; @@ -1126,6 +1149,8 @@ struct bnxt { #define BNXT_FLAG_DOUBLE_DB 0x400000 #define BNXT_FLAG_FW_DCBX_AGENT 0x800000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 + #define BNXT_FLAG_DIM 0x2000000 + #define BNXT_FLAG_NEW_RM 0x8000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ @@ -1185,7 +1210,6 @@ struct bnxt { int tx_nr_rings; int tx_nr_rings_per_tc; int tx_nr_rings_xdp; - int tx_reserved_rings; int tx_wake_thresh; int tx_push_thresh; @@ -1297,6 +1321,7 @@ struct bnxt { #define BNXT_LINK_SPEED_CHNG_SP_EVENT 14 #define BNXT_FLOW_STATS_SP_EVENT 15 + struct bnxt_hw_resc hw_resc; struct bnxt_pf_info pf; #ifdef CONFIG_BNXT_SRIOV int nr_vfs; @@ -1346,6 +1371,7 @@ struct bnxt { enum devlink_eswitch_mode eswitch_mode; struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */ u16 *cfa_code_map; /* cfa_code -> vf_idx map */ + u8 switch_id[8]; struct bnxt_tc_info *tc_info; }; @@ -1421,6 +1447,9 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int tx_xdp); int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); -void bnxt_restore_pf_fw_resources(struct bnxt *bp); +int bnxt_restore_pf_fw_resources(struct bnxt *bp); int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr); +void bnxt_dim_work(struct work_struct *work); +int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi); + #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index fed37cd9ae1d..3c746f2d9ed8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -278,12 +278,11 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, n = IEEE_8021QAZ_MAX_TCS; data_len = sizeof(*data) + sizeof(*fw_app) * n; - data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping, - GFP_KERNEL); + data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping, + GFP_KERNEL); if (!data) return -ENOMEM; - memset(data, 0, data_len); bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1); get.dest_data_addr = cpu_to_le64(mapping); get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c new file mode 100644 index 000000000000..408dd190331e --- 
/dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c @@ -0,0 +1,32 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include <linux/net_dim.h> +#include "bnxt_hsi.h" +#include "bnxt.h" + +void bnxt_dim_work(struct work_struct *work) +{ + struct net_dim *dim = container_of(work, struct net_dim, + work); + struct bnxt_cp_ring_info *cpr = container_of(dim, + struct bnxt_cp_ring_info, + dim); + struct bnxt_napi *bnapi = container_of(cpr, + struct bnxt_napi, + cp_ring); + struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode, + dim->profile_ix); + + cpr->rx_ring_coal.coal_ticks = cur_profile.usec; + cpr->rx_ring_coal.coal_bufs = cur_profile.pkts; + + bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi); + dim->state = NET_DIM_START_MEASURE; +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index b13ce5ebde8d..1801582076be 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -49,6 +49,8 @@ static int bnxt_get_coalesce(struct net_device *dev, memset(coal, 0, sizeof(*coal)); + coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM; + hw_coal = &bp->rx_coal; mult = hw_coal->bufs_per_record; coal->rx_coalesce_usecs = hw_coal->coal_ticks; @@ -77,6 +79,15 @@ static int bnxt_set_coalesce(struct net_device *dev, int rc = 0; u16 mult; + if (coal->use_adaptive_rx_coalesce) { + bp->flags |= BNXT_FLAG_DIM; + } else { + if (bp->flags & BNXT_FLAG_DIM) { + bp->flags &= ~(BNXT_FLAG_DIM); + goto reset_coalesce; + } + } + hw_coal = &bp->rx_coal; mult = hw_coal->bufs_per_record; hw_coal->coal_ticks = coal->rx_coalesce_usecs; @@ -104,6 +115,7 @@ static int bnxt_set_coalesce(struct net_device *dev, update_stats = true; } +reset_coalesce: if (netif_running(dev)) { if (update_stats) { rc = bnxt_close_nic(bp, true, false); @@ -1376,6 +1388,9 @@ static int bnxt_firmware_reset(struct net_device *dev, req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; break; + case BNXT_FW_RESET_AP: + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP; + break; default: return -EINVAL; } @@ -2522,6 +2537,14 @@ static int bnxt_reset(struct net_device *dev, u32 *flags) rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP); if (!rc) netdev_info(dev, "Reset request successful. 
Reload driver to complete reset\n"); + } else if (*flags == ETH_RESET_AP) { + /* This feature is not supported in older firmware versions */ + if (bp->hwrm_spec_code < 0x10803) + return -EOPNOTSUPP; + + rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP); + if (!rc) + netdev_info(dev, "Reset Application Processor request successful.\n"); } else { rc = -EINVAL; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index ff601b42fcc8..836ef682f24c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -34,6 +34,7 @@ struct bnxt_led_cfg { #define BNXT_LED_DFLT_ENABLES(x) \ cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x))) +#define BNXT_FW_RESET_AP 0xfffe #define BNXT_FW_RESET_CHIP 0xffff extern const struct ethtool_ops bnxt_ethtool_ops; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index c99f4d0880e4..82d17f8cc0db 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -1,2437 +1,2700 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2017 Broadcom Limited + * Copyright (c) 2016-2018 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. + * + * DO NOT MODIFY!!! This file is automatically generated. */ -#ifndef BNXT_HSI_H -#define BNXT_HSI_H +#ifndef _BNXT_HSI_H_ +#define _BNXT_HSI_H_ + +/* hwrm_cmd_hdr (size:128b/16B) */ +struct hwrm_cmd_hdr { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_resp_hdr (size:64b/8B) */ +struct hwrm_resp_hdr { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; +}; + +#define CMD_DISCR_TLV_ENCAP 0x8000UL +#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP + + +#define TLV_TYPE_HWRM_REQUEST 0x1UL +#define TLV_TYPE_HWRM_RESPONSE 0x2UL +#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL +#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL +#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL +#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL +#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL +#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL +#define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL +#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL +#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL +#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE + + +/* tlv (size:64b/8B) */ +struct tlv { + __le16 cmd_discr; + u8 reserved_8b; + u8 flags; + #define TLV_FLAGS_MORE 0x1UL + #define TLV_FLAGS_MORE_LAST 0x0UL + #define TLV_FLAGS_MORE_NOT_LAST 0x1UL + #define TLV_FLAGS_REQUIRED 0x2UL + #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1) + #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1) + #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES + __le16 tlv_type; + __le16 length; +}; + +/* input (size:128b/16B) */ +struct input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; -/* HSI and HWRM Specification 1.8.3 */ -#define HWRM_VERSION_MAJOR 1 -#define HWRM_VERSION_MINOR 8 -#define HWRM_VERSION_UPDATE 3 +/* output (size:64b/8B) */ +struct output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; +}; -#define HWRM_VERSION_RSVD 1 /* non-zero means beta version */ +/* hwrm_short_input (size:128b/16B) */ 
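/*
 * Editor's note: a minimal sketch, not part of the patch, of how a driver
 * might populate the hwrm_short_input structure defined immediately below to
 * wrap a regular HWRM request in the short-command format. The helper name
 * bnxt_fill_short_req and its parameters are hypothetical; only the field
 * layout and SHORT_REQ_SIGNATURE_SHORT_CMD come from this header. Assumes the
 * usual kernel types and byte-order helpers, and that the full request has
 * already been copied into a DMA-coherent buffer at @req_dma.
 */
static void bnxt_fill_short_req(struct hwrm_short_input *short_req,
				__le16 req_type, u16 msg_len,
				dma_addr_t req_dma)
{
	short_req->req_type = req_type;   /* same command id as the full request */
	short_req->signature =
		cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); /* 0x4321 marks short format */
	short_req->unused_0 = 0;
	short_req->size = cpu_to_le16(msg_len);     /* length of the real request body */
	short_req->req_addr = cpu_to_le64(req_dma); /* bus address of the DMA'ed request */
}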
+struct hwrm_short_input { + __le16 req_type; + __le16 signature; + #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL + #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD + __le16 unused_0; + __le16 size; + __le64 req_addr; +}; -#define HWRM_VERSION_STR "1.8.3.1" -/* - * Following is the signature for HWRM message field that indicates not - * applicable (All F's). Need to cast it the size of the field if needed. - */ -#define HWRM_NA_SIGNATURE ((__le32)(-1)) -#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */ -#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */ -#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */ -#define HW_HASH_KEY_SIZE 40 -#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */ - -/* Statistics Ejection Buffer Completion Record (16 bytes) */ +/* cmd_nums (size:64b/8B) */ +struct cmd_nums { + __le16 req_type; + #define HWRM_VER_GET 0x0UL + #define HWRM_FUNC_BUF_UNRGTR 0xeUL + #define HWRM_FUNC_VF_CFG 0xfUL + #define HWRM_RESERVED1 0x10UL + #define HWRM_FUNC_RESET 0x11UL + #define HWRM_FUNC_GETFID 0x12UL + #define HWRM_FUNC_VF_ALLOC 0x13UL + #define HWRM_FUNC_VF_FREE 0x14UL + #define HWRM_FUNC_QCAPS 0x15UL + #define HWRM_FUNC_QCFG 0x16UL + #define HWRM_FUNC_CFG 0x17UL + #define HWRM_FUNC_QSTATS 0x18UL + #define HWRM_FUNC_CLR_STATS 0x19UL + #define HWRM_FUNC_DRV_UNRGTR 0x1aUL + #define HWRM_FUNC_VF_RESC_FREE 0x1bUL + #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL + #define HWRM_FUNC_DRV_RGTR 0x1dUL + #define HWRM_FUNC_DRV_QVER 0x1eUL + #define HWRM_FUNC_BUF_RGTR 0x1fUL + #define HWRM_PORT_PHY_CFG 0x20UL + #define HWRM_PORT_MAC_CFG 0x21UL + #define HWRM_PORT_TS_QUERY 0x22UL + #define HWRM_PORT_QSTATS 0x23UL + #define HWRM_PORT_LPBK_QSTATS 0x24UL + #define HWRM_PORT_CLR_STATS 0x25UL + #define HWRM_PORT_LPBK_CLR_STATS 0x26UL + #define HWRM_PORT_PHY_QCFG 0x27UL + #define HWRM_PORT_MAC_QCFG 0x28UL + #define HWRM_PORT_MAC_PTP_QCFG 0x29UL + #define HWRM_PORT_PHY_QCAPS 0x2aUL + #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL + #define HWRM_PORT_PHY_I2C_READ 0x2cUL + #define HWRM_PORT_LED_CFG 0x2dUL + #define HWRM_PORT_LED_QCFG 0x2eUL + #define HWRM_PORT_LED_QCAPS 0x2fUL + #define HWRM_QUEUE_QPORTCFG 0x30UL + #define HWRM_QUEUE_QCFG 0x31UL + #define HWRM_QUEUE_CFG 0x32UL + #define HWRM_FUNC_VLAN_CFG 0x33UL + #define HWRM_FUNC_VLAN_QCFG 0x34UL + #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL + #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL + #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL + #define HWRM_QUEUE_PRI2COS_CFG 0x38UL + #define HWRM_QUEUE_COS2BW_QCFG 0x39UL + #define HWRM_QUEUE_COS2BW_CFG 0x3aUL + #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL + #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL + #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL + #define HWRM_VNIC_ALLOC 0x40UL + #define HWRM_VNIC_FREE 0x41UL + #define HWRM_VNIC_CFG 0x42UL + #define HWRM_VNIC_QCFG 0x43UL + #define HWRM_VNIC_TPA_CFG 0x44UL + #define HWRM_VNIC_TPA_QCFG 0x45UL + #define HWRM_VNIC_RSS_CFG 0x46UL + #define HWRM_VNIC_RSS_QCFG 0x47UL + #define HWRM_VNIC_PLCMODES_CFG 0x48UL + #define HWRM_VNIC_PLCMODES_QCFG 0x49UL + #define HWRM_VNIC_QCAPS 0x4aUL + #define HWRM_RING_ALLOC 0x50UL + #define HWRM_RING_FREE 0x51UL + #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL + #define HWRM_RING_RESET 0x5eUL + #define HWRM_RING_GRP_ALLOC 0x60UL + #define HWRM_RING_GRP_FREE 0x61UL + #define HWRM_RESERVED5 0x64UL + #define HWRM_RESERVED6 0x65UL + #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL + #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL + #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL + 
#define HWRM_CFA_L2_FILTER_FREE 0x91UL + #define HWRM_CFA_L2_FILTER_CFG 0x92UL + #define HWRM_CFA_L2_SET_RX_MASK 0x93UL + #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL + #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL + #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL + #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL + #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL + #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL + #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL + #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL + #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL + #define HWRM_CFA_EM_FLOW_FREE 0x9dUL + #define HWRM_CFA_EM_FLOW_CFG 0x9eUL + #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL + #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL + #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL + #define HWRM_STAT_CTX_ALLOC 0xb0UL + #define HWRM_STAT_CTX_FREE 0xb1UL + #define HWRM_STAT_CTX_QUERY 0xb2UL + #define HWRM_STAT_CTX_CLR_STATS 0xb3UL + #define HWRM_FW_RESET 0xc0UL + #define HWRM_FW_QSTATUS 0xc1UL + #define HWRM_FW_SET_TIME 0xc8UL + #define HWRM_FW_GET_TIME 0xc9UL + #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL + #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL + #define HWRM_FW_IPC_MAILBOX 0xccUL + #define HWRM_EXEC_FWD_RESP 0xd0UL + #define HWRM_REJECT_FWD_RESP 0xd1UL + #define HWRM_FWD_RESP 0xd2UL + #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL + #define HWRM_TEMP_MONITOR_QUERY 0xe0UL + #define HWRM_WOL_FILTER_ALLOC 0xf0UL + #define HWRM_WOL_FILTER_FREE 0xf1UL + #define HWRM_WOL_FILTER_QCFG 0xf2UL + #define HWRM_WOL_REASON_QCFG 0xf3UL + #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL + #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL + #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL + #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL + #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL + #define HWRM_CFA_VFR_ALLOC 0xfdUL + #define HWRM_CFA_VFR_FREE 0xfeUL + #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL + #define HWRM_CFA_VF_PAIR_FREE 0x101UL + #define HWRM_CFA_VF_PAIR_INFO 0x102UL + #define HWRM_CFA_FLOW_ALLOC 0x103UL + #define HWRM_CFA_FLOW_FREE 0x104UL + #define HWRM_CFA_FLOW_FLUSH 0x105UL + #define HWRM_CFA_FLOW_STATS 0x106UL + #define HWRM_CFA_FLOW_INFO 0x107UL + #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL + #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL + #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL + #define HWRM_CFA_PAIR_ALLOC 0x10dUL + #define HWRM_CFA_PAIR_FREE 0x10eUL + #define HWRM_CFA_PAIR_INFO 0x10fUL + #define HWRM_FW_IPC_MSG 0x110UL + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL + #define HWRM_ENGINE_CKV_HELLO 0x12dUL + #define HWRM_ENGINE_CKV_STATUS 0x12eUL + #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL + #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL + #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL + #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL + #define HWRM_ENGINE_CKV_FLUSH 0x133UL + #define HWRM_ENGINE_CKV_RNG_GET 0x134UL + #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL + #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL + #define HWRM_ENGINE_QG_QUERY 0x13dUL + #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL + #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL + #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL + #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL + #define HWRM_ENGINE_QG_METER_QUERY 0x142UL + #define HWRM_ENGINE_QG_METER_BIND 0x143UL + #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL + #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL + #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL + #define HWRM_ENGINE_SG_QUERY 0x147UL + #define HWRM_ENGINE_SG_METER_QUERY 0x148UL + #define 
HWRM_ENGINE_SG_METER_CONFIG 0x149UL + #define HWRM_ENGINE_SG_QG_BIND 0x14aUL + #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL + #define HWRM_ENGINE_CONFIG_QUERY 0x154UL + #define HWRM_ENGINE_STATS_CONFIG 0x155UL + #define HWRM_ENGINE_STATS_CLEAR 0x156UL + #define HWRM_ENGINE_STATS_QUERY 0x157UL + #define HWRM_ENGINE_RQ_ALLOC 0x15eUL + #define HWRM_ENGINE_RQ_FREE 0x15fUL + #define HWRM_ENGINE_CQ_ALLOC 0x160UL + #define HWRM_ENGINE_CQ_FREE 0x161UL + #define HWRM_ENGINE_NQ_ALLOC 0x162UL + #define HWRM_ENGINE_NQ_FREE 0x163UL + #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL + #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL + #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL + #define HWRM_SELFTEST_QLIST 0x200UL + #define HWRM_SELFTEST_EXEC 0x201UL + #define HWRM_SELFTEST_IRQ 0x202UL + #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL + #define HWRM_DBG_READ_DIRECT 0xff10UL + #define HWRM_DBG_READ_INDIRECT 0xff11UL + #define HWRM_DBG_WRITE_DIRECT 0xff12UL + #define HWRM_DBG_WRITE_INDIRECT 0xff13UL + #define HWRM_DBG_DUMP 0xff14UL + #define HWRM_DBG_ERASE_NVM 0xff15UL + #define HWRM_DBG_CFG 0xff16UL + #define HWRM_DBG_COREDUMP_LIST 0xff17UL + #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL + #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL + #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL + #define HWRM_NVM_VALIDATE_OPTION 0xffefUL + #define HWRM_NVM_FLUSH 0xfff0UL + #define HWRM_NVM_GET_VARIABLE 0xfff1UL + #define HWRM_NVM_SET_VARIABLE 0xfff2UL + #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL + #define HWRM_NVM_MODIFY 0xfff4UL + #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL + #define HWRM_NVM_GET_DEV_INFO 0xfff6UL + #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL + #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL + #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL + #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL + #define HWRM_NVM_GET_DIR_INFO 0xfffbUL + #define HWRM_NVM_RAW_DUMP 0xfffcUL + #define HWRM_NVM_READ 0xfffdUL + #define HWRM_NVM_WRITE 0xfffeUL + #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL + #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK + __le16 unused_0[3]; +}; + +/* ret_codes (size:64b/8B) */ +struct ret_codes { + __le16 error_code; + #define HWRM_ERR_CODE_SUCCESS 0x0UL + #define HWRM_ERR_CODE_FAIL 0x1UL + #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL + #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL + #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL + #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL + #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL + #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL + #define HWRM_ERR_CODE_NO_BUFFER 0x8UL + #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL + #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL + #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL + #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED + __le16 unused_0[3]; +}; + +/* hwrm_err_output (size:128b/16B) */ +struct hwrm_err_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 opaque_0; + __le16 opaque_1; + u8 cmd_err; + u8 valid; +}; +#define HWRM_NA_SIGNATURE ((__le32)(-1)) +#define HWRM_MAX_REQ_LEN 128 +#define HWRM_MAX_RESP_LEN 280 +#define HW_HASH_INDEX_SIZE 0x80 +#define HW_HASH_KEY_SIZE 40 +#define HWRM_RESP_VALID_KEY 1 +#define HWRM_VERSION_MAJOR 1 +#define HWRM_VERSION_MINOR 9 +#define HWRM_VERSION_UPDATE 0 +#define HWRM_VERSION_RSVD 0 +#define HWRM_VERSION_STR "1.9.0.0" + +/* hwrm_ver_get_input (size:192b/24B) */ +struct hwrm_ver_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 hwrm_intf_maj; + u8 hwrm_intf_min; + u8 hwrm_intf_upd; + u8 unused_0[5]; +}; + +/* hwrm_ver_get_output 
(size:1408b/176B) */ +struct hwrm_ver_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 hwrm_intf_maj_8b; + u8 hwrm_intf_min_8b; + u8 hwrm_intf_upd_8b; + u8 hwrm_intf_rsvd_8b; + u8 hwrm_fw_maj_8b; + u8 hwrm_fw_min_8b; + u8 hwrm_fw_bld_8b; + u8 hwrm_fw_rsvd_8b; + u8 mgmt_fw_maj_8b; + u8 mgmt_fw_min_8b; + u8 mgmt_fw_bld_8b; + u8 mgmt_fw_rsvd_8b; + u8 netctrl_fw_maj_8b; + u8 netctrl_fw_min_8b; + u8 netctrl_fw_bld_8b; + u8 netctrl_fw_rsvd_8b; + __le32 dev_caps_cfg; + #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL + #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL + u8 roce_fw_maj_8b; + u8 roce_fw_min_8b; + u8 roce_fw_bld_8b; + u8 roce_fw_rsvd_8b; + char hwrm_fw_name[16]; + char mgmt_fw_name[16]; + char netctrl_fw_name[16]; + u8 reserved2[16]; + char roce_fw_name[16]; + __le16 chip_num; + u8 chip_rev; + u8 chip_metal; + u8 chip_bond_id; + u8 chip_platform_type; + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM + __le16 max_req_win_len; + __le16 max_resp_len; + __le16 def_req_timeout; + u8 flags; + #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL + #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL + u8 unused_0[2]; + u8 always_1; + __le16 hwrm_intf_major; + __le16 hwrm_intf_minor; + __le16 hwrm_intf_build; + __le16 hwrm_intf_patch; + __le16 hwrm_fw_major; + __le16 hwrm_fw_minor; + __le16 hwrm_fw_build; + __le16 hwrm_fw_patch; + __le16 mgmt_fw_major; + __le16 mgmt_fw_minor; + __le16 mgmt_fw_build; + __le16 mgmt_fw_patch; + __le16 netctrl_fw_major; + __le16 netctrl_fw_minor; + __le16 netctrl_fw_build; + __le16 netctrl_fw_patch; + __le16 roce_fw_major; + __le16 roce_fw_minor; + __le16 roce_fw_build; + __le16 roce_fw_patch; + __le16 max_ext_req_len; + u8 unused_1[5]; + u8 valid; +}; + +/* eject_cmpl (size:128b/16B) */ struct eject_cmpl { - __le16 type; - #define EJECT_CMPL_TYPE_MASK 0x3fUL - #define EJECT_CMPL_TYPE_SFT 0 - #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL - __le16 len; - __le32 opaque; - __le32 v; - #define EJECT_CMPL_V 0x1UL - __le32 unused_2; -}; - -/* HWRM Completion Record (16 bytes) */ + __le16 type; + #define EJECT_CMPL_TYPE_MASK 0x3fUL + #define EJECT_CMPL_TYPE_SFT 0 + #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL + #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT + __le16 len; + __le32 opaque; + __le32 v; + #define EJECT_CMPL_V 0x1UL + __le32 unused_2; +}; + +/* hwrm_cmpl (size:128b/16B) */ struct hwrm_cmpl { - __le16 type; - #define CMPL_TYPE_MASK 0x3fUL - #define CMPL_TYPE_SFT 0 - #define CMPL_TYPE_HWRM_DONE 0x20UL - __le16 sequence_id; - __le32 unused_1; - __le32 v; - #define CMPL_V 0x1UL - __le32 unused_3; -}; - -/* HWRM Forwarded Request (16 bytes) */ + __le16 type; + #define CMPL_TYPE_MASK 0x3fUL + #define CMPL_TYPE_SFT 0 + #define CMPL_TYPE_HWRM_DONE 0x20UL + #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE + __le16 sequence_id; + __le32 unused_1; + __le32 v; + #define CMPL_V 0x1UL + __le32 unused_3; +}; + +/* hwrm_fwd_req_cmpl (size:128b/16B) */ struct hwrm_fwd_req_cmpl { - __le16 req_len_type; - #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL - #define FWD_REQ_CMPL_TYPE_SFT 0 - #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL - #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL - #define 
FWD_REQ_CMPL_REQ_LEN_SFT 6 - __le16 source_id; - __le32 unused_0; - __le32 req_buf_addr_v[2]; - #define FWD_REQ_CMPL_V 0x1UL - #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL - #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1 -}; - -/* HWRM Forwarded Response (16 bytes) */ + __le16 req_len_type; + #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL + #define FWD_REQ_CMPL_TYPE_SFT 0 + #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL + #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ + #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL + #define FWD_REQ_CMPL_REQ_LEN_SFT 6 + __le16 source_id; + __le32 unused0; + __le32 req_buf_addr_v[2]; + #define FWD_REQ_CMPL_V 0x1UL + #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL + #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1 +}; + +/* hwrm_fwd_resp_cmpl (size:128b/16B) */ struct hwrm_fwd_resp_cmpl { - __le16 type; - #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL - #define FWD_RESP_CMPL_TYPE_SFT 0 - #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL - __le16 source_id; - __le16 resp_len; - __le16 unused_1; - __le32 resp_buf_addr_v[2]; - #define FWD_RESP_CMPL_V 0x1UL - #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL - #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1 -}; - -/* HWRM Asynchronous Event Completion Record (16 bytes) */ + __le16 type; + #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL + #define FWD_RESP_CMPL_TYPE_SFT 0 + #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL + #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP + __le16 source_id; + __le16 resp_len; + __le16 unused_1; + __le32 resp_buf_addr_v[2]; + #define FWD_RESP_CMPL_V 0x1UL + #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL + #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1 +}; + +/* hwrm_async_event_cmpl (size:128b/16B) */ struct hwrm_async_event_cmpl { - __le16 type; - #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; - #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL - #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL - #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL - #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL + __le16 type; + #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL - #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL - #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL - #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL - #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL - #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL - #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL - #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL - #define 
ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL - #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_V 0x1UL - #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; -}; - -/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */ + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL + #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL + #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_V 0x1UL + #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */ struct hwrm_async_event_cmpl_link_status_change { - __le16 type; - #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL - #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL - #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0) - #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0) - #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP - #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL - #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1 - #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL - #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4 -}; - -/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */ -struct hwrm_async_event_cmpl_link_mtu_change { - __le16 type; - #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL - 
__le16 event_id; - #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL - #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL - #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0 -}; - -/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */ -struct hwrm_async_event_cmpl_link_speed_change { - __le16 type; - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1 - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16 -}; - -/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */ -struct hwrm_async_event_cmpl_dcb_config_change { - __le16 type; - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL - __le32 event_data2; - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL - u8 opaque_v; - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL - #define 
ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16 - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16) - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24 - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24) - #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE -}; - -/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */ + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1 + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4 +}; + +/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */ struct hwrm_async_event_cmpl_port_conn_not_allowed { - __le16 type; - #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; + __le16 type; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL #define 
ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL - #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 - #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL - #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16 - #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16) - #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16) - #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16) - #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16) - #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN -}; - -/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */ -struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed { - __le16 type; - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 -}; - -/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */ + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16 + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN +}; + +/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */ struct hwrm_async_event_cmpl_link_speed_cfg_change { - __le16 type; - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0 - #define 
ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL -}; - -/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */ -struct hwrm_async_event_cmpl_func_drvr_unload { - __le16 type; - #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; - #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL - #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL - #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 -}; - -/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */ -struct hwrm_async_event_cmpl_func_drvr_load { - __le16 type; - #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; - #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL - #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL - #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 -}; - -/* HWRM Asynchronous Event Completion Record to indicate completion of FLR related processing (16 bytes) */ -struct hwrm_async_event_cmpl_func_flr_proc_cmplt { - __le16 type; - #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; - #define 
ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V 0x1UL - #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK 0xffffUL - #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT 0 -}; - -/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */ + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL +}; + +/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */ struct hwrm_async_event_cmpl_pf_drvr_unload { - __le16 type; - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; + __le16 type; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16 -}; - -/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */ -struct hwrm_async_event_cmpl_pf_drvr_load { - __le16 type; - #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; - #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL - #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL - #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 - #define 
ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL - #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16 -}; - -/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */ -struct hwrm_async_event_cmpl_vf_flr { - __le16 type; - #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; - #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL - #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL - #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0 -}; - -/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */ -struct hwrm_async_event_cmpl_vf_mac_addr_change { - __le16 type; - #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; - #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL - #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL - #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0 -}; - -/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */ -struct hwrm_async_event_cmpl_pf_vf_comm_status_change { - __le16 type; - #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; - #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL - #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL -}; - -/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */ + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16 +}; + +/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */ struct hwrm_async_event_cmpl_vf_cfg_change { - __le16 type; - #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; + __le16 type; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; #define 
ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL - #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL - #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL - #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL - #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL -}; - -/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */ -struct hwrm_async_event_cmpl_hwrm_error { - __le16 type; - #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL - __le16 event_id; - #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL - __le32 event_data2; - #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL - #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0 - #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL - #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL - #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL - #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL - u8 opaque_v; - #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL - #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL - #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL -}; - -/* hwrm_ver_get */ -/* Input (24 bytes) */ -struct hwrm_ver_get_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - u8 hwrm_intf_maj; - u8 hwrm_intf_min; - u8 hwrm_intf_upd; - u8 unused_0[5]; -}; - -/* Output (128 bytes) */ -struct hwrm_ver_get_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 hwrm_intf_maj; - u8 hwrm_intf_min; - u8 hwrm_intf_upd; - u8 hwrm_intf_rsvd; - u8 hwrm_fw_maj; - u8 hwrm_fw_min; - u8 hwrm_fw_bld; - u8 hwrm_fw_rsvd; - u8 mgmt_fw_maj; - u8 mgmt_fw_min; - u8 mgmt_fw_bld; - u8 mgmt_fw_rsvd; - u8 netctrl_fw_maj; - u8 netctrl_fw_min; - u8 netctrl_fw_bld; - u8 netctrl_fw_rsvd; - __le32 dev_caps_cfg; - #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL - #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL - #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL - #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL - u8 roce_fw_maj; - u8 roce_fw_min; - u8 roce_fw_bld; - u8 roce_fw_rsvd; - char hwrm_fw_name[16]; - char mgmt_fw_name[16]; - char netctrl_fw_name[16]; - __le32 reserved2[4]; - char roce_fw_name[16]; - __le16 chip_num; - u8 chip_rev; - u8 chip_metal; - u8 chip_bond_id; - u8 chip_platform_type; - #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL - #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL - #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL - __le16 max_req_win_len; - __le16 max_resp_len; - __le16 def_req_timeout; - u8 init_pending; - #define VER_GET_RESP_INIT_PENDING_DEV_NOT_RDY 0x1UL - u8 unused_0; - u8 unused_1; - u8 valid; -}; - -/* hwrm_func_reset */ -/* Input (24 bytes) */ + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST 
ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL +}; + +/* hwrm_func_reset_input (size:192b/24B) */ struct hwrm_func_reset_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 enables; - #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL - __le16 vf_id; - u8 func_reset_level; - #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL - #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL - #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL - #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL - u8 unused_0; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL + __le16 vf_id; + u8 func_reset_level; + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF + u8 unused_0; +}; + +/* hwrm_func_reset_output (size:128b/16B) */ struct hwrm_func_reset_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_func_getfid */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_getfid_input (size:192b/24B) */ struct hwrm_func_getfid_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 enables; - #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL - __le16 pci_id; - __le16 unused_0; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL + __le16 pci_id; + u8 unused_0[2]; +}; + +/* hwrm_func_getfid_output (size:128b/16B) */ struct hwrm_func_getfid_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 fid; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* hwrm_func_vf_alloc */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_func_vf_alloc_input (size:192b/24B) */ struct hwrm_func_vf_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 enables; - #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL - __le16 first_vf_id; - __le16 num_vfs; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL + 
+/* hwrm_func_getfid_input (size:192b/24B) */
 struct hwrm_func_getfid_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
- __le16 pci_id;
- __le16 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+ __le16 pci_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_func_getfid_output (size:128b/16B) */
 struct hwrm_func_getfid_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_func_vf_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
 struct hwrm_func_vf_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
 struct hwrm_func_vf_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 first_vf_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_func_vf_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_vf_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_free_input (size:192b/24B) */
 struct hwrm_func_vf_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_free_output (size:128b/16B) */
 struct hwrm_func_vf_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_vf_cfg */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_cfg_input (size:448b/56B) */
 struct hwrm_func_vf_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
- #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
- #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
- __le16 mtu;
- __le16 guest_vlan;
- __le16 async_event_cr;
- u8 dflt_mac_addr[6];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ __le16 mtu;
+ __le16 guest_vlan;
+ __le16 async_event_cr;
+ u8 dflt_mac_addr[6];
+ __le32 flags;
+ #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
+ #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
+ #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
+ #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
+ #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 unused_0[4];
+};
+
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
 struct hwrm_func_vf_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
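Note the convention shared by all the *_cfg requests above and below: firmware consumes a field only when the matching bit is set in enables. A hedged sketch, with hwrm_send() again standing in for the driver's real send path:

	/* Ask firmware to change only the VF MTU; unflagged fields are ignored. */
	struct hwrm_func_vf_cfg_input req = {0};

	req.req_type = cpu_to_le16(HWRM_FUNC_VF_CFG);
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_MTU);
	req.mtu = cpu_to_le16(9000);
	hwrm_send(&req, sizeof(req));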
+/* hwrm_func_qcaps_input (size:192b/24B) */
 struct hwrm_func_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
 };
-/* Output (80 bytes) */
+/* hwrm_func_qcaps_output (size:640b/80B) */
 struct hwrm_func_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le32 flags;
- #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
- u8 mac_address[6];
- __le16 max_rsscos_ctx;
- __le16 max_cmpl_rings;
- __le16 max_tx_rings;
- __le16 max_rx_rings;
- __le16 max_l2_ctxs;
- __le16 max_vnics;
- __le16 first_vf_id;
- __le16 max_vfs;
- __le16 max_stat_ctx;
- __le32 max_encap_records;
- __le32 max_decap_records;
- __le32 max_tx_em_flows;
- __le32 max_tx_wm_flows;
- __le32 max_rx_em_flows;
- __le32 max_rx_wm_flows;
- __le32 max_mcast_filters;
- __le32 max_flow_id;
- __le32 max_hw_ring_grps;
- __le16 max_sp_tx_rings;
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_func_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le32 flags;
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ u8 mac_address[6];
+ __le16 max_rsscos_ctx;
+ __le16 max_cmpl_rings;
+ __le16 max_tx_rings;
+ __le16 max_rx_rings;
+ __le16 max_l2_ctxs;
+ __le16 max_vnics;
+ __le16 first_vf_id;
+ __le16 max_vfs;
+ __le16 max_stat_ctx;
+ __le32 max_encap_records;
+ __le32 max_decap_records;
+ __le32 max_tx_em_flows;
+ __le32 max_tx_wm_flows;
+ __le32 max_rx_em_flows;
+ __le32 max_rx_wm_flows;
+ __le32 max_mcast_filters;
+ __le32 max_flow_id;
+ __le32 max_hw_ring_grps;
+ __le16 max_sp_tx_rings;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_func_qcfg_input (size:192b/24B) */
 struct hwrm_func_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
 };
-/* Output (72 bytes) */
+/* hwrm_func_qcfg_output (size:640b/80B) */
 struct hwrm_func_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le16 vlan;
- __le16 flags;
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
- #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
- #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
- #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
- #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
- u8 mac_address[6];
- __le16 pci_id;
- __le16 alloc_rsscos_ctx;
- __le16 alloc_cmpl_rings;
- __le16 alloc_tx_rings;
- __le16 alloc_rx_rings;
- __le16 alloc_l2_ctx;
- __le16 alloc_vnics;
- __le16 mtu;
- __le16 mru;
- __le16 stat_ctx_id;
- u8 port_partition_type;
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
- u8 port_pf_cnt;
- #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
- __le16 dflt_vnic_id;
- __le16 max_mtu_configured;
- __le32 min_bw;
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le16 vlan;
+ __le16 flags;
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+ u8 mac_address[6];
+ __le16 pci_id;
+ __le16 alloc_rsscos_ctx;
+ __le16 alloc_cmpl_rings;
+ __le16 alloc_tx_rings;
+ __le16 alloc_rx_rings;
+ __le16 alloc_l2_ctx;
+ __le16 alloc_vnics;
+ __le16 mtu;
+ __le16 mru;
+ __le16 stat_ctx_id;
+ u8 port_partition_type;
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
+ u8 port_pf_cnt;
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
+ __le16 dflt_vnic_id;
+ __le16 max_mtu_configured;
+ __le32 min_bw;
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
 #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
 #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
 #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
 #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 evb_mode;
- #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
- u8 unused_0;
- __le16 alloc_vfs;
- __le32 alloc_mcast_filters;
- __le32 alloc_hw_ring_grps;
- __le16 alloc_sp_tx_rings;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_func_vlan_cfg */
-/* Input (48 bytes) */
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 evb_mode;
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
+ #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
+ u8 cache_linesize;
+ #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128
+ __le16 alloc_vfs;
+ __le32 alloc_mcast_filters;
+ __le32 alloc_hw_ring_grps;
+ __le16 alloc_sp_tx_rings;
+ __le16 alloc_stat_ctx;
+ u8 unused_2[7];
+ u8 valid;
+};
+
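The min_bw/max_bw words pack three things into 32 bits: a 28-bit value, a scale bit (bits vs. bytes), and a 3-bit unit. A hedged decode, with resp a hypothetical pointer to a completed hwrm_func_qcfg_output:

	u32 bw = le32_to_cpu(resp->min_bw);
	u32 val = (bw & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK) >>
		  FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT;
	bool bytes = (bw & FUNC_QCFG_RESP_MIN_BW_SCALE) ==
		     FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES;

	if ((bw & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK) ==
	    FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA)
		pr_info("min bw: %u M%s/s\n", val, bytes ? "B" : "b");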
+/* hwrm_func_vlan_cfg_input (size:384b/48B) */
 struct hwrm_func_vlan_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0;
- u8 unused_1;
- __le32 enables;
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
- __le16 stag_vid;
- u8 stag_pcp;
- u8 unused_2;
- __be16 stag_tpid;
- __le16 ctag_vid;
- u8 ctag_pcp;
- u8 unused_3;
- __be16 ctag_tpid;
- __le32 rsvd1;
- __le32 rsvd2;
- __le32 unused_4;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 enables;
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
+ __le16 stag_vid;
+ u8 stag_pcp;
+ u8 unused_1;
+ __be16 stag_tpid;
+ __le16 ctag_vid;
+ u8 ctag_pcp;
+ u8 unused_2;
+ __be16 ctag_tpid;
+ __le32 rsvd1;
+ __le32 rsvd2;
+ u8 unused_3[4];
+};
+
+/* hwrm_func_vlan_cfg_output (size:128b/16B) */
 struct hwrm_func_vlan_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_cfg */
-/* Input (88 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_cfg_input (size:704b/88B) */
 struct hwrm_func_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0;
- u8 unused_1;
- __le32 flags;
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
- #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
- #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
- #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
- #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
- #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
- __le32 enables;
- #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
- #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
- #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
- #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
- #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
- #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
- #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
- #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
- #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
- #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
- #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
- __le16 mtu;
- __le16 mru;
- __le16 num_rsscos_ctxs;
- __le16 num_cmpl_rings;
- __le16 num_tx_rings;
- __le16 num_rx_rings;
- __le16 num_l2_ctxs;
- __le16 num_vnics;
- __le16 num_stat_ctxs;
- __le16 num_hw_ring_grps;
- u8 dflt_mac_addr[6];
- __le16 dflt_vlan;
- __be32 dflt_ip_addr[4];
- __le32 min_bw;
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- __le16 async_event_cr;
- u8 vlan_antispoof_mode;
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 flags;
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+ #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
+ #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
+ #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
+ #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
+ #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
+ #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
+ #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
+ #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
+ #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
+ #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
+ __le32 enables;
+ #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ __le16 mtu;
+ __le16 mru;
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 dflt_mac_addr[6];
+ __le16 dflt_vlan;
+ __be32 dflt_ip_addr[4];
+ __le32 min_bw;
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ __le16 async_event_cr;
+ u8 vlan_antispoof_mode;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
 #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
- u8 allowed_vlan_pris;
- u8 evb_mode;
- #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
- #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
- u8 unused_2;
- __le16 num_mcast_filters;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
+ u8 allowed_vlan_pris;
+ u8 evb_mode;
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
+ #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
+ u8 cache_linesize;
+ #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
+ #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
+ #define FUNC_CFG_REQ_CACHE_LINESIZE_LAST FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128
+ __le16 num_mcast_filters;
+};
+
+/* hwrm_func_cfg_output (size:128b/16B) */
+struct hwrm_func_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
 };
-/* Output (16 bytes) */
-struct hwrm_func_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
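All multi-byte fields in these structures are little-endian on the wire (__le16/__le32/__le64), so a driver fills them through cpu_to_le*() and reads them back through le*_to_cpu(). A hedged fragment; treating fid 0xffff as "this function" is an assumption based on how the bnxt driver appears to use it, not something this header states:

	struct hwrm_func_cfg_input req = {0};

	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS);
	req.num_tx_rings = cpu_to_le16(8);
	req.num_rx_rings = cpu_to_le16(8);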
-
-/* hwrm_func_qstats */
-/* Input (24 bytes) */
+/* hwrm_func_qstats_input (size:192b/24B) */
 struct hwrm_func_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
 };
-/* Output (176 bytes) */
+/* hwrm_func_qstats_output (size:1408b/176B) */
 struct hwrm_func_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_clr_stats */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_clr_stats_input (size:192b/24B) */
 struct hwrm_func_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
 };
-/* Output (16 bytes) */
+/* hwrm_func_clr_stats_output (size:128b/16B) */
 struct hwrm_func_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_vf_resc_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
 struct hwrm_func_vf_resc_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[6];
 };
-/* Output (16 bytes) */
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
 struct hwrm_func_vf_resc_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_vf_vnic_ids_query */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
 struct hwrm_func_vf_vnic_ids_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- u8 unused_0;
- u8 unused_1;
- __le32 max_vnic_id_cnt;
- __le64 vnic_id_tbl_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[2];
+ __le32 max_vnic_id_cnt;
+ __le64 vnic_id_tbl_addr;
+};
+
+/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
 struct hwrm_func_vf_vnic_ids_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id_cnt;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_func_drv_rgtr */
-/* Input (80 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id_cnt;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_drv_rgtr_input (size:832b/104B) */
 struct hwrm_func_drv_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
- __le32 enables;
- #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
- __le16 os_type;
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
- u8 ver_maj;
- u8 ver_min;
- u8 ver_upd;
- u8 unused_0;
- __le16 unused_1;
- __le32 timestamp;
- __le32 unused_2;
- __le32 vf_req_fwd[8];
- __le32 async_event_fwd[8];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ __le32 enables;
+ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+ __le16 os_type;
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0[3];
+ __le32 timestamp;
+ u8 unused_1[4];
+ __le32 vf_req_fwd[8];
+ __le32 async_event_fwd[8];
+};
+
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
 struct hwrm_func_drv_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_drv_unrgtr */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
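vf_req_fwd and async_event_fwd are 256-bit bitmaps laid out as eight little-endian 32-bit words; setting bit N asks firmware to forward async event ID N (or VF command N) to this driver. A hedged helper sketch:

	/* Sketch: request forwarding of one async event ID to this function. */
	static void hwrm_fwd_async_event(struct hwrm_func_drv_rgtr_input *req,
					 u16 event_id)
	{
		req->async_event_fwd[event_id / 32] |=
			cpu_to_le32(1U << (event_id % 32));
	}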
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
 struct hwrm_func_drv_unrgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
 #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
- __le32 unused_0;
+ u8 unused_0[4];
 };
-/* Output (16 bytes) */
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
 struct hwrm_func_drv_unrgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_buf_rgtr */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
 struct hwrm_func_buf_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
- #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
- __le16 vf_id;
- __le16 req_buf_num_pages;
- __le16 req_buf_page_size;
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
- __le16 req_buf_len;
- __le16 resp_buf_len;
- u8 unused_0;
- u8 unused_1;
- __le64 req_buf_page_addr0;
- __le64 req_buf_page_addr1;
- __le64 req_buf_page_addr2;
- __le64 req_buf_page_addr3;
- __le64 req_buf_page_addr4;
- __le64 req_buf_page_addr5;
- __le64 req_buf_page_addr6;
- __le64 req_buf_page_addr7;
- __le64 req_buf_page_addr8;
- __le64 req_buf_page_addr9;
- __le64 error_buf_addr;
- __le64 resp_buf_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+ __le16 vf_id;
+ __le16 req_buf_num_pages;
+ __le16 req_buf_page_size;
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
+ __le16 req_buf_len;
+ __le16 resp_buf_len;
+ u8 unused_0[2];
+ __le64 req_buf_page_addr0;
+ __le64 req_buf_page_addr1;
+ __le64 req_buf_page_addr2;
+ __le64 req_buf_page_addr3;
+ __le64 req_buf_page_addr4;
+ __le64 req_buf_page_addr5;
+ __le64 req_buf_page_addr6;
+ __le64 req_buf_page_addr7;
+ __le64 req_buf_page_addr8;
+ __le64 req_buf_page_addr9;
+ __le64 error_buf_addr;
+ __le64 resp_buf_addr;
+};
+
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
 struct hwrm_func_buf_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_drv_qver */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_qver_input (size:192b/24B) */
 struct hwrm_func_drv_qver_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 reserved;
- __le16 fid;
- __le16 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 reserved;
+ __le16 fid;
+ u8 unused_0[2];
 };
-/* Output (16 bytes) */
+/* hwrm_func_drv_qver_output (size:128b/16B) */
 struct hwrm_func_drv_qver_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 os_type;
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
- u8 ver_maj;
- u8 ver_min;
- u8 ver_upd;
- u8 unused_0;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_port_phy_cfg */
-/* Input (56 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 os_type;
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_resource_qcaps_output (size:384b/48B) */
+struct hwrm_func_resource_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 max_vfs;
+ __le16 max_msix;
+ __le16 vf_reservation_strategy;
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+struct hwrm_func_vf_resource_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 max_msix;
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ u8 unused_0[4];
+};
+
+/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
+struct hwrm_func_vf_resource_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reserved_rsscos_ctx;
+ __le16 reserved_cmpl_rings;
+ __le16 reserved_tx_rings;
+ __le16 reserved_rx_rings;
+ __le16 reserved_l2_ctxs;
+ __le16 reserved_vnics;
+ __le16 reserved_stat_ctx;
+ __le16 reserved_hw_ring_grps;
+ u8 unused_0[7];
+ u8 valid;
+};
+
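The new hwrm_func_resource_qcaps/hwrm_func_vf_resource_cfg pair replaces fixed per-VF allocations with min/max ranges: min_* is a guaranteed reservation, max_* an upper bound, and vf_reservation_strategy reports whether firmware reserves maximally or minimally. A hedged fragment that echoes the queried maxima back while pinning a small guaranteed floor; qcaps_resp is a hypothetical pointer to a completed hwrm_func_resource_qcaps_output:

	struct hwrm_func_vf_resource_cfg_input req = {0};

	req.vf_id = cpu_to_le16(vf_id);
	req.min_tx_rings = cpu_to_le16(2);            /* guaranteed floor */
	req.max_tx_rings = qcaps_resp->max_tx_rings;  /* already little-endian */
	req.min_rx_rings = cpu_to_le16(2);
	req.max_rx_rings = qcaps_resp->max_rx_rings;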
+/* hwrm_port_phy_cfg_input (size:448b/56B) */
 struct hwrm_port_phy_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
- __le32 enables;
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
- #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
- #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
- #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
- #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
- #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
- __le16 port_id;
- __le16 force_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
- u8 auto_mode;
- #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
- u8 auto_duplex;
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
- u8 auto_pause;
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- u8 unused_0;
- __le16 auto_link_speed;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
- __le16 auto_link_speed_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
- u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
- u8 force_pause;
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
- u8 unused_1;
- __le32 preemphasis;
- __le16 eee_link_speed_mask;
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
- u8 unused_2;
- u8 unused_3;
- __le32 tx_lpi_timer;
- __le32 unused_4;
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ __le32 enables;
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ __le16 port_id;
+ __le16 force_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
+ u8 auto_duplex;
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
+ u8 auto_pause;
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ u8 unused_0;
+ __le16 auto_link_speed;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_REMOTE
+ u8 force_pause;
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+ u8 unused_1;
+ __le32 preemphasis;
+ __le16 eee_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ u8 unused_2[2];
+ __le32 tx_lpi_timer;
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
+ __le32 unused_3;
+};
+
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
 struct hwrm_port_phy_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_phy_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
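The link-speed fields above are encoded in units of 100 Mb/s (0xa = 1 Gb/s, 0x64 = 10 Gb/s, 0x3e8 = 100 Gb/s), with 0xffff reserved as an out-of-band marker for 10 Mb/s. A hedged sketch of forcing 25 Gb/s using only names from this header:

	struct hwrm_port_phy_cfg_input req = {0};

	req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
	/* 0xfa = 250, i.e. 250 x 100 Mb/s = 25 Gb/s */
	req.force_link_speed = cpu_to_le16(PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB);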
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
 struct hwrm_port_phy_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
 };
-/* Output (96 bytes) */
+/* hwrm_port_phy_qcfg_output (size:768b/96B) */
 struct hwrm_port_phy_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 link;
- #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
- u8 unused_0;
- __le16 link_speed;
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
- u8 duplex_cfg;
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
- u8 pause;
- #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
- __le16 support_speeds;
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
- __le16 force_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
- u8 auto_mode;
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
- u8 auto_pause;
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- __le16 auto_link_speed;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
- __le16 auto_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
- u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
- u8 force_pause;
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
- u8 module_status;
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
- __le32 preemphasis;
- u8 phy_maj;
- u8 phy_min;
- u8 phy_bld;
- u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
- u8 media_type;
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
- u8 xcvr_pkg_type;
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
- u8 eee_config_phy_addr;
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
- u8 parallel_detect;
- #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
- #define PORT_PHY_QCFG_RESP_RESERVED_MASK 0xfeUL
- #define PORT_PHY_QCFG_RESP_RESERVED_SFT 1
- __le16 link_partner_adv_speeds;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
- u8 link_partner_adv_auto_mode;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
+ u8 unused_0;
+ __le16 link_speed;
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
+ u8 duplex_cfg;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
+ u8 pause;
+ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+ __le16 support_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
+ __le16 force_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
+ u8 auto_pause;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB + __le16 auto_link_speed_mask; + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL + u8 wirespeed; + #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL + #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL + #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON + u8 lpbk; + #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL + #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL + #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_REMOTE + u8 force_pause; + #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL + u8 module_status; + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE + __le32 preemphasis; + u8 phy_maj; + u8 phy_min; + u8 phy_bld; + u8 phy_type; + #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL + #define 
PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX + u8 media_type; + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE + u8 xcvr_pkg_type; + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL + u8 eee_config_phy_addr; + #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL + #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0 + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5 + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL + u8 parallel_detect; + #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL + __le16 link_partner_adv_speeds; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL + u8 link_partner_adv_auto_mode; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL - #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL - u8 link_partner_adv_pause; - #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL - #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL - __le16 adv_eee_link_speed_mask; - #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL - #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL - #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL - #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL - #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL - #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL - #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL - __le16 link_partner_adv_eee_link_speed_mask; - #define 
PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL - #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL - #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL - #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL - #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL - #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL - #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL - __le32 xcvr_identifier_type_tx_lpi_timer; - #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL - #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0 - #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL - #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24 + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK + u8 link_partner_adv_pause; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL + __le16 adv_eee_link_speed_mask; + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le16 link_partner_adv_eee_link_speed_mask; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le32 xcvr_identifier_type_tx_lpi_timer; + #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL + #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0 + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24 #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24) #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24) #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24) #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24) #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24) - __le16 fec_cfg; - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL - u8 duplex_state; - #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL - #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL - u8 unused_1; - char phy_vendor_name[16]; - char phy_vendor_partnumber[16]; - __le32 unused_2; - u8 
unused_3; - u8 unused_4; - u8 unused_5; - u8 valid; -}; - -/* hwrm_port_mac_cfg */ -/* Input (40 bytes) */ + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 + __le16 fec_cfg; + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL + u8 duplex_state; + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL + u8 option_flags; + #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL + char phy_vendor_name[16]; + char phy_vendor_partnumber[16]; + u8 unused_2[7]; + u8 valid; +}; + +/* hwrm_port_mac_cfg_input (size:320b/40B) */ struct hwrm_port_mac_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL - #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL - #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL - #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL - #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL - #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL - #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL - #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL - #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL - #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL - #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL - #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL - #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL - __le32 enables; - #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL - #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL - #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL - #define PORT_MAC_CFG_REQ_ENABLES_RESERVED1 0x8UL - #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL - #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL - #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL - #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL - #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL - __le16 port_id; - u8 ipg; - u8 lpbk; - #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL - #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL - #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL - u8 vlan_pri2cos_map_pri; - u8 reserved1; - u8 tunnel_pri2cos_map_pri; - u8 dscp2pri_map_pri; - __le16 rx_ts_capture_ptp_msg_type; - __le16 tx_ts_capture_ptp_msg_type; - u8 cos_field_cfg; - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1 - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1) - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1) - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1) - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1) - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED 
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
-	u8 unused_0[3];
-};
-
-/* Output (16 bytes) */
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+	#define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
+	#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+	#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+	#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+	#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+	#define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+	#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+	#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+	__le32 enables;
+	#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+	#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+	#define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+	#define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+	#define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+	#define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+	#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+	#define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+	__le16 port_id;
+	u8 ipg;
+	u8 lpbk;
+	#define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+	#define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+	#define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+	#define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
+	u8 vlan_pri2cos_map_pri;
+	u8 reserved1;
+	u8 tunnel_pri2cos_map_pri;
+	u8 dscp2pri_map_pri;
+	__le16 rx_ts_capture_ptp_msg_type;
+	__le16 tx_ts_capture_ptp_msg_type;
+	u8 cos_field_cfg;
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+	#define
PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5 + u8 unused_0[3]; +}; + +/* hwrm_port_mac_cfg_output (size:128b/16B) */ struct hwrm_port_mac_cfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 mru; - __le16 mtu; - u8 ipg; - u8 lpbk; - #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL - #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL - #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL - u8 unused_0; - u8 valid; -}; - -/* hwrm_port_mac_ptp_qcfg */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + __le16 mtu; + u8 ipg; + u8 lpbk; + #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL + #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL + #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL + #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE + u8 unused_0; + u8 valid; +}; + +/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */ struct hwrm_port_mac_ptp_qcfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 port_id; - __le16 unused_0[3]; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; }; -/* Output (80 bytes) */ +/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */ struct hwrm_port_mac_ptp_qcfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 flags; - #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL - #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL - u8 unused_0; - __le16 unused_1; - __le32 rx_ts_reg_off_lower; - __le32 rx_ts_reg_off_upper; - __le32 rx_ts_reg_off_seq_id; - __le32 rx_ts_reg_off_src_id_0; - __le32 rx_ts_reg_off_src_id_1; - __le32 rx_ts_reg_off_src_id_2; - __le32 rx_ts_reg_off_domain_id; - __le32 rx_ts_reg_off_fifo; - __le32 rx_ts_reg_off_fifo_adv; - __le32 rx_ts_reg_off_granularity; - __le32 tx_ts_reg_off_lower; - __le32 tx_ts_reg_off_upper; - __le32 tx_ts_reg_off_seq_id; - __le32 tx_ts_reg_off_fifo; - __le32 tx_ts_reg_off_granularity; - __le32 unused_2; - u8 unused_3; - u8 unused_4; - u8 unused_5; - u8 valid; -}; - -/* hwrm_port_qstats */ -/* Input (40 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL + u8 unused_0[3]; + __le32 rx_ts_reg_off_lower; + __le32 rx_ts_reg_off_upper; + __le32 rx_ts_reg_off_seq_id; + __le32 rx_ts_reg_off_src_id_0; + __le32 rx_ts_reg_off_src_id_1; + __le32 rx_ts_reg_off_src_id_2; + __le32 rx_ts_reg_off_domain_id; + __le32 rx_ts_reg_off_fifo; + __le32 rx_ts_reg_off_fifo_adv; + __le32 rx_ts_reg_off_granularity; + __le32 tx_ts_reg_off_lower; + __le32 tx_ts_reg_off_upper; + __le32 tx_ts_reg_off_seq_id; + __le32 tx_ts_reg_off_fifo; + __le32 tx_ts_reg_off_granularity; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_port_qstats_input (size:320b/40B) */ struct hwrm_port_qstats_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 port_id; - u8 unused_0; - u8 unused_1; - u8 unused_2[3]; - u8 unused_3; - __le64 tx_stat_host_addr; - __le64 rx_stat_host_addr; -}; - -/* Output (16 bytes) */ + __le16 req_type; + 
__le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; + __le64 tx_stat_host_addr; + __le64 rx_stat_host_addr; +}; + +/* hwrm_port_qstats_output (size:128b/16B) */ struct hwrm_port_qstats_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 tx_stat_size; - __le16 rx_stat_size; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* hwrm_port_lpbk_qstats */ -/* Input (16 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tx_stat_size; + __le16 rx_stat_size; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_port_lpbk_qstats_input (size:128b/16B) */ struct hwrm_port_lpbk_qstats_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; }; -/* Output (96 bytes) */ +/* hwrm_port_lpbk_qstats_output (size:768b/96B) */ struct hwrm_port_lpbk_qstats_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le64 lpbk_ucast_frames; - __le64 lpbk_mcast_frames; - __le64 lpbk_bcast_frames; - __le64 lpbk_ucast_bytes; - __le64 lpbk_mcast_bytes; - __le64 lpbk_bcast_bytes; - __le64 tx_stat_discard; - __le64 tx_stat_error; - __le64 rx_stat_discard; - __le64 rx_stat_error; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_port_clr_stats */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 lpbk_ucast_frames; + __le64 lpbk_mcast_frames; + __le64 lpbk_bcast_frames; + __le64 lpbk_ucast_bytes; + __le64 lpbk_mcast_bytes; + __le64 lpbk_bcast_bytes; + __le64 tx_stat_discard; + __le64 tx_stat_error; + __le64 rx_stat_discard; + __le64 rx_stat_error; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_clr_stats_input (size:192b/24B) */ struct hwrm_port_clr_stats_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 port_id; - __le16 unused_0[3]; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; }; -/* Output (16 bytes) */ +/* hwrm_port_clr_stats_output (size:128b/16B) */ struct hwrm_port_clr_stats_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_port_lpbk_clr_stats */ -/* Input (16 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */ struct hwrm_port_lpbk_clr_stats_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; }; -/* Output (16 bytes) */ +/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */ struct hwrm_port_lpbk_clr_stats_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_port_phy_qcaps */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_phy_qcaps_input (size:192b/24B) */ struct hwrm_port_phy_qcaps_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 
seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 port_id; - __le16 unused_0[3]; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; }; -/* Output (24 bytes) */ +/* hwrm_port_phy_qcaps_output (size:192b/24B) */ struct hwrm_port_phy_qcaps_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 flags; - #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL - #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL - #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1 - u8 port_cnt; - #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL - #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL - #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL - #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL - #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL - __le16 supported_speeds_force_mode; - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL - __le16 supported_speeds_auto_mode; - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL - __le16 supported_speeds_eee_mode; - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL - __le32 tx_lpi_timer_low; - #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL - #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0 - #define 
PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL - #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24 - __le32 valid_tx_lpi_timer_high; - #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL - #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0 - #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL - #define PORT_PHY_QCAPS_RESP_VALID_SFT 24 -}; - -/* hwrm_port_phy_i2c_read */ -/* Input (40 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1 + u8 port_cnt; + #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_4 + __le16 supported_speeds_force_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL + __le16 supported_speeds_auto_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL + __le16 supported_speeds_eee_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL + __le32 tx_lpi_timer_low; 
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0 + #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL + #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24 + __le32 valid_tx_lpi_timer_high; + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0 + #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL + #define PORT_PHY_QCAPS_RESP_VALID_SFT 24 +}; + +/* hwrm_port_phy_i2c_read_input (size:320b/40B) */ struct hwrm_port_phy_i2c_read_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - __le32 enables; - #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL - __le16 port_id; - u8 i2c_slave_addr; - u8 unused_0; - __le16 page_number; - __le16 page_offset; - u8 data_length; - u8 unused_1[7]; -}; - -/* Output (80 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL + __le16 port_id; + u8 i2c_slave_addr; + u8 unused_0; + __le16 page_number; + __le16 page_offset; + u8 data_length; + u8 unused_1[7]; +}; + +/* hwrm_port_phy_i2c_read_output (size:640b/80B) */ struct hwrm_port_phy_i2c_read_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 data[16]; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_port_led_cfg */ -/* Input (64 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 data[16]; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_led_cfg_input (size:512b/64B) */ struct hwrm_port_led_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 enables; - #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL - #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL - #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL - #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL - #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL - #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL - #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL - #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL - #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL - #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL - #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL - #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL - #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL - #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL - #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL - #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL - #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL - #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL - #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL - #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL - #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL - #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL - #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL - #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL - __le16 port_id; - u8 num_leds; - u8 rsvd; - u8 led0_id; - u8 led0_state; - #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL - #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL - #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL - #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL - #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL - u8 
led0_color; - #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL - #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL - #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL - #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL - u8 unused_0; - __le16 led0_blink_on; - __le16 led0_blink_off; - u8 led0_group_id; - u8 rsvd0; - u8 led1_id; - u8 led1_state; - #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL - #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL - #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL - #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL - #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL - u8 led1_color; - #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL - #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL - #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL - #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL - u8 unused_1; - __le16 led1_blink_on; - __le16 led1_blink_off; - u8 led1_group_id; - u8 rsvd1; - u8 led2_id; - u8 led2_state; - #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL - #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL - #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL - #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL - #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL - u8 led2_color; - #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL - #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL - #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL - #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL - u8 unused_2; - __le16 led2_blink_on; - __le16 led2_blink_off; - u8 led2_group_id; - u8 rsvd2; - u8 led3_id; - u8 led3_state; - #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL - #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL - #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL - #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL - #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL - u8 led3_color; - #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL - #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL - #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL - #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL - u8 unused_3; - __le16 led3_blink_on; - __le16 led3_blink_off; - u8 led3_group_id; - u8 rsvd3; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL + #define 
PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL + __le16 port_id; + u8 num_leds; + u8 rsvd; + u8 led0_id; + u8 led0_state; + #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT + u8 led0_color; + #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER + u8 unused_0; + __le16 led0_blink_on; + __le16 led0_blink_off; + u8 led0_group_id; + u8 rsvd0; + u8 led1_id; + u8 led1_state; + #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT + u8 led1_color; + #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER + u8 unused_1; + __le16 led1_blink_on; + __le16 led1_blink_off; + u8 led1_group_id; + u8 rsvd1; + u8 led2_id; + u8 led2_state; + #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT + u8 led2_color; + #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER + u8 unused_2; + __le16 led2_blink_on; + __le16 led2_blink_off; + u8 led2_group_id; + u8 rsvd2; + u8 led3_id; + u8 led3_state; + #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT + u8 led3_color; + #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER + u8 unused_3; + __le16 led3_blink_on; + __le16 led3_blink_off; + u8 led3_group_id; + u8 rsvd3; +}; + +/* hwrm_port_led_cfg_output (size:128b/16B) */ struct hwrm_port_led_cfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_port_led_qcaps */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + 
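/*
 * Editor's illustrative aside -- not part of the patch above. The LED
 * configuration request is a plain little-endian structure, so a caller
 * only fills the `enables` bitmap plus the per-LED fields those bits
 * cover. A minimal userspace sketch of building a "blink LED0 green"
 * request follows, under these assumptions: the struct is re-declared
 * only down to the LED0 fields, htole16()/htole32() from <endian.h>
 * stand in for the kernel's cpu_to_le16()/cpu_to_le32(), blink periods
 * are taken to be in milliseconds, and the HWRM mailbox send path is
 * omitted entirely.
 */
#include <endian.h>
#include <stdint.h>
#include <string.h>

struct port_led_cfg_req {	/* prefix of hwrm_port_led_cfg_input */
	uint16_t req_type;
	uint16_t cmpl_ring;
	uint16_t seq_id;
	uint16_t target_id;
	uint64_t resp_addr;
	uint32_t enables;
	uint16_t port_id;
	uint8_t  num_leds;
	uint8_t  rsvd;
	uint8_t  led0_id;
	uint8_t  led0_state;
	uint8_t  led0_color;
	uint8_t  unused_0;
	uint16_t led0_blink_on;
	uint16_t led0_blink_off;
	uint8_t  led0_group_id;
	uint8_t  rsvd0;
	/* LED1..LED3 blocks omitted for brevity */
};

static void build_blink_led0(struct port_led_cfg_req *req, uint16_t port_id)
{
	memset(req, 0, sizeof(*req));
	/* Enable exactly the LED0 fields configured below. */
	req->enables = htole32(0x1UL |		/* ..._ENABLES_LED0_ID */
			       0x2UL |		/* ..._ENABLES_LED0_STATE */
			       0x4UL |		/* ..._ENABLES_LED0_COLOR */
			       0x8UL |		/* ..._ENABLES_LED0_BLINK_ON */
			       0x10UL);		/* ..._ENABLES_LED0_BLINK_OFF */
	req->port_id        = htole16(port_id);
	req->num_leds       = 1;
	req->led0_id        = 0;
	req->led0_state     = 0x3;	/* PORT_LED_CFG_REQ_LED0_STATE_BLINK */
	req->led0_color     = 0x2;	/* PORT_LED_CFG_REQ_LED0_COLOR_GREEN */
	req->led0_blink_on  = htole16(500);	/* assumed on-time in ms */
	req->led0_blink_off = htole16(500);	/* assumed off-time in ms */
}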
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
+struct hwrm_port_led_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
+struct hwrm_port_led_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 num_leds;
+	u8 led0_id;
+	u8 led0_type;
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
+	u8 led0_state;
+	#define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
+	u8 led0_color;
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
+	u8 unused_0;
+	__le16 led0_blink_on;
+	__le16 led0_blink_off;
+	u8 led0_group_id;
+	u8 led1_id;
+	u8 led1_type;
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
+	u8 led1_state;
+	#define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
+	u8 led1_color;
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
+	u8 unused_1;
+	__le16 led1_blink_on;
+	__le16 led1_blink_off;
+	u8 led1_group_id;
+	u8 led2_id;
+	u8 led2_type;
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
+	u8 led2_state;
+	#define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
+	u8 led2_color;
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
+	u8 unused_2;
+	__le16 led2_blink_on;
+	__le16 led2_blink_off;
+	u8 led2_group_id;
+	u8 led3_id;
+	u8 led3_type;
+	#define
PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL + #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL + #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID + u8 led3_state; + #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL + #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL + #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL + #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL + #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT + u8 led3_color; + #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER + u8 unused_3; + __le16 led3_blink_on; + __le16 led3_blink_off; + u8 led3_group_id; + u8 unused_4[6]; + u8 valid; +}; + +/* hwrm_port_led_qcaps_input (size:192b/24B) */ struct hwrm_port_led_qcaps_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 port_id; - __le16 unused_0[3]; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; }; -/* Output (48 bytes) */ +/* hwrm_port_led_qcaps_output (size:384b/48B) */ struct hwrm_port_led_qcaps_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 num_leds; - u8 unused_0[3]; - u8 led0_id; - u8 led0_type; - #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL - #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL - #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL - u8 led0_group_id; - u8 unused_1; - __le16 led0_state_caps; - #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL - #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL - #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL - #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL - #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL - __le16 led0_color_caps; - #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL - #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL - #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL - u8 led1_id; - u8 led1_type; - #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL - #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL - #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL - u8 led1_group_id; - u8 unused_2; - __le16 led1_state_caps; - #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL - #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL - #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL - #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL - #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL - __le16 led1_color_caps; - #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL - #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL - #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL - u8 led2_id; - u8 led2_type; - #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL - #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL - #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL - u8 led2_group_id; - u8 unused_3; - __le16 led2_state_caps; - #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL - #define 
PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL - #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL - #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL - #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL - __le16 led2_color_caps; - #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL - #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL - #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL - u8 led3_id; - u8 led3_type; - #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL - #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL - #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL - u8 led3_group_id; - u8 unused_4; - __le16 led3_state_caps; - #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL - #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL - #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL - #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL - #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL - __le16 led3_color_caps; - #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL - #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL - #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL - u8 unused_5; - u8 unused_6; - u8 unused_7; - u8 valid; -}; - -/* hwrm_queue_qportcfg */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_leds; + u8 unused[3]; + u8 led0_id; + u8 led0_type; + #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID + u8 led0_group_id; + u8 unused_0; + __le16 led0_state_caps; + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led0_color_caps; + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led1_id; + u8 led1_type; + #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID + u8 led1_group_id; + u8 unused_1; + __le16 led1_state_caps; + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led1_color_caps; + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led2_id; + u8 led2_type; + #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 
+ u8 led2_group_id; + u8 unused_2; + __le16 led2_state_caps; + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led2_color_caps; + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led3_id; + u8 led3_type; + #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID + u8 led3_group_id; + u8 unused_3; + __le16 led3_state_caps; + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led3_color_caps; + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 unused_4[3]; + u8 valid; +}; + +/* hwrm_queue_qportcfg_input (size:192b/24B) */ struct hwrm_queue_qportcfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL - #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL - #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL - #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX - __le16 port_id; - __le16 unused_0; -}; - -/* Output (32 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX + __le16 port_id; + u8 unused_0[2]; +}; + +/* hwrm_queue_qportcfg_output (size:256b/32B) */ struct hwrm_queue_qportcfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 max_configurable_queues; - u8 max_configurable_lossless_queues; - u8 queue_cfg_allowed; - u8 queue_cfg_info; - #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL - u8 queue_pfcenable_cfg_allowed; - u8 queue_pri2cos_cfg_allowed; - u8 queue_cos2bw_cfg_allowed; - u8 queue_id0; - u8 queue_id0_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 max_configurable_queues; + u8 max_configurable_lossless_queues; + u8 queue_cfg_allowed; + u8 queue_cfg_info; + #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL + u8 queue_pfcenable_cfg_allowed; + u8 queue_pri2cos_cfg_allowed; + u8 queue_cos2bw_cfg_allowed; + u8 queue_id0; + u8 queue_id0_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL - u8 
queue_id1; - u8 queue_id1_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN + u8 queue_id1; + u8 queue_id1_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL - u8 queue_id2; - u8 queue_id2_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN + u8 queue_id2; + u8 queue_id2_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL - u8 queue_id3; - u8 queue_id3_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN + u8 queue_id3; + u8 queue_id3_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL - u8 queue_id4; - u8 queue_id4_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN + u8 queue_id4; + u8 queue_id4_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL - u8 queue_id5; - u8 queue_id5_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN + u8 queue_id5; + u8 queue_id5_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL - u8 queue_id6; - u8 queue_id6_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN + u8 queue_id6; + u8 queue_id6_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL - u8 queue_id7; - u8 queue_id7_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL + #define 
QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN + u8 queue_id7; + u8 queue_id7_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL - u8 valid; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN + u8 valid; }; -/* hwrm_queue_cfg */ -/* Input (40 bytes) */ +/* hwrm_queue_cfg_input (size:320b/40B) */ struct hwrm_queue_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL - #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0 - #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL - #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL - #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL - #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR - __le32 enables; - #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL - #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL - __le32 queue_id; - __le32 dflt_len; - u8 service_profile; - #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL - u8 unused_0[7]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL + #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0 + #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL + #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR + __le32 enables; + #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL + #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL + __le32 queue_id; + __le32 dflt_len; + u8 service_profile; + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN + u8 unused_0[7]; +}; + +/* hwrm_queue_cfg_output (size:128b/16B) */ struct hwrm_queue_cfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_queue_pfcenable_qcfg */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */ struct hwrm_queue_pfcenable_qcfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 port_id; - __le16 unused_0[3]; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; }; -/* Output (16 bytes) */ +/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */ struct hwrm_queue_pfcenable_qcfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 flags; - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL - #define 
QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL - #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* hwrm_queue_pfcenable_cfg */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */ struct hwrm_queue_pfcenable_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL @@ -2440,1729 +2703,1664 @@ struct hwrm_queue_pfcenable_cfg_input { #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL - __le16 port_id; - __le16 unused_0; + __le16 port_id; + u8 unused_0[2]; }; -/* Output (16 bytes) */ +/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */ struct hwrm_queue_pfcenable_cfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_queue_pri2cos_qcfg */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */ struct hwrm_queue_pri2cos_qcfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL - #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX (0x0UL << 0) - #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX (0x1UL << 0) - #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX - #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL - u8 port_id; - u8 unused_0[3]; -}; - -/* Output (24 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL + u8 port_id; + u8 unused_0[3]; +}; + +/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */ struct hwrm_queue_pri2cos_qcfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; 
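Each QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRIn_PFC_ENABLED bit above is simply 1 << n, so the flags word for hwrm_queue_pfcenable_cfg_input can be composed from a per-priority table. A minimal sketch under that assumption; pfcenable_flags() is a hypothetical helper, and a driver would store the result into the request with cpu_to_le32().

/* Sketch: build the PFC-enable flags word from per-priority booleans. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t pfcenable_flags(const bool pfc_en[8])
{
	uint32_t flags = 0;

	for (int pri = 0; pri < 8; pri++)
		if (pfc_en[pri])
			flags |= 1U << pri; /* ..._FLAGS_PRIn_PFC_ENABLED */
	return flags;                       /* caller writes cpu_to_le32(flags) */
}

int main(void)
{
	bool en[8] = { false, false, false, true, true, false, false, false };

	printf("flags = 0x%x\n", pfcenable_flags(en)); /* 0x18: PRI3 | PRI4 */
	return 0;
}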
- u8 pri0_cos_queue_id; - u8 pri1_cos_queue_id; - u8 pri2_cos_queue_id; - u8 pri3_cos_queue_id; - u8 pri4_cos_queue_id; - u8 pri5_cos_queue_id; - u8 pri6_cos_queue_id; - u8 pri7_cos_queue_id; - u8 queue_cfg_info; - #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL - u8 unused_0; - __le16 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* hwrm_queue_pri2cos_cfg */ -/* Input (40 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 pri0_cos_queue_id; + u8 pri1_cos_queue_id; + u8 pri2_cos_queue_id; + u8 pri3_cos_queue_id; + u8 pri4_cos_queue_id; + u8 pri5_cos_queue_id; + u8 pri6_cos_queue_id; + u8 pri7_cos_queue_id; + u8 queue_cfg_info; + #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */ struct hwrm_queue_pri2cos_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL - #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0 - #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) - #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) - #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR (0x2UL << 0) - #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR - #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL - __le32 enables; - #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL - #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL - #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL - #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL - #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL - #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL - #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL - #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL - u8 port_id; - u8 pri0_cos_queue_id; - u8 pri1_cos_queue_id; - u8 pri2_cos_queue_id; - u8 pri3_cos_queue_id; - u8 pri4_cos_queue_id; - u8 pri5_cos_queue_id; - u8 pri6_cos_queue_id; - u8 pri7_cos_queue_id; - u8 unused_0[7]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0 + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL + __le32 enables; + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL + u8 port_id; + u8 pri0_cos_queue_id; + u8 pri1_cos_queue_id; + u8 pri2_cos_queue_id; + u8 pri3_cos_queue_id; + u8 pri4_cos_queue_id; + u8 pri5_cos_queue_id; + u8 pri6_cos_queue_id; + u8 pri7_cos_queue_id; + u8 unused_0[7]; +}; + +/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) 
*/ struct hwrm_queue_pri2cos_cfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_queue_cos2bw_qcfg */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */ struct hwrm_queue_cos2bw_qcfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 port_id; - __le16 unused_0[3]; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; }; -/* Output (112 bytes) */ +/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */ struct hwrm_queue_cos2bw_qcfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 queue_id0; - u8 unused_0; - __le16 unused_1; - __le32 queue_id0_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id0_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id0_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 queue_id0; + u8 unused_0; + __le16 unused_1; + __le32 queue_id0_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id0_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id0_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id0_pri_lvl; - u8 queue_id0_bw_weight; - u8 queue_id1; - __le32 queue_id1_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id1_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id1_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id0_pri_lvl; + u8 queue_id0_bw_weight; + u8 queue_id1; + __le32 queue_id1_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id1_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id1_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id1_pri_lvl; - u8 queue_id1_bw_weight; - u8 queue_id2; - __le32 queue_id2_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id2_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id2_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id1_pri_lvl; + u8 queue_id1_bw_weight; + u8 queue_id2; + __le32 queue_id2_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id2_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id2_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id2_pri_lvl; - u8 queue_id2_bw_weight; - u8 queue_id3; - __le32 queue_id3_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id3_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id3_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id2_pri_lvl; + u8 queue_id2_bw_weight; + u8 queue_id3; + __le32 queue_id3_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id3_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id3_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id3_pri_lvl; - u8 queue_id3_bw_weight; - u8 queue_id4; - __le32 queue_id4_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id4_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id4_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id3_pri_lvl; + u8 queue_id3_bw_weight; + u8 queue_id4; + __le32 queue_id4_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL 
<< 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id4_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id4_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id4_pri_lvl; - u8 queue_id4_bw_weight; - u8 queue_id5; - __le32 queue_id5_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id5_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES 
(0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id5_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id4_pri_lvl; + u8 queue_id4_bw_weight; + u8 queue_id5; + __le32 queue_id5_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id5_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id5_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id5_pri_lvl; - u8 queue_id5_bw_weight; - u8 queue_id6; - __le32 queue_id6_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id6_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id6_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id5_pri_lvl; + u8 queue_id5_bw_weight; + u8 queue_id6; + 
__le32 queue_id6_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id6_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id6_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id6_pri_lvl; - u8 queue_id6_bw_weight; - u8 queue_id7; - __le32 queue_id7_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id7_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id7_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id6_pri_lvl; + u8 queue_id6_bw_weight; + u8 queue_id7; + __le32 queue_id7_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id7_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id7_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id7_pri_lvl; - u8 queue_id7_bw_weight; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 unused_5; - u8 valid; -}; - -/* hwrm_queue_cos2bw_cfg */ -/* Input (128 bytes) */ + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id7_pri_lvl; + u8 queue_id7_bw_weight; + u8 unused_2[4]; + u8 valid; +}; + +/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */ struct hwrm_queue_cos2bw_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - __le32 enables; - #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL - #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL - #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL - #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL - #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL - #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL - #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL - #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL - __le16 port_id; - u8 queue_id0; - u8 unused_0; - __le32 queue_id0_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id0_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id0_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL + __le16 port_id; + u8 queue_id0; + u8 unused_0; + __le32 queue_id0_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id0_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id0_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id0_pri_lvl; - u8 queue_id0_bw_weight; - u8 queue_id1; - __le32 queue_id1_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id1_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) 
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id1_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id0_pri_lvl; + u8 queue_id0_bw_weight; + u8 queue_id1; + __le32 queue_id1_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id1_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) 
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id1_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id1_pri_lvl; - u8 queue_id1_bw_weight; - u8 queue_id2; - __le32 queue_id2_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id2_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id2_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id1_pri_lvl; + u8 queue_id1_bw_weight; + u8 queue_id2; + __le32 queue_id2_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id2_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id2_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id2_pri_lvl; - u8 queue_id2_bw_weight; - u8 queue_id3; - __le32 queue_id3_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id3_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id3_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id2_pri_lvl; + u8 queue_id2_bw_weight; + u8 queue_id3; + __le32 queue_id3_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id3_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS 
(0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id3_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id3_pri_lvl; - u8 queue_id3_bw_weight; - u8 queue_id4; - __le32 queue_id4_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id4_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - 
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id4_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id3_pri_lvl; + u8 queue_id3_bw_weight; + u8 queue_id4; + __le32 queue_id4_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id4_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id4_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id4_pri_lvl; - u8 queue_id4_bw_weight; - u8 queue_id5; - __le32 queue_id5_min_bw; - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id5_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id5_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id4_pri_lvl; + u8 queue_id4_bw_weight; + u8 queue_id5; + __le32 queue_id5_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id5_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id5_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id5_pri_lvl; - u8 queue_id5_bw_weight; - u8 queue_id6; - __le32 queue_id6_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id6_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id6_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id5_pri_lvl; + u8 queue_id5_bw_weight; + u8 queue_id6; + __le32 queue_id6_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id6_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id6_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id6_pri_lvl; - u8 queue_id6_bw_weight; - u8 queue_id7; - __le32 queue_id7_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id7_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id7_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL 
+ u8 queue_id6_pri_lvl; + u8 queue_id6_bw_weight; + u8 queue_id7; + __le32 queue_id7_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id7_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id7_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id7_pri_lvl; - u8 queue_id7_bw_weight; - u8 unused_1[5]; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id7_pri_lvl; + u8 queue_id7_bw_weight; + u8 unused_1[5]; }; -/* Output (16 bytes) */ +/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */ struct hwrm_queue_cos2bw_cfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_queue_dscp_qcaps */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* 
hwrm_queue_dscp_qcaps_input (size:192b/24B) */ struct hwrm_queue_dscp_qcaps_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - u8 port_id; - u8 unused_0[7]; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 port_id; + u8 unused_0[7]; }; -/* Output (16 bytes) */ +/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */ struct hwrm_queue_dscp_qcaps_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 num_dscp_bits; - u8 unused_0; - __le16 max_entries; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_queue_dscp2pri_qcfg */ -/* Input (32 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_dscp_bits; + u8 unused_0; + __le16 max_entries; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */ struct hwrm_queue_dscp2pri_qcfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 dest_data_addr; - u8 port_id; - u8 unused_0; - __le16 dest_data_buffer_size; - __le32 unused_1; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + u8 port_id; + u8 unused_0; + __le16 dest_data_buffer_size; + u8 unused_1[4]; +}; + +/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */ struct hwrm_queue_dscp2pri_qcfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 entry_cnt; - u8 default_pri; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_queue_dscp2pri_cfg */ -/* Input (40 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 entry_cnt; + u8 default_pri; + u8 unused_0[4]; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */ struct hwrm_queue_dscp2pri_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 src_data_addr; - __le32 flags; - #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL - __le32 enables; - #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL - u8 port_id; - u8 default_pri; - __le16 entry_cnt; - __le32 unused_0; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le32 flags; + #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL + __le32 enables; + #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL + u8 port_id; + u8 default_pri; + __le16 entry_cnt; + u8 unused_0[4]; +}; + +/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */ struct hwrm_queue_dscp2pri_cfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_vnic_alloc */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_alloc_input (size:192b/24B) */ struct hwrm_vnic_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL - __le32 unused_0; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL + u8 
unused_0[4]; }; -/* Output (16 bytes) */ +/* hwrm_vnic_alloc_output (size:128b/16B) */ struct hwrm_vnic_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 vnic_id; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* hwrm_vnic_free */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 vnic_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_vnic_free_input (size:192b/24B) */ struct hwrm_vnic_free_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 vnic_id; - __le32 unused_0; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + u8 unused_0[4]; }; -/* Output (16 bytes) */ +/* hwrm_vnic_free_output (size:128b/16B) */ struct hwrm_vnic_free_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_vnic_cfg */ -/* Input (40 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_cfg_input (size:320b/40B) */ struct hwrm_vnic_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL - #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL - #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL - #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL - #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL - #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL - #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL - __le32 enables; - #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL - #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL - #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL - #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL - #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL - __le16 vnic_id; - __le16 dflt_ring_grp; - __le16 rss_rule; - __le16 cos_rule; - __le16 lb_rule; - __le16 mru; - __le32 unused_0; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL + #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL + #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL + #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL + #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL + #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL + #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL + __le32 enables; + #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL + #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL + #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL + #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL + #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL + __le16 vnic_id; + __le16 dflt_ring_grp; + __le16 rss_rule; + __le16 cos_rule; + __le16 lb_rule; + __le16 mru; + u8 unused_0[4]; +}; + +/* hwrm_vnic_cfg_output (size:128b/16B) */ struct hwrm_vnic_cfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_vnic_qcaps */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_qcaps_input (size:192b/24B) */ struct hwrm_vnic_qcaps_input { - __le16 req_type; - __le16 
cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 enables; - __le32 unused_0; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + u8 unused_0[4]; }; -/* Output (24 bytes) */ +/* hwrm_vnic_qcaps_output (size:192b/24B) */ struct hwrm_vnic_qcaps_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 mru; - u8 unused_0; - u8 unused_1; - __le32 flags; - #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL - #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL - #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL - #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL - #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL - #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL - #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRROING_CAPABLE_VNIC_CAP 0x40UL - __le32 unused_2; - u8 unused_3; - u8 unused_4; - u8 unused_5; - u8 valid; -}; - -/* hwrm_vnic_tpa_cfg */ -/* Input (40 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + u8 unused_0[2]; + __le32 flags; + #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL + #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL + #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */ struct hwrm_vnic_tpa_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL - #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL - #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL - #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL - #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL - #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL - #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL - #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL - __le32 enables; - #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL - #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL - #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL - #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL - __le16 vnic_id; - __le16 max_agg_segs; - #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL - #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL - #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL - #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL - #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL - __le16 max_aggs; - #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL - #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL - #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL - #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL - #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL - #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL - u8 unused_0; - u8 unused_1; - __le32 max_agg_timer; - __le32 min_agg_len; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL + #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL + #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL + #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL + #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL + #define 
VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL + __le32 enables; + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL + #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL + __le16 vnic_id; + __le16 max_agg_segs; + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX + __le16 max_aggs; + #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX + u8 unused_0[2]; + __le32 max_agg_timer; + __le32 min_agg_len; +}; + +/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */ struct hwrm_vnic_tpa_cfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_vnic_rss_cfg */ -/* Input (48 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_tpa_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vnic_id; + u8 unused_0[6]; +}; + +/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */ +struct hwrm_vnic_tpa_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL + #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL + #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL + #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL + #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL + #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL + #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL + #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL + __le16 max_agg_segs; + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX + __le16 max_aggs; + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX + __le32 max_agg_timer; + __le32 min_agg_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_rss_cfg_input (size:384b/48B) */ struct hwrm_vnic_rss_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 hash_type; - #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL - #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL - #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL - #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL - #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL 
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL - __le32 unused_0; - __le64 ring_grp_tbl_addr; - __le64 hash_key_tbl_addr; - __le16 rss_ctx_idx; - __le16 unused_1[3]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 hash_type; + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL + u8 unused_0[4]; + __le64 ring_grp_tbl_addr; + __le64 hash_key_tbl_addr; + __le16 rss_ctx_idx; + u8 unused_1[6]; +}; + +/* hwrm_vnic_rss_cfg_output (size:128b/16B) */ struct hwrm_vnic_rss_cfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_vnic_plcmodes_cfg */ -/* Input (40 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */ struct hwrm_vnic_plcmodes_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL - #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL - #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL - #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL - #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL - #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL - __le32 enables; - #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL - #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL - #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL - __le32 vnic_id; - __le16 jumbo_thresh; - __le16 hds_offset; - __le16 hds_threshold; - __le16 unused_0[3]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL + __le32 enables; + #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL + __le32 vnic_id; + __le16 jumbo_thresh; + __le16 hds_offset; + __le16 hds_threshold; + u8 unused_0[6]; +}; + +/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */ struct hwrm_vnic_plcmodes_cfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_vnic_rss_cos_lb_ctx_alloc */ -/* Input (16 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; }; -/* Output (16 
bytes) */ +/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 rss_cos_lb_ctx_id; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* hwrm_vnic_rss_cos_lb_ctx_free */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rss_cos_lb_ctx_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */ struct hwrm_vnic_rss_cos_lb_ctx_free_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 rss_cos_lb_ctx_id; - __le16 unused_0[3]; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 rss_cos_lb_ctx_id; + u8 unused_0[6]; }; -/* Output (16 bytes) */ +/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */ struct hwrm_vnic_rss_cos_lb_ctx_free_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_ring_alloc */ -/* Input (80 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_alloc_input (size:640b/80B) */ struct hwrm_ring_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 enables; - #define RING_ALLOC_REQ_ENABLES_RESERVED1 0x1UL - #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL - #define RING_ALLOC_REQ_ENABLES_RESERVED3 0x4UL - #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL - #define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL - #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL - u8 ring_type; - #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL - #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL - #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL - #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL - u8 unused_0; - __le16 unused_1; - __le64 page_tbl_addr; - __le32 fbo; - u8 page_size; - u8 page_tbl_depth; - u8 unused_2; - u8 unused_3; - __le32 length; - __le16 logical_id; - __le16 cmpl_ring_id; - __le16 queue_id; - u8 unused_4; - u8 unused_5; - __le32 reserved1; - __le16 ring_arb_cfg; - #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL - #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0 - #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP (0x1UL << 0) - #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ (0x2UL << 0) - #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ - #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL - #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4 - #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL - #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8 - u8 unused_6; - u8 unused_7; - __le32 reserved3; - __le32 stat_ctx_id; - __le32 reserved4; - __le32 max_bw; - #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0 - #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL - #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28) - #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES - #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define 
RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL + #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL + #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL + u8 ring_type; + #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL + #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL + #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL + u8 unused_0[3]; + __le64 page_tbl_addr; + __le32 fbo; + u8 page_size; + u8 page_tbl_depth; + u8 unused_1[2]; + __le32 length; + __le16 logical_id; + __le16 cmpl_ring_id; + __le16 queue_id; + u8 unused_2[2]; + __le32 reserved1; + __le16 ring_arb_cfg; + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0 + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ + #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL + #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4 + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8 + __le16 unused_3; + __le32 reserved3; + __le32 stat_ctx_id; + __le32 reserved4; + __le32 max_bw; + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0 + #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL + #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28) + #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID - u8 int_mode; - #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL - #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL - #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL - #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL - u8 unused_8[3]; + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID + u8 int_mode; + #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL + #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL + #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL + #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL + #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL + u8 unused_4[3]; }; -/* Output (16 bytes) */ +/* hwrm_ring_alloc_output (size:128b/16B) */ struct hwrm_ring_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 ring_id; - __le16 logical_ring_id; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; 
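[Editor's note] The max_bw word above packs three fields into a single __le32: a 28-bit bandwidth value (bits 27:0), a scale bit (bit 28, bits vs. bytes) and a 3-bit unit code (bits 31:29). A minimal sketch of the encoding, assuming the semantics implied by the define names; bnxt_pack_max_bw_mbps and the request setup are hypothetical illustration, not code from this patch:

	/* Hypothetical helper: encode a rate in Mbit/s into max_bw. */
	static u32 bnxt_pack_max_bw_mbps(u32 mbps)
	{
		u32 bw = (mbps << RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT) &
			 RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK;

		bw |= RING_ALLOC_REQ_MAX_BW_SCALE_BITS;         /* rate counted in bits */
		bw |= RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA; /* value is in mega units */
		return bw;
	}

	/* Usage sketch: cap a ring at 10 Gbit/s, i.e. 10,000 Mbit/s. */
	struct hwrm_ring_alloc_input req = {};

	req.enables |= cpu_to_le32(RING_ALLOC_REQ_ENABLES_MAX_BW_VALID);
	req.max_bw = cpu_to_le32(bnxt_pack_max_bw_mbps(10000));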
- -/* hwrm_ring_free */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 ring_id; + __le16 logical_ring_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_ring_free_input (size:192b/24B) */ struct hwrm_ring_free_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - u8 ring_type; - #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL - #define RING_FREE_REQ_RING_TYPE_TX 0x1UL - #define RING_FREE_REQ_RING_TYPE_RX 0x2UL - #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL - u8 unused_0; - __le16 ring_id; - __le32 unused_1; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_FREE_REQ_RING_TYPE_TX 0x1UL + #define RING_FREE_REQ_RING_TYPE_RX 0x2UL + #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_ROCE_CMPL + u8 unused_0; + __le16 ring_id; + u8 unused_1[4]; +}; + +/* hwrm_ring_free_output (size:128b/16B) */ struct hwrm_ring_free_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_ring_cmpl_ring_qaggint_params */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */ struct hwrm_ring_cmpl_ring_qaggint_params_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 ring_id; - __le16 unused_0[3]; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ring_id; + u8 unused_0[6]; }; -/* Output (32 bytes) */ +/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */ struct hwrm_ring_cmpl_ring_qaggint_params_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 flags; - #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL - #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL - __le16 num_cmpl_dma_aggr; - __le16 num_cmpl_dma_aggr_during_int; - __le16 cmpl_aggr_dma_tmr; - __le16 cmpl_aggr_dma_tmr_during_int; - __le16 int_lat_tmr_min; - __le16 int_lat_tmr_max; - __le16 num_cmpl_aggr_int; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_ring_cmpl_ring_cfg_aggint_params */ -/* Input (40 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flags; + #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL + #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL + __le16 num_cmpl_dma_aggr; + __le16 num_cmpl_dma_aggr_during_int; + __le16 cmpl_aggr_dma_tmr; + __le16 cmpl_aggr_dma_tmr_during_int; + __le16 int_lat_tmr_min; + __le16 int_lat_tmr_max; + __le16 num_cmpl_aggr_int; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 ring_id; - __le16 flags; - #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL - #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL - __le16 num_cmpl_dma_aggr; - __le16 num_cmpl_dma_aggr_during_int; - 
__le16 cmpl_aggr_dma_tmr; - __le16 cmpl_aggr_dma_tmr_during_int; - __le16 int_lat_tmr_min; - __le16 int_lat_tmr_max; - __le16 num_cmpl_aggr_int; - __le16 unused_0[3]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ring_id; + __le16 flags; + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL + __le16 num_cmpl_dma_aggr; + __le16 num_cmpl_dma_aggr_during_int; + __le16 cmpl_aggr_dma_tmr; + __le16 cmpl_aggr_dma_tmr_during_int; + __le16 int_lat_tmr_min; + __le16 int_lat_tmr_max; + __le16 num_cmpl_aggr_int; + u8 unused_0[6]; +}; + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */ struct hwrm_ring_cmpl_ring_cfg_aggint_params_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_ring_reset */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_reset_input (size:192b/24B) */ struct hwrm_ring_reset_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - u8 ring_type; - #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL - #define RING_RESET_REQ_RING_TYPE_TX 0x1UL - #define RING_RESET_REQ_RING_TYPE_RX 0x2UL - #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL - u8 unused_0; - __le16 ring_id; - __le32 unused_1; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_RESET_REQ_RING_TYPE_TX 0x1UL + #define RING_RESET_REQ_RING_TYPE_RX 0x2UL + #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL + u8 unused_0; + __le16 ring_id; + u8 unused_1[4]; +}; + +/* hwrm_ring_reset_output (size:128b/16B) */ struct hwrm_ring_reset_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_ring_grp_alloc */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_grp_alloc_input (size:192b/24B) */ struct hwrm_ring_grp_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 cr; - __le16 rr; - __le16 ar; - __le16 sc; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 cr; + __le16 rr; + __le16 ar; + __le16 sc; +}; + +/* hwrm_ring_grp_alloc_output (size:128b/16B) */ struct hwrm_ring_grp_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 ring_group_id; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* hwrm_ring_grp_free */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 ring_group_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_ring_grp_free_input (size:192b/24B) */ struct hwrm_ring_grp_free_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 ring_group_id; - __le32 unused_0; + __le16 req_type; + __le16 cmpl_ring; + 
__le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 ring_group_id; + u8 unused_0[4]; }; -/* Output (16 bytes) */ +/* hwrm_ring_grp_free_output (size:128b/16B) */ struct hwrm_ring_grp_free_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_cfa_l2_filter_alloc */ -/* Input (96 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */ struct hwrm_cfa_l2_filter_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL - #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0) - #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0) - #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX - #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL - #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL - #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL - __le32 enables; - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL - #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL - u8 l2_addr[6]; - u8 unused_0; - u8 unused_1; - u8 l2_addr_mask[6]; - __le16 l2_ovlan; - __le16 l2_ovlan_mask; - __le16 l2_ivlan; - __le16 l2_ivlan_mask; - u8 unused_2; - u8 unused_3; - u8 t_l2_addr[6]; - u8 unused_4; - u8 unused_5; - u8 t_l2_addr_mask[6]; - __le16 t_l2_ovlan; - __le16 t_l2_ovlan_mask; - __le16 t_l2_ivlan; - __le16 t_l2_ivlan_mask; - u8 src_type; - #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL - #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL - #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL - #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL - #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL - #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL - #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL - #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL - u8 unused_6; - __le32 src_id; - u8 tunnel_type; - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL - #define 
CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL - u8 unused_7; - __le16 dst_id; - __le16 mirror_vnic_id; - u8 pri_hint; - #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL - #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL - #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL - #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL - #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL - u8 unused_8; - __le32 unused_9; - __le64 l2_filter_id_hint; -}; - -/* Output (24 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL + __le32 enables; + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL + u8 l2_addr[6]; + u8 unused_0[2]; + u8 l2_addr_mask[6]; + __le16 l2_ovlan; + __le16 l2_ovlan_mask; + __le16 l2_ivlan; + __le16 l2_ivlan_mask; + u8 unused_1[2]; + u8 t_l2_addr[6]; + u8 unused_2[2]; + u8 t_l2_addr_mask[6]; + __le16 t_l2_ovlan; + __le16 t_l2_ovlan_mask; + __le16 t_l2_ivlan; + __le16 t_l2_ivlan_mask; + u8 src_type; + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG + u8 unused_3; + __le32 src_id; + u8 tunnel_type; + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define 
CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_4; + __le16 dst_id; + __le16 mirror_vnic_id; + u8 pri_hint; + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN + u8 unused_5; + __le32 unused_6; + __le64 l2_filter_id_hint; +}; + +/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */ struct hwrm_cfa_l2_filter_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le64 l2_filter_id; - __le32 flow_id; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* hwrm_cfa_l2_filter_free */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 l2_filter_id; + __le32 flow_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */ struct hwrm_cfa_l2_filter_free_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 l2_filter_id; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 l2_filter_id; }; -/* Output (16 bytes) */ +/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */ struct hwrm_cfa_l2_filter_free_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_cfa_l2_filter_cfg */ -/* Input (40 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */ struct hwrm_cfa_l2_filter_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) - #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX - #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL - __le32 enables; - #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL - #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL - __le64 l2_filter_id; - __le32 dst_id; - __le32 new_mirror_vnic_id; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX + #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL + __le32 enables; + #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + __le64 l2_filter_id; + __le32 dst_id; + __le32 new_mirror_vnic_id; +}; + +/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */ struct hwrm_cfa_l2_filter_cfg_output { - __le16 
error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_cfa_l2_set_rx_mask */ -/* Input (56 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */ struct hwrm_cfa_l2_set_rx_mask_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 vnic_id; - __le32 mask; - #define CFA_L2_SET_RX_MASK_REQ_MASK_RESERVED 0x1UL - #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL - #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL - #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL - #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL - #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL - #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL - #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL - #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL - __le64 mc_tbl_addr; - __le32 num_mc_entries; - __le32 unused_0; - __le64 vlan_tag_tbl_addr; - __le32 num_vlan_tags; - __le32 unused_1; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + __le32 mask; + #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL + __le64 mc_tbl_addr; + __le32 num_mc_entries; + u8 unused_0[4]; + __le64 vlan_tag_tbl_addr; + __le32 num_vlan_tags; + u8 unused_1[4]; +}; + +/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */ struct hwrm_cfa_l2_set_rx_mask_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* Command specific Error Codes (8 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */ struct hwrm_cfa_l2_set_rx_mask_cmd_err { - u8 code; - #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL + u8 code; + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL - u8 unused_0[7]; + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR + u8 unused_0[7]; }; -/* hwrm_cfa_tunnel_filter_alloc */ -/* Input (88 bytes) */ +/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */ struct hwrm_cfa_tunnel_filter_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL - __le32 enables; - #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 
0x40UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL - __le64 l2_filter_id; - u8 l2_addr[6]; - __le16 l2_ivlan; - __le32 l3_addr[4]; - __le32 t_l3_addr[4]; - u8 l3_addr_type; - u8 t_l3_addr_type; - u8 tunnel_type; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + __le32 enables; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL + __le64 l2_filter_id; + u8 l2_addr[6]; + __le16 l2_ivlan; + __le32 l3_addr[4]; + __le32 t_l3_addr[4]; + u8 l3_addr_type; + u8 t_l3_addr_type; + u8 tunnel_type; #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL @@ -4174,158 +4372,204 @@ struct hwrm_cfa_tunnel_filter_alloc_input { #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL - u8 unused_0; - __le32 vni; - __le32 dst_vnic_id; - __le32 mirror_vnic_id; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 tunnel_flags; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL + __le32 vni; + __le32 dst_vnic_id; + __le32 mirror_vnic_id; }; -/* Output (24 bytes) */ +/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */ struct hwrm_cfa_tunnel_filter_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le64 tunnel_filter_id; - __le32 flow_id; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* hwrm_cfa_tunnel_filter_free */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tunnel_filter_id; + __le32 flow_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */ struct hwrm_cfa_tunnel_filter_free_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 tunnel_filter_id; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 tunnel_filter_id; }; -/* Output (16 bytes) */ +/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */ struct hwrm_cfa_tunnel_filter_free_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - 
u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_cfa_encap_record_alloc */ -/* Input (32 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */ +struct hwrm_vxlan_ipv4_hdr { + u8 ver_hlen; + #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL + #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0 + #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL + #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4 + u8 tos; + __be16 ip_id; + __be16 flags_frag_offset; + u8 ttl; + u8 protocol; + __be32 src_ip_addr; + __be32 dest_ip_addr; +}; + +/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */ +struct hwrm_vxlan_ipv6_hdr { + __be32 ver_tc_flow_label; + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK + __be16 payload_len; + u8 next_hdr; + u8 ttl; + __be32 src_ip_addr[4]; + __be32 dest_ip_addr[4]; +}; + +/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */ +struct hwrm_cfa_encap_data_vxlan { + u8 src_mac_addr[6]; + __le16 unused_0; + u8 dst_mac_addr[6]; + u8 num_vlan_tags; + u8 unused_1; + __be16 ovlan_tpid; + __be16 ovlan_tci; + __be16 ivlan_tpid; + __be16 ivlan_tci; + __le32 l3[10]; + #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL + #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL + #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL + #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 + __be16 src_port; + __be16 dst_port; + __be32 vni; +}; + +/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */ struct hwrm_cfa_encap_record_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL - u8 encap_type; - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL - u8 unused_0; - __le16 unused_1; - __le32 encap_data[20]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + u8 encap_type; + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE + u8 unused_0[3]; + __le32 encap_data[20]; +}; + 
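[Editor's note] The new hwrm_cfa_encap_data_vxlan (72B) fits inside the request's encap_data[20] payload (80B), and its l3[] words can in turn carry one of the new hwrm_vxlan_*_hdr layouts. A minimal sketch of building an IPv4 VXLAN encap record under those assumptions; smac, dmac, sip, dip and vni are assumed inputs, and the exact vni byte placement is defined by the HWRM spec, not by this sketch:

	/* Illustrative only: overlay the VXLAN encap layout onto encap_data[]. */
	struct hwrm_cfa_encap_record_alloc_input req = {};
	struct hwrm_cfa_encap_data_vxlan *encap =
		(struct hwrm_cfa_encap_data_vxlan *)req.encap_data;
	struct hwrm_vxlan_ipv4_hdr *ip4 =
		(struct hwrm_vxlan_ipv4_hdr *)encap->l3;

	req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
	ether_addr_copy(encap->src_mac_addr, smac);
	ether_addr_copy(encap->dst_mac_addr, dmac);
	/* Outer IPv4 header: version 4, 5-word header, UDP payload. */
	ip4->ver_hlen = (4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT) |
			(5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT);
	ip4->ttl = 64;
	ip4->protocol = IPPROTO_UDP;
	ip4->src_ip_addr = sip;			/* __be32 */
	ip4->dest_ip_addr = dip;		/* __be32 */
	encap->dst_port = cpu_to_be16(4789);	/* IANA-assigned VXLAN port */
	encap->vni = cpu_to_be32(vni);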
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */ struct hwrm_cfa_encap_record_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 encap_record_id; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* hwrm_cfa_encap_record_free */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 encap_record_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_encap_record_free_input (size:192b/24B) */ struct hwrm_cfa_encap_record_free_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 encap_record_id; - __le32 unused_0; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_record_id; + u8 unused_0[4]; }; -/* Output (16 bytes) */ +/* hwrm_cfa_encap_record_free_output (size:128b/16B) */ struct hwrm_cfa_encap_record_free_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_cfa_ntuple_filter_alloc */ -/* Input (128 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */ struct hwrm_cfa_ntuple_filter_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL - __le32 enables; - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL - __le64 l2_filter_id; - u8 src_macaddr[6]; - __be16 ethertype; - u8 ip_addr_type; - #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL - u8 ip_protocol; - #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL - __le16 dst_id; - __le16 mirror_vnic_id; - u8 tunnel_type; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + 
__le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL + __le32 enables; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL + __le64 l2_filter_id; + u8 src_macaddr[6]; + __be16 ethertype; + u8 ip_addr_type; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 + u8 ip_protocol; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP + __le16 dst_id; + __le16 mirror_vnic_id; + u8 tunnel_type; #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL @@ -4337,2221 +4581,1723 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL - u8 pri_hint; - #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL - __be32 src_ipaddr[4]; - __be32 src_ipaddr_mask[4]; - __be32 dst_ipaddr[4]; - __be32 dst_ipaddr_mask[4]; - __be16 src_port; - __be16 src_port_mask; - __be16 dst_port; - __be16 dst_port_mask; - __le64 ntuple_filter_id_hint; -}; - -/* Output (24 bytes) */ + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 pri_hint; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL + 
#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST + __be32 src_ipaddr[4]; + __be32 src_ipaddr_mask[4]; + __be32 dst_ipaddr[4]; + __be32 dst_ipaddr_mask[4]; + __be16 src_port; + __be16 src_port_mask; + __be16 dst_port; + __be16 dst_port_mask; + __le64 ntuple_filter_id_hint; +}; + +/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */ struct hwrm_cfa_ntuple_filter_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le64 ntuple_filter_id; - __le32 flow_id; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* Command specific Error Codes (8 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 ntuple_filter_id; + __le32 flow_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */ struct hwrm_cfa_ntuple_filter_alloc_cmd_err { - u8 code; - #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL + u8 code; + #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL - u8 unused_0[7]; + #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR + u8 unused_0[7]; }; -/* hwrm_cfa_ntuple_filter_free */ -/* Input (24 bytes) */ +/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */ struct hwrm_cfa_ntuple_filter_free_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 ntuple_filter_id; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 ntuple_filter_id; }; -/* Output (16 bytes) */ +/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */ struct hwrm_cfa_ntuple_filter_free_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_cfa_ntuple_filter_cfg */ -/* Input (48 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */ struct hwrm_cfa_ntuple_filter_cfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 enables; - #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL - #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL - #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL - __le32 unused_0; - __le64 ntuple_filter_id; - __le32 new_dst_id; - __le32 new_mirror_vnic_id; - __le16 new_meter_instance_id; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL + u8 unused_0[4]; + __le64 ntuple_filter_id; + __le32 new_dst_id; + __le32 new_mirror_vnic_id; + __le16 new_meter_instance_id; #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL - __le16 unused_1[3]; + #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID + u8 unused_1[6]; }; -/* Output (16 bytes) */ +/* 
hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */ struct hwrm_cfa_ntuple_filter_cfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_cfa_decap_filter_alloc */ -/* Input (104 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */ struct hwrm_cfa_decap_filter_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL - __le32 enables; - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL - #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL - __be32 tunnel_id; - u8 tunnel_type; - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL - u8 unused_0; - __le16 unused_1; - u8 src_macaddr[6]; - u8 unused_2; - u8 unused_3; - u8 dst_macaddr[6]; - __be16 ovlan_vid; - __be16 ivlan_vid; - __be16 t_ovlan_vid; - __be16 t_ivlan_vid; - __be16 ethertype; - u8 ip_addr_type; - #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL - #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL - #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL - u8 ip_protocol; - #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL - #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL - #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL - u8 unused_4; - u8 unused_5; - u8 unused_6[3]; - u8 unused_7; - __be32 src_ipaddr[4]; - __be32 dst_ipaddr[4]; - __be16 src_port; - __be16 dst_port; - __le16 dst_id; - __le16 l2_ctxt_ref_id; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL + __le32 enables; + #define 
CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL + __be32 tunnel_id; + u8 tunnel_type; + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0; + __le16 unused_1; + u8 src_macaddr[6]; + u8 unused_2[2]; + u8 dst_macaddr[6]; + __be16 ovlan_vid; + __be16 ivlan_vid; + __be16 t_ovlan_vid; + __be16 t_ivlan_vid; + __be16 ethertype; + u8 ip_addr_type; + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 + u8 ip_protocol; + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP + __le16 unused_3; + __le32 unused_4; + __be32 src_ipaddr[4]; + __be32 dst_ipaddr[4]; + __be16 src_port; + __be16 dst_port; + __le16 dst_id; + __le16 l2_ctxt_ref_id; +}; + +/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */ struct hwrm_cfa_decap_filter_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 decap_filter_id; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* hwrm_cfa_decap_filter_free */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 decap_filter_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */ struct hwrm_cfa_decap_filter_free_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 decap_filter_id; - 
__le32 unused_0; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 decap_filter_id; + u8 unused_0[4]; }; -/* Output (16 bytes) */ +/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */ struct hwrm_cfa_decap_filter_free_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_cfa_flow_alloc */ -/* Input (128 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */ struct hwrm_cfa_flow_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 flags; - #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL - #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL - #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1 - #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1) - #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1) - #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1) - #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO - #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL - #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3 - #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3) - #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3) - #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3) - #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 - __le16 src_fid; - __le32 tunnel_handle; - __le16 action_flags; - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL - __le16 dst_fid; - __be16 l2_rewrite_vlan_tpid; - __be16 l2_rewrite_vlan_tci; - __le16 act_meter_id; - __le16 ref_flow_handle; - __be16 ethertype; - __be16 outer_vlan_tci; - __be16 dmac[3]; - __be16 inner_vlan_tci; - __be16 smac[3]; - u8 ip_dst_mask_len; - u8 ip_src_mask_len; - __be32 ip_dst[4]; - __be32 ip_src[4]; - __be16 l4_src_port; - __be16 l4_src_port_mask; - __be16 l4_dst_port; - __be16 l4_dst_port_mask; - __be32 nat_ip_address[4]; - __be16 l2_rewrite_dmac[3]; - __be16 nat_port; - __be16 l2_rewrite_smac[3]; - u8 ip_proto; - u8 unused_0; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1 + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3 + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3) + #define 
CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 + __le16 src_fid; + __le32 tunnel_handle; + __le16 action_flags; + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL + __le16 dst_fid; + __be16 l2_rewrite_vlan_tpid; + __be16 l2_rewrite_vlan_tci; + __le16 act_meter_id; + __le16 ref_flow_handle; + __be16 ethertype; + __be16 outer_vlan_tci; + __be16 dmac[3]; + __be16 inner_vlan_tci; + __be16 smac[3]; + u8 ip_dst_mask_len; + u8 ip_src_mask_len; + __be32 ip_dst[4]; + __be32 ip_src[4]; + __be16 l4_src_port; + __be16 l4_src_port_mask; + __be16 l4_dst_port; + __be16 l4_dst_port_mask; + __be32 nat_ip_address[4]; + __be16 l2_rewrite_dmac[3]; + __be16 nat_port; + __be16 l2_rewrite_smac[3]; + u8 ip_proto; + u8 unused_0; +}; + +/* hwrm_cfa_flow_alloc_output (size:128b/16B) */ struct hwrm_cfa_flow_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 flow_handle; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* hwrm_cfa_flow_free */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flow_handle; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_flow_free_input (size:192b/24B) */ struct hwrm_cfa_flow_free_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 flow_handle; - __le16 unused_0[3]; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flow_handle; + u8 unused_0[6]; }; -/* Output (32 bytes) */ +/* hwrm_cfa_flow_free_output (size:256b/32B) */ struct hwrm_cfa_flow_free_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le64 packet; - __le64 byte; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_cfa_flow_stats */ -/* Input (40 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 packet; + __le64 byte; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_stats_input (size:320b/40B) */ struct hwrm_cfa_flow_stats_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 num_flows; - __le16 flow_handle_0; - __le16 flow_handle_1; - __le16 flow_handle_2; - __le16 flow_handle_3; - __le16 flow_handle_4; - __le16 flow_handle_5; - __le16 flow_handle_6; - __le16 flow_handle_7; - __le16 flow_handle_8; - __le16 flow_handle_9; - __le16 unused_0; -}; - -/* Output (176 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 num_flows; + __le16 flow_handle_0; + __le16 flow_handle_1; + __le16 flow_handle_2; + __le16 flow_handle_3; + __le16 flow_handle_4; + __le16 flow_handle_5; + __le16 flow_handle_6; + __le16 flow_handle_7; + __le16 flow_handle_8; + 
__le16 flow_handle_9; + u8 unused_0[2]; +}; + +/* hwrm_cfa_flow_stats_output (size:1408b/176B) */ struct hwrm_cfa_flow_stats_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le64 packet_0; - __le64 packet_1; - __le64 packet_2; - __le64 packet_3; - __le64 packet_4; - __le64 packet_5; - __le64 packet_6; - __le64 packet_7; - __le64 packet_8; - __le64 packet_9; - __le64 byte_0; - __le64 byte_1; - __le64 byte_2; - __le64 byte_3; - __le64 byte_4; - __le64 byte_5; - __le64 byte_6; - __le64 byte_7; - __le64 byte_8; - __le64 byte_9; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_cfa_vfr_alloc */ -/* Input (32 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 packet_0; + __le64 packet_1; + __le64 packet_2; + __le64 packet_3; + __le64 packet_4; + __le64 packet_5; + __le64 packet_6; + __le64 packet_7; + __le64 packet_8; + __le64 packet_9; + __le64 byte_0; + __le64 byte_1; + __le64 byte_2; + __le64 byte_3; + __le64 byte_4; + __le64 byte_5; + __le64 byte_6; + __le64 byte_7; + __le64 byte_8; + __le64 byte_9; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */ struct hwrm_cfa_vfr_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 vf_id; - __le16 reserved; - __le32 unused_0; - char vfr_name[32]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + __le16 reserved; + u8 unused_0[4]; + char vfr_name[32]; +}; + +/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */ struct hwrm_cfa_vfr_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 rx_cfa_code; - __le16 tx_cfa_action; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* hwrm_cfa_vfr_free */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rx_cfa_code; + __le16 tx_cfa_action; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_vfr_free_input (size:384b/48B) */ struct hwrm_cfa_vfr_free_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - char vfr_name[32]; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char vfr_name[32]; }; -/* Output (16 bytes) */ +/* hwrm_cfa_vfr_free_output (size:128b/16B) */ struct hwrm_cfa_vfr_free_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_tunnel_dst_port_query */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */ struct hwrm_tunnel_dst_port_query_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - u8 tunnel_type; - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - u8 unused_0[7]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define 
TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 + u8 unused_0[7]; +}; + +/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */ struct hwrm_tunnel_dst_port_query_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 tunnel_dst_port_id; - __be16 tunnel_dst_port_val; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* hwrm_tunnel_dst_port_alloc */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tunnel_dst_port_id; + __be16 tunnel_dst_port_val; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */ struct hwrm_tunnel_dst_port_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - u8 tunnel_type; - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - u8 unused_0; - __be16 tunnel_dst_port_val; - __be32 unused_1; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 + u8 unused_0; + __be16 tunnel_dst_port_val; + u8 unused_1[4]; +}; + +/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */ struct hwrm_tunnel_dst_port_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 tunnel_dst_port_id; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* hwrm_tunnel_dst_port_free */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tunnel_dst_port_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */ struct hwrm_tunnel_dst_port_free_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - u8 tunnel_type; - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - u8 unused_0; - __le16 tunnel_dst_port_id; - __le32 unused_1; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 + u8 unused_0; + __le16 tunnel_dst_port_id; + u8 unused_1[4]; +}; + +/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */ struct hwrm_tunnel_dst_port_free_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_stat_ctx_alloc */ -/* Input (32 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_1[7]; + u8 valid; +}; + +/* ctx_hw_stats 
(size:1280b/160B) */ +struct ctx_hw_stats { + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_discard_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 tpa_pkts; + __le64 tpa_bytes; + __le64 tpa_events; + __le64 tpa_aborts; +}; + +/* hwrm_stat_ctx_alloc_input (size:256b/32B) */ struct hwrm_stat_ctx_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 stats_dma_addr; - __le32 update_period_ms; - u8 stat_ctx_flags; - #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL - u8 unused_0[3]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 stats_dma_addr; + __le32 update_period_ms; + u8 stat_ctx_flags; + #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL + u8 unused_0[3]; +}; + +/* hwrm_stat_ctx_alloc_output (size:128b/16B) */ struct hwrm_stat_ctx_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 stat_ctx_id; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* hwrm_stat_ctx_free */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 stat_ctx_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_stat_ctx_free_input (size:192b/24B) */ struct hwrm_stat_ctx_free_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 stat_ctx_id; - __le32 unused_0; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 unused_0[4]; }; -/* Output (16 bytes) */ +/* hwrm_stat_ctx_free_output (size:128b/16B) */ struct hwrm_stat_ctx_free_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 stat_ctx_id; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* hwrm_stat_ctx_query */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 stat_ctx_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_stat_ctx_query_input (size:192b/24B) */ struct hwrm_stat_ctx_query_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 stat_ctx_id; - __le32 unused_0; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 unused_0[4]; }; -/* Output (176 bytes) */ +/* hwrm_stat_ctx_query_output (size:1408b/176B) */ struct hwrm_stat_ctx_query_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le64 tx_ucast_pkts; - __le64 tx_mcast_pkts; - __le64 tx_bcast_pkts; - __le64 tx_err_pkts; - __le64 tx_drop_pkts; - __le64 tx_ucast_bytes; - __le64 tx_mcast_bytes; - __le64 tx_bcast_bytes; - __le64 rx_ucast_pkts; - __le64 rx_mcast_pkts; - __le64 rx_bcast_pkts; - __le64 rx_err_pkts; - __le64 rx_drop_pkts; - __le64 rx_ucast_bytes; - __le64 rx_mcast_bytes; - __le64 rx_bcast_bytes; - __le64 rx_agg_pkts; - __le64 rx_agg_bytes; - __le64 rx_agg_events; - __le64 rx_agg_aborts; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_stat_ctx_clr_stats */ -/* Input (24 bytes) */ + __le16 
error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_err_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_err_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 rx_agg_pkts; + __le64 rx_agg_bytes; + __le64 rx_agg_events; + __le64 rx_agg_aborts; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */ struct hwrm_stat_ctx_clr_stats_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 stat_ctx_id; - __le32 unused_0; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 unused_0[4]; }; -/* Output (16 bytes) */ +/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */ struct hwrm_stat_ctx_clr_stats_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_fw_reset */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* tx_port_stats (size:3264b/408B) */ +struct tx_port_stats { + __le64 tx_64b_frames; + __le64 tx_65b_127b_frames; + __le64 tx_128b_255b_frames; + __le64 tx_256b_511b_frames; + __le64 tx_512b_1023b_frames; + __le64 tx_1024b_1518_frames; + __le64 tx_good_vlan_frames; + __le64 tx_1519b_2047_frames; + __le64 tx_2048b_4095b_frames; + __le64 tx_4096b_9216b_frames; + __le64 tx_9217b_16383b_frames; + __le64 tx_good_frames; + __le64 tx_total_frames; + __le64 tx_ucast_frames; + __le64 tx_mcast_frames; + __le64 tx_bcast_frames; + __le64 tx_pause_frames; + __le64 tx_pfc_frames; + __le64 tx_jabber_frames; + __le64 tx_fcs_err_frames; + __le64 tx_control_frames; + __le64 tx_oversz_frames; + __le64 tx_single_dfrl_frames; + __le64 tx_multi_dfrl_frames; + __le64 tx_single_coll_frames; + __le64 tx_multi_coll_frames; + __le64 tx_late_coll_frames; + __le64 tx_excessive_coll_frames; + __le64 tx_frag_frames; + __le64 tx_err; + __le64 tx_tagged_frames; + __le64 tx_dbl_tagged_frames; + __le64 tx_runt_frames; + __le64 tx_fifo_underruns; + __le64 tx_pfc_ena_frames_pri0; + __le64 tx_pfc_ena_frames_pri1; + __le64 tx_pfc_ena_frames_pri2; + __le64 tx_pfc_ena_frames_pri3; + __le64 tx_pfc_ena_frames_pri4; + __le64 tx_pfc_ena_frames_pri5; + __le64 tx_pfc_ena_frames_pri6; + __le64 tx_pfc_ena_frames_pri7; + __le64 tx_eee_lpi_events; + __le64 tx_eee_lpi_duration; + __le64 tx_llfc_logical_msgs; + __le64 tx_hcfc_msgs; + __le64 tx_total_collisions; + __le64 tx_bytes; + __le64 tx_xthol_frames; + __le64 tx_stat_discard; + __le64 tx_stat_error; +}; + +/* rx_port_stats (size:4224b/528B) */ +struct rx_port_stats { + __le64 rx_64b_frames; + __le64 rx_65b_127b_frames; + __le64 rx_128b_255b_frames; + __le64 rx_256b_511b_frames; + __le64 rx_512b_1023b_frames; + __le64 rx_1024b_1518_frames; + __le64 rx_good_vlan_frames; + __le64 rx_1519b_2047b_frames; + __le64 rx_2048b_4095b_frames; + __le64 rx_4096b_9216b_frames; + __le64 rx_9217b_16383b_frames; + __le64 rx_total_frames; + __le64 rx_ucast_frames; + __le64 rx_mcast_frames; + __le64 rx_bcast_frames; + __le64 rx_fcs_err_frames; + __le64 rx_ctrl_frames; + __le64 rx_pause_frames; + __le64 rx_pfc_frames; + __le64 
rx_unsupported_opcode_frames; + __le64 rx_unsupported_da_pausepfc_frames; + __le64 rx_wrong_sa_frames; + __le64 rx_align_err_frames; + __le64 rx_oor_len_frames; + __le64 rx_code_err_frames; + __le64 rx_false_carrier_frames; + __le64 rx_ovrsz_frames; + __le64 rx_jbr_frames; + __le64 rx_mtu_err_frames; + __le64 rx_match_crc_frames; + __le64 rx_promiscuous_frames; + __le64 rx_tagged_frames; + __le64 rx_double_tagged_frames; + __le64 rx_trunc_frames; + __le64 rx_good_frames; + __le64 rx_pfc_xon2xoff_frames_pri0; + __le64 rx_pfc_xon2xoff_frames_pri1; + __le64 rx_pfc_xon2xoff_frames_pri2; + __le64 rx_pfc_xon2xoff_frames_pri3; + __le64 rx_pfc_xon2xoff_frames_pri4; + __le64 rx_pfc_xon2xoff_frames_pri5; + __le64 rx_pfc_xon2xoff_frames_pri6; + __le64 rx_pfc_xon2xoff_frames_pri7; + __le64 rx_pfc_ena_frames_pri0; + __le64 rx_pfc_ena_frames_pri1; + __le64 rx_pfc_ena_frames_pri2; + __le64 rx_pfc_ena_frames_pri3; + __le64 rx_pfc_ena_frames_pri4; + __le64 rx_pfc_ena_frames_pri5; + __le64 rx_pfc_ena_frames_pri6; + __le64 rx_pfc_ena_frames_pri7; + __le64 rx_sch_crc_err_frames; + __le64 rx_undrsz_frames; + __le64 rx_frag_frames; + __le64 rx_eee_lpi_events; + __le64 rx_eee_lpi_duration; + __le64 rx_llfc_physical_msgs; + __le64 rx_llfc_logical_msgs; + __le64 rx_llfc_msgs_with_crc_err; + __le64 rx_hcfc_msgs; + __le64 rx_hcfc_msgs_with_crc_err; + __le64 rx_bytes; + __le64 rx_runt_bytes; + __le64 rx_runt_frames; + __le64 rx_stat_discard; + __le64 rx_stat_err; +}; + +/* hwrm_fw_reset_input (size:192b/24B) */ struct hwrm_fw_reset_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - u8 embedded_proc_type; - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL - u8 selfrst_status; - #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL - #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL - #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL - u8 host_idx; - u8 unused_0[5]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 embedded_proc_type; + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP + u8 selfrst_status; + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL + #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST + u8 host_idx; + u8 unused_0[5]; +}; + +/* hwrm_fw_reset_output (size:128b/16B) */ struct hwrm_fw_reset_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 selfrst_status; - #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL - #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL - #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL - u8 unused_0; - __le16 
unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* hwrm_fw_qstatus */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 selfrst_status; + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL + #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_fw_qstatus_input (size:192b/24B) */ struct hwrm_fw_qstatus_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - u8 embedded_proc_type; - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL - u8 unused_0[7]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 embedded_proc_type; + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP + u8 unused_0[7]; +}; + +/* hwrm_fw_qstatus_output (size:128b/16B) */ struct hwrm_fw_qstatus_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 selfrst_status; - #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL - #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL - #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL - u8 unused_0; - __le16 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* hwrm_fw_set_time */ -/* Input (32 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 selfrst_status; + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL + #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_fw_set_time_input (size:256b/32B) */ struct hwrm_fw_set_time_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 year; - #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL - u8 month; - u8 day; - u8 hour; - u8 minute; - u8 second; - u8 unused_0; - __le16 millisecond; - __le16 zone; - #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL - #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL - __le32 unused_1; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 year; + #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL + #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 unused_0; + __le16 millisecond; + __le16 zone; + #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL + #define 
FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL + #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN + u8 unused_1[4]; +}; + +/* hwrm_fw_set_time_output (size:128b/16B) */ struct hwrm_fw_set_time_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_fw_set_structured_data */ -/* Input (32 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_struct_hdr (size:128b/16B) */ +struct hwrm_struct_hdr { + __le16 struct_id; + #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL + #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL + #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL + #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL + #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL + #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL + #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL + #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL + #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL + #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL + #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_RSS_V2 + __le16 len; + u8 version; + u8 count; + __le16 subtype; + __le16 next_offset; + #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL + u8 unused_0[6]; +}; + +/* hwrm_struct_data_dcbx_app (size:64b/8B) */ +struct hwrm_struct_data_dcbx_app { + __be16 protocol_id; + u8 protocol_selector; + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT + u8 priority; + u8 valid; + u8 unused_0[3]; +}; + +/* hwrm_fw_set_structured_data_input (size:256b/32B) */ struct hwrm_fw_set_structured_data_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 src_data_addr; - __le16 data_len; - u8 hdr_cnt; - u8 unused_0[5]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le16 data_len; + u8 hdr_cnt; + u8 unused_0[5]; +}; + +/* hwrm_fw_set_structured_data_output (size:128b/16B) */ struct hwrm_fw_set_structured_data_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* Command specific Error Codes (8 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */ struct hwrm_fw_set_structured_data_cmd_err { - u8 code; - #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL - #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL - #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL - #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL - u8 unused_0[7]; + u8 code; + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID + u8 unused_0[7]; }; -/* hwrm_fw_get_structured_data 
*/ -/* Input (32 bytes) */ +/* hwrm_fw_get_structured_data_input (size:256b/32B) */ struct hwrm_fw_get_structured_data_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 dest_data_addr; - __le16 data_len; - __le16 structure_id; - __le16 subtype; - #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL - #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL - #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + __le16 data_len; + __le16 structure_id; + __le16 subtype; + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL - #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL - #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL - #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL - #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL - u8 count; - u8 unused_0; + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL + u8 count; + u8 unused_0; }; -/* Output (16 bytes) */ +/* hwrm_fw_get_structured_data_output (size:128b/16B) */ struct hwrm_fw_get_structured_data_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 hdr_cnt; - u8 unused_0; - __le16 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* Command specific Error Codes (8 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 hdr_cnt; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */ struct hwrm_fw_get_structured_data_cmd_err { - u8 code; - #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL - #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL - u8 unused_0[7]; + u8 code; + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID + u8 unused_0[7]; }; -/* hwrm_exec_fwd_resp */ -/* Input (128 bytes) */ +/* hwrm_exec_fwd_resp_input (size:1024b/128B) */ struct hwrm_exec_fwd_resp_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 encap_request[26]; - __le16 encap_resp_target_id; - __le16 unused_0[3]; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[26]; + __le16 encap_resp_target_id; + u8 unused_0[6]; }; -/* Output (16 bytes) */ +/* hwrm_exec_fwd_resp_output (size:128b/16B) */ struct hwrm_exec_fwd_resp_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_reject_fwd_resp */ -/* Input (128 bytes) */ + __le16 error_code; + 
__le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_reject_fwd_resp_input (size:1024b/128B) */ struct hwrm_reject_fwd_resp_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 encap_request[26]; - __le16 encap_resp_target_id; - __le16 unused_0[3]; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[26]; + __le16 encap_resp_target_id; + u8 unused_0[6]; }; -/* Output (16 bytes) */ +/* hwrm_reject_fwd_resp_output (size:128b/16B) */ struct hwrm_reject_fwd_resp_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_fwd_resp */ -/* Input (40 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fwd_resp_input (size:1024b/128B) */ struct hwrm_fwd_resp_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 encap_resp_target_id; - __le16 encap_resp_cmpl_ring; - __le16 encap_resp_len; - u8 unused_0; - u8 unused_1; - __le64 encap_resp_addr; - __le32 encap_resp[24]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 encap_resp_target_id; + __le16 encap_resp_cmpl_ring; + __le16 encap_resp_len; + u8 unused_0; + u8 unused_1; + __le64 encap_resp_addr; + __le32 encap_resp[24]; +}; + +/* hwrm_fwd_resp_output (size:128b/16B) */ struct hwrm_fwd_resp_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_fwd_async_event_cmpl */ -/* Input (32 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */ struct hwrm_fwd_async_event_cmpl_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 encap_async_event_target_id; - u8 unused_0; - u8 unused_1; - u8 unused_2[3]; - u8 unused_3; - __le32 encap_async_event_cmpl[4]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 encap_async_event_target_id; + u8 unused_0[6]; + __le32 encap_async_event_cmpl[4]; +}; + +/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */ struct hwrm_fwd_async_event_cmpl_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_temp_monitor_query */ -/* Input (16 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_temp_monitor_query_input (size:128b/16B) */ struct hwrm_temp_monitor_query_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; }; -/* Output (16 bytes) */ +/* hwrm_temp_monitor_query_output (size:128b/16B) */ struct hwrm_temp_monitor_query_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 temp; - u8 unused_0; - __le16 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 
valid; -}; - -/* hwrm_wol_filter_alloc */ -/* Input (64 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 temp; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_wol_filter_alloc_input (size:512b/64B) */ struct hwrm_wol_filter_alloc_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; - __le32 enables; - #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL - #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL - __le16 port_id; - u8 wol_type; - #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL - #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL - #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL - u8 unused_0; - __le32 unused_1; - u8 mac_address[6]; - __le16 pattern_offset; - __le16 pattern_buf_size; - __le16 pattern_mask_size; - __le32 unused_2; - __le64 pattern_buf_addr; - __le64 pattern_mask_addr; -}; - -/* Output (16 bytes) */ + __le16 port_id; + u8 wol_type; + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID + u8 unused_0[5]; + u8 mac_address[6]; + __le16 pattern_offset; + __le16 pattern_buf_size; + __le16 pattern_mask_size; + u8 unused_1[4]; + __le64 pattern_buf_addr; + __le64 pattern_mask_addr; +}; + +/* hwrm_wol_filter_alloc_output (size:128b/16B) */ struct hwrm_wol_filter_alloc_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 wol_filter_id; - u8 unused_0; - __le16 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* hwrm_wol_filter_free */ -/* Input (32 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 wol_filter_id; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_wol_filter_free_input (size:256b/32B) */ struct hwrm_wol_filter_free_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 flags; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL - __le32 enables; - #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL - __le16 port_id; - u8 wol_filter_id; - u8 unused_0[5]; + __le32 enables; + #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL + __le16 port_id; + u8 wol_filter_id; + u8 unused_0[5]; }; -/* Output (16 bytes) */ +/* hwrm_wol_filter_free_output (size:128b/16B) */ struct hwrm_wol_filter_free_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_wol_filter_qcfg */ -/* Input (56 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_wol_filter_qcfg_input (size:448b/56B) */ struct hwrm_wol_filter_qcfg_input { - 
__le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 port_id; - __le16 handle; - __le32 unused_0; - __le64 pattern_buf_addr; - __le16 pattern_buf_size; - u8 unused_1; - u8 unused_2; - u8 unused_3[3]; - u8 unused_4; - __le64 pattern_mask_addr; - __le16 pattern_mask_size; - __le16 unused_5[3]; -}; - -/* Output (32 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 handle; + u8 unused_0[4]; + __le64 pattern_buf_addr; + __le16 pattern_buf_size; + u8 unused_1[6]; + __le64 pattern_mask_addr; + __le16 pattern_mask_size; + u8 unused_2[6]; +}; + +/* hwrm_wol_filter_qcfg_output (size:256b/32B) */ struct hwrm_wol_filter_qcfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 next_handle; - u8 wol_filter_id; - u8 wol_type; - #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL - #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL - #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL - __le32 unused_0; - u8 mac_address[6]; - __le16 pattern_offset; - __le16 pattern_size; - __le16 pattern_mask_size; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_wol_reason_qcfg */ -/* Input (40 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 next_handle; + u8 wol_filter_id; + u8 wol_type; + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID + __le32 unused_0; + u8 mac_address[6]; + __le16 pattern_offset; + __le16 pattern_size; + __le16 pattern_mask_size; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_wol_reason_qcfg_input (size:320b/40B) */ struct hwrm_wol_reason_qcfg_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 port_id; - u8 unused_0; - u8 unused_1; - u8 unused_2[3]; - u8 unused_3; - __le64 wol_pkt_buf_addr; - __le16 wol_pkt_buf_size; - __le16 unused_4[3]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; + __le64 wol_pkt_buf_addr; + __le16 wol_pkt_buf_size; + u8 unused_1[6]; +}; + +/* hwrm_wol_reason_qcfg_output (size:128b/16B) */ struct hwrm_wol_reason_qcfg_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 wol_filter_id; - u8 wol_reason; - #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL - #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL - #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL - u8 wol_pkt_len; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_dbg_read_direct */ -/* Input (32 bytes) */ -struct hwrm_dbg_read_direct_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 host_dest_addr; - __le32 read_addr; - __le32 read_len32; -}; - -/* Output (16 bytes) */ -struct hwrm_dbg_read_direct_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_nvm_read */ -/* Input (40 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 wol_filter_id; + u8 wol_reason; + #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL + 
#define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL + #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL + #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID + u8 wol_pkt_len; + u8 unused_0[4]; + u8 valid; +}; + +/* hwrm_nvm_read_input (size:320b/40B) */ struct hwrm_nvm_read_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 host_dest_addr; - __le16 dir_idx; - u8 unused_0; - u8 unused_1; - __le32 offset; - __le32 len; - __le32 unused_2; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le16 dir_idx; + u8 unused_0[2]; + __le32 offset; + __le32 len; + u8 unused_1[4]; +}; + +/* hwrm_nvm_read_output (size:128b/16B) */ struct hwrm_nvm_read_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_nvm_get_dir_entries */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */ struct hwrm_nvm_get_dir_entries_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 host_dest_addr; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; }; -/* Output (16 bytes) */ +/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */ struct hwrm_nvm_get_dir_entries_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_nvm_get_dir_info */ -/* Input (16 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_get_dir_info_input (size:128b/16B) */ struct hwrm_nvm_get_dir_info_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; }; -/* Output (24 bytes) */ +/* hwrm_nvm_get_dir_info_output (size:192b/24B) */ struct hwrm_nvm_get_dir_info_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 entries; - __le32 entry_length; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_nvm_write */ -/* Input (48 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 entries; + __le32 entry_length; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_write_input (size:384b/48B) */ struct hwrm_nvm_write_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 host_src_addr; - __le16 dir_type; - __le16 dir_ordinal; - __le16 dir_ext; - __le16 dir_attr; - __le32 dir_data_length; - __le16 option; - __le16 flags; - #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL - __le32 dir_item_length; - __le32 unused_0; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 dir_attr; + __le32 dir_data_length; + __le16 option; + __le16 flags; + #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL + __le32 
dir_item_length; + __le32 unused_0; +}; + +/* hwrm_nvm_write_output (size:128b/16B) */ struct hwrm_nvm_write_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 dir_item_length; - __le16 dir_idx; - u8 unused_0; - u8 valid; + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 dir_item_length; + __le16 dir_idx; + u8 unused_0; + u8 valid; }; -/* Command specific Error Codes (8 bytes) */ +/* hwrm_nvm_write_cmd_err (size:64b/8B) */ struct hwrm_nvm_write_cmd_err { - u8 code; - #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL - #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL - #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL - u8 unused_0[7]; + u8 code; + #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL + #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL + #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_NO_SPACE + u8 unused_0[7]; }; -/* hwrm_nvm_modify */ -/* Input (40 bytes) */ +/* hwrm_nvm_modify_input (size:320b/40B) */ struct hwrm_nvm_modify_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 host_src_addr; - __le16 dir_idx; - u8 unused_0; - u8 unused_1; - __le32 offset; - __le32 len; - __le32 unused_2; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le16 dir_idx; + u8 unused_0[2]; + __le32 offset; + __le32 len; + u8 unused_1[4]; +}; + +/* hwrm_nvm_modify_output (size:128b/16B) */ struct hwrm_nvm_modify_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_nvm_find_dir_entry */ -/* Input (32 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */ struct hwrm_nvm_find_dir_entry_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 enables; - #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL - __le16 dir_idx; - __le16 dir_type; - __le16 dir_ordinal; - __le16 dir_ext; - u8 opt_ordinal; - #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL - #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0 - #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL - #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL - #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL - u8 unused_1[3]; -}; - -/* Output (32 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL + __le16 dir_idx; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + u8 opt_ordinal; + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0 + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT + u8 unused_0[3]; +}; + +/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */ struct hwrm_nvm_find_dir_entry_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 dir_item_length; - __le32 dir_data_length; - __le32 fw_ver; - __le16 dir_ordinal; - __le16 dir_idx; - 
__le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_nvm_erase_dir_entry */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 dir_item_length; + __le32 dir_data_length; + __le32 fw_ver; + __le16 dir_ordinal; + __le16 dir_idx; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */ struct hwrm_nvm_erase_dir_entry_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 dir_idx; - __le16 unused_0[3]; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dir_idx; + u8 unused_0[6]; }; -/* Output (16 bytes) */ +/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */ struct hwrm_nvm_erase_dir_entry_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_nvm_get_dev_info */ -/* Input (16 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_get_dev_info_input (size:128b/16B) */ struct hwrm_nvm_get_dev_info_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; }; -/* Output (32 bytes) */ +/* hwrm_nvm_get_dev_info_output (size:256b/32B) */ struct hwrm_nvm_get_dev_info_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 manufacturer_id; - __le16 device_id; - __le32 sector_size; - __le32 nvram_size; - __le32 reserved_size; - __le32 available_size; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* hwrm_nvm_mod_dir_entry */ -/* Input (32 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 manufacturer_id; + __le16 device_id; + __le32 sector_size; + __le32 nvram_size; + __le32 reserved_size; + __le32 available_size; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */ struct hwrm_nvm_mod_dir_entry_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 enables; - #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL - __le16 dir_idx; - __le16 dir_ordinal; - __le16 dir_ext; - __le16 dir_attr; - __le32 checksum; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL + __le16 dir_idx; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 dir_attr; + __le32 checksum; +}; + +/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */ struct hwrm_nvm_mod_dir_entry_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_nvm_verify_update */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_verify_update_input (size:192b/24B) */ struct hwrm_nvm_verify_update_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le16 dir_type; - __le16 dir_ordinal; - __le16 dir_ext; - __le16 unused_0; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + 
__le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + u8 unused_0[2]; +}; + +/* hwrm_nvm_verify_update_output (size:128b/16B) */ struct hwrm_nvm_verify_update_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_nvm_install_update */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_install_update_input (size:192b/24B) */ struct hwrm_nvm_install_update_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le32 install_type; - #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL - #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL - __le16 flags; - #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL - #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL - #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL - __le16 unused_0; -}; - -/* Output (24 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 install_type; + #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL + #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL + #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL + __le16 flags; + #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL + u8 unused_0[2]; +}; + +/* hwrm_nvm_install_update_output (size:192b/24B) */ struct hwrm_nvm_install_update_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le64 installed_items; - u8 result; - #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL - u8 problem_item; - #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL - #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL - u8 reset_required; - #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL - #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL - #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* Command specific Error Codes (8 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 installed_items; + u8 result; + #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS + u8 problem_item; + #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL + #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL + #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE + u8 reset_required; + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER + u8 unused_0[4]; + u8 valid; +}; + +/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */ struct hwrm_nvm_install_update_cmd_err { - u8 code; - #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL - #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL - #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL - u8 unused_0[7]; 
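/* Editor's note (illustrative aside, not part of the patch): all of the
 * hwrm_*_input structures above share the same 16-byte header
 * (req_type, cmpl_ring, seq_id, target_id, resp_addr), and every
 * hwrm_*_output ends with a `valid` byte that firmware writes last, so
 * the driver can tell when the DMA'd response is complete. A minimal
 * sketch of issuing one of these messages, reusing the real bnxt helpers
 * that appear in the bnxt_sriov.c hunks below (bp->hwrm_cmd_resp_addr is
 * the driver's response DMA buffer; error handling and locking trimmed):
 *
 *	struct hwrm_nvm_get_dev_info_output *resp = bp->hwrm_cmd_resp_addr;
 *	struct hwrm_nvm_get_dev_info_input req = {0};
 *	u32 sector_size;
 *	int rc;
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1);
 *	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *	if (!rc)
 *		sector_size = le32_to_cpu(resp->sector_size);
 */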
+ u8 code; + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE + u8 unused_0[7]; }; -/* hwrm_nvm_get_variable */ -/* Input (40 bytes) */ +/* hwrm_nvm_get_variable_input (size:320b/40B) */ struct hwrm_nvm_get_variable_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 dest_data_addr; - __le16 data_len; - __le16 option_num; - #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL - #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL - __le16 dimensions; - __le16 index_0; - __le16 index_1; - __le16 index_2; - __le16 index_3; - u8 flags; - #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL - u8 unused_0; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 flags; + #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL + u8 unused_0; +}; + +/* hwrm_nvm_get_variable_output (size:128b/16B) */ struct hwrm_nvm_get_variable_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 data_len; - __le16 option_num; - #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL - #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* Command specific Error Codes (8 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 data_len; + __le16 option_num; + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */ struct hwrm_nvm_get_variable_cmd_err { - u8 code; - #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL - #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL - #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL - #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL - u8 unused_0[7]; + u8 code; + #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT + u8 unused_0[7]; }; -/* hwrm_nvm_set_variable */ -/* Input (40 bytes) */ +/* hwrm_nvm_set_variable_input (size:320b/40B) */ struct hwrm_nvm_set_variable_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 src_data_addr; - __le16 data_len; - __le16 option_num; - #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL - #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL - __le16 dimensions; - __le16 index_0; - __le16 index_1; - __le16 index_2; - __le16 index_3; - u8 flags; - #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL - #define 
NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL - #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1 - #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1) - #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1) - #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 - u8 unused_0; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 flags; + #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1 + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 + u8 unused_0; +}; + +/* hwrm_nvm_set_variable_output (size:128b/16B) */ struct hwrm_nvm_set_variable_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* Command specific Error Codes (8 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */ struct hwrm_nvm_set_variable_cmd_err { - u8 code; - #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL - #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL - #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL - u8 unused_0[7]; + u8 code; + #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR + u8 unused_0[7]; }; -/* hwrm_selftest_qlist */ -/* Input (16 bytes) */ +/* hwrm_selftest_qlist_input (size:128b/16B) */ struct hwrm_selftest_qlist_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; }; -/* Output (280 bytes) */ +/* hwrm_selftest_qlist_output (size:2240b/280B) */ struct hwrm_selftest_qlist_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 num_tests; - u8 available_tests; - #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL - #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL - #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL - #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL - #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL - #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL - u8 offline_tests; - #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL - #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL - #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL - #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL - #define 
SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL - #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL - u8 unused_0; - __le16 test_timeout; - u8 unused_1; - u8 unused_2; - char test0_name[32]; - char test1_name[32]; - char test2_name[32]; - char test3_name[32]; - char test4_name[32]; - char test5_name[32]; - char test6_name[32]; - char test7_name[32]; - __le32 unused_3; - u8 unused_4; - u8 unused_5; - u8 unused_6; - u8 valid; -}; - -/* hwrm_selftest_exec */ -/* Input (24 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_tests; + u8 available_tests; + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL + u8 offline_tests; + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0; + __le16 test_timeout; + u8 unused_1[2]; + char test0_name[32]; + char test1_name[32]; + char test2_name[32]; + char test3_name[32]; + char test4_name[32]; + char test5_name[32]; + char test6_name[32]; + char test7_name[32]; + u8 unused_2[7]; + u8 valid; +}; + +/* hwrm_selftest_exec_input (size:192b/24B) */ struct hwrm_selftest_exec_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - u8 flags; - #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL - #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL - #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL - #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL - #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL - #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL - u8 unused_0[7]; -}; - -/* Output (16 bytes) */ + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL + #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL + #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL + #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL + u8 pcie_lane_num; + u8 unused_0[6]; +}; + +/* hwrm_selftest_exec_output (size:128b/16B) */ struct hwrm_selftest_exec_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - u8 requested_tests; - #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL - #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL - #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL - #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL - #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL - #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL - u8 test_success; - #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL - #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL - #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL - #define 
SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL - #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL - #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 unused_4; - u8 valid; -}; - -/* hwrm_selftest_irq */ -/* Input (16 bytes) */ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 requested_tests; + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL + u8 test_success; + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_selftest_irq_input (size:128b/16B) */ struct hwrm_selftest_irq_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; }; -/* Output (16 bytes) */ +/* hwrm_selftest_irq_output (size:128b/16B) */ struct hwrm_selftest_irq_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 unused_0; - u8 unused_1; - u8 unused_2; - u8 unused_3; - u8 valid; -}; - -/* hwrm_selftest_retrieve_serdes_data */ -/* Input (32 bytes) */ -struct hwrm_selftest_retrieve_serdes_data_input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; - __le64 resp_data_addr; - __le32 resp_data_offset; - __le16 data_len; - u8 flags; - #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_MASK 0xfUL - #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_SFT 0 - #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL - #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL - u8 unused_0; -}; - -/* Output (16 bytes) */ -struct hwrm_selftest_retrieve_serdes_data_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le16 total_data_len; - __le16 copied_data_len; - u8 unused_0; - u8 unused_1; - u8 unused_2; - u8 valid; -}; - -/* Hardware Resource Manager Specification */ -/* Input (16 bytes) */ -struct input { - __le16 req_type; - __le16 cmpl_ring; - __le16 seq_id; - __le16 target_id; - __le64 resp_addr; -}; - -/* Output (8 bytes) */ -struct output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; -}; - -/* Short Command Structure (16 bytes) */ -struct hwrm_short_input { - __le16 req_type; - __le16 signature; - #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL - __le16 unused_0; - __le16 size; - __le64 req_addr; -}; - -/* Command numbering (8 bytes) */ -struct cmd_nums { - __le16 req_type; - #define HWRM_VER_GET (0x0UL) - #define HWRM_FUNC_BUF_UNRGTR (0xeUL) - #define HWRM_FUNC_VF_CFG (0xfUL) - #define RESERVED1 (0x10UL) - #define HWRM_FUNC_RESET (0x11UL) - #define HWRM_FUNC_GETFID (0x12UL) - #define HWRM_FUNC_VF_ALLOC (0x13UL) - #define HWRM_FUNC_VF_FREE (0x14UL) - #define 
HWRM_FUNC_QCAPS (0x15UL) - #define HWRM_FUNC_QCFG (0x16UL) - #define HWRM_FUNC_CFG (0x17UL) - #define HWRM_FUNC_QSTATS (0x18UL) - #define HWRM_FUNC_CLR_STATS (0x19UL) - #define HWRM_FUNC_DRV_UNRGTR (0x1aUL) - #define HWRM_FUNC_VF_RESC_FREE (0x1bUL) - #define HWRM_FUNC_VF_VNIC_IDS_QUERY (0x1cUL) - #define HWRM_FUNC_DRV_RGTR (0x1dUL) - #define HWRM_FUNC_DRV_QVER (0x1eUL) - #define HWRM_FUNC_BUF_RGTR (0x1fUL) - #define HWRM_PORT_PHY_CFG (0x20UL) - #define HWRM_PORT_MAC_CFG (0x21UL) - #define HWRM_PORT_TS_QUERY (0x22UL) - #define HWRM_PORT_QSTATS (0x23UL) - #define HWRM_PORT_LPBK_QSTATS (0x24UL) - #define HWRM_PORT_CLR_STATS (0x25UL) - #define HWRM_PORT_LPBK_CLR_STATS (0x26UL) - #define HWRM_PORT_PHY_QCFG (0x27UL) - #define HWRM_PORT_MAC_QCFG (0x28UL) - #define HWRM_PORT_MAC_PTP_QCFG (0x29UL) - #define HWRM_PORT_PHY_QCAPS (0x2aUL) - #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL) - #define HWRM_PORT_PHY_I2C_READ (0x2cUL) - #define HWRM_PORT_LED_CFG (0x2dUL) - #define HWRM_PORT_LED_QCFG (0x2eUL) - #define HWRM_PORT_LED_QCAPS (0x2fUL) - #define HWRM_QUEUE_QPORTCFG (0x30UL) - #define HWRM_QUEUE_QCFG (0x31UL) - #define HWRM_QUEUE_CFG (0x32UL) - #define HWRM_FUNC_VLAN_CFG (0x33UL) - #define HWRM_FUNC_VLAN_QCFG (0x34UL) - #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL) - #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL) - #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL) - #define HWRM_QUEUE_PRI2COS_CFG (0x38UL) - #define HWRM_QUEUE_COS2BW_QCFG (0x39UL) - #define HWRM_QUEUE_COS2BW_CFG (0x3aUL) - #define HWRM_QUEUE_DSCP_QCAPS (0x3bUL) - #define HWRM_QUEUE_DSCP2PRI_QCFG (0x3cUL) - #define HWRM_QUEUE_DSCP2PRI_CFG (0x3dUL) - #define HWRM_VNIC_ALLOC (0x40UL) - #define HWRM_VNIC_FREE (0x41UL) - #define HWRM_VNIC_CFG (0x42UL) - #define HWRM_VNIC_QCFG (0x43UL) - #define HWRM_VNIC_TPA_CFG (0x44UL) - #define HWRM_VNIC_TPA_QCFG (0x45UL) - #define HWRM_VNIC_RSS_CFG (0x46UL) - #define HWRM_VNIC_RSS_QCFG (0x47UL) - #define HWRM_VNIC_PLCMODES_CFG (0x48UL) - #define HWRM_VNIC_PLCMODES_QCFG (0x49UL) - #define HWRM_VNIC_QCAPS (0x4aUL) - #define HWRM_RING_ALLOC (0x50UL) - #define HWRM_RING_FREE (0x51UL) - #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL) - #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (0x53UL) - #define HWRM_RING_RESET (0x5eUL) - #define HWRM_RING_GRP_ALLOC (0x60UL) - #define HWRM_RING_GRP_FREE (0x61UL) - #define RESERVED5 (0x64UL) - #define RESERVED6 (0x65UL) - #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL) - #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL) - #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL) - #define HWRM_CFA_L2_FILTER_FREE (0x91UL) - #define HWRM_CFA_L2_FILTER_CFG (0x92UL) - #define HWRM_CFA_L2_SET_RX_MASK (0x93UL) - #define HWRM_CFA_VLAN_ANTISPOOF_CFG (0x94UL) - #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL) - #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL) - #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL) - #define HWRM_CFA_ENCAP_RECORD_FREE (0x98UL) - #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL) - #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL) - #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL) - #define HWRM_CFA_EM_FLOW_ALLOC (0x9cUL) - #define HWRM_CFA_EM_FLOW_FREE (0x9dUL) - #define HWRM_CFA_EM_FLOW_CFG (0x9eUL) - #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL) - #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL) - #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL) - #define HWRM_STAT_CTX_ALLOC (0xb0UL) - #define HWRM_STAT_CTX_FREE (0xb1UL) - #define HWRM_STAT_CTX_QUERY (0xb2UL) - #define HWRM_STAT_CTX_CLR_STATS (0xb3UL) - #define HWRM_FW_RESET (0xc0UL) - #define HWRM_FW_QSTATUS (0xc1UL) - #define HWRM_FW_SET_TIME (0xc8UL) - #define 
HWRM_FW_GET_TIME (0xc9UL) - #define HWRM_FW_SET_STRUCTURED_DATA (0xcaUL) - #define HWRM_FW_GET_STRUCTURED_DATA (0xcbUL) - #define HWRM_FW_IPC_MAILBOX (0xccUL) - #define HWRM_EXEC_FWD_RESP (0xd0UL) - #define HWRM_REJECT_FWD_RESP (0xd1UL) - #define HWRM_FWD_RESP (0xd2UL) - #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL) - #define HWRM_TEMP_MONITOR_QUERY (0xe0UL) - #define HWRM_WOL_FILTER_ALLOC (0xf0UL) - #define HWRM_WOL_FILTER_FREE (0xf1UL) - #define HWRM_WOL_FILTER_QCFG (0xf2UL) - #define HWRM_WOL_REASON_QCFG (0xf3UL) - #define HWRM_CFA_METER_PROFILE_ALLOC (0xf5UL) - #define HWRM_CFA_METER_PROFILE_FREE (0xf6UL) - #define HWRM_CFA_METER_PROFILE_CFG (0xf7UL) - #define HWRM_CFA_METER_INSTANCE_ALLOC (0xf8UL) - #define HWRM_CFA_METER_INSTANCE_FREE (0xf9UL) - #define HWRM_CFA_VFR_ALLOC (0xfdUL) - #define HWRM_CFA_VFR_FREE (0xfeUL) - #define HWRM_CFA_VF_PAIR_ALLOC (0x100UL) - #define HWRM_CFA_VF_PAIR_FREE (0x101UL) - #define HWRM_CFA_VF_PAIR_INFO (0x102UL) - #define HWRM_CFA_FLOW_ALLOC (0x103UL) - #define HWRM_CFA_FLOW_FREE (0x104UL) - #define HWRM_CFA_FLOW_FLUSH (0x105UL) - #define HWRM_CFA_FLOW_STATS (0x106UL) - #define HWRM_CFA_FLOW_INFO (0x107UL) - #define HWRM_CFA_DECAP_FILTER_ALLOC (0x108UL) - #define HWRM_CFA_DECAP_FILTER_FREE (0x109UL) - #define HWRM_CFA_VLAN_ANTISPOOF_QCFG (0x10aUL) - #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC (0x10bUL) - #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE (0x10cUL) - #define HWRM_CFA_PAIR_ALLOC (0x10dUL) - #define HWRM_CFA_PAIR_FREE (0x10eUL) - #define HWRM_CFA_PAIR_INFO (0x10fUL) - #define HWRM_FW_IPC_MSG (0x110UL) - #define HWRM_SELFTEST_QLIST (0x200UL) - #define HWRM_SELFTEST_EXEC (0x201UL) - #define HWRM_SELFTEST_IRQ (0x202UL) - #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA (0x203UL) - #define HWRM_DBG_READ_DIRECT (0xff10UL) - #define HWRM_DBG_READ_INDIRECT (0xff11UL) - #define HWRM_DBG_WRITE_DIRECT (0xff12UL) - #define HWRM_DBG_WRITE_INDIRECT (0xff13UL) - #define HWRM_DBG_DUMP (0xff14UL) - #define HWRM_DBG_ERASE_NVM (0xff15UL) - #define HWRM_DBG_CFG (0xff16UL) - #define HWRM_DBG_COREDUMP_LIST (0xff17UL) - #define HWRM_DBG_COREDUMP_INITIATE (0xff18UL) - #define HWRM_DBG_COREDUMP_RETRIEVE (0xff19UL) - #define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL) - #define HWRM_NVM_VALIDATE_OPTION (0xffefUL) - #define HWRM_NVM_FLUSH (0xfff0UL) - #define HWRM_NVM_GET_VARIABLE (0xfff1UL) - #define HWRM_NVM_SET_VARIABLE (0xfff2UL) - #define HWRM_NVM_INSTALL_UPDATE (0xfff3UL) - #define HWRM_NVM_MODIFY (0xfff4UL) - #define HWRM_NVM_VERIFY_UPDATE (0xfff5UL) - #define HWRM_NVM_GET_DEV_INFO (0xfff6UL) - #define HWRM_NVM_ERASE_DIR_ENTRY (0xfff7UL) - #define HWRM_NVM_MOD_DIR_ENTRY (0xfff8UL) - #define HWRM_NVM_FIND_DIR_ENTRY (0xfff9UL) - #define HWRM_NVM_GET_DIR_ENTRIES (0xfffaUL) - #define HWRM_NVM_GET_DIR_INFO (0xfffbUL) - #define HWRM_NVM_RAW_DUMP (0xfffcUL) - #define HWRM_NVM_READ (0xfffdUL) - #define HWRM_NVM_WRITE (0xfffeUL) - #define HWRM_NVM_RAW_WRITE_BLK (0xffffUL) - __le16 unused_0[3]; -}; - -/* Return Codes (8 bytes) */ -struct ret_codes { - __le16 error_code; - #define HWRM_ERR_CODE_SUCCESS (0x0UL) - #define HWRM_ERR_CODE_FAIL (0x1UL) - #define HWRM_ERR_CODE_INVALID_PARAMS (0x2UL) - #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (0x3UL) - #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (0x4UL) - #define HWRM_ERR_CODE_INVALID_FLAGS (0x5UL) - #define HWRM_ERR_CODE_INVALID_ENABLES (0x6UL) - #define HWRM_ERR_CODE_HWRM_ERROR (0xfUL) - #define HWRM_ERR_CODE_UNKNOWN_ERR (0xfffeUL) - #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (0xffffUL) - __le16 unused_0[3]; -}; - -/* Output (16 bytes) */ -struct 
hwrm_err_output { - __le16 error_code; - __le16 req_type; - __le16 seq_id; - __le16 resp_len; - __le32 opaque_0; - __le16 opaque_1; - u8 cmd_err; - u8 valid; -}; - -/* Port Tx Statistics Formats (408 bytes) */ -struct tx_port_stats { - __le64 tx_64b_frames; - __le64 tx_65b_127b_frames; - __le64 tx_128b_255b_frames; - __le64 tx_256b_511b_frames; - __le64 tx_512b_1023b_frames; - __le64 tx_1024b_1518_frames; - __le64 tx_good_vlan_frames; - __le64 tx_1519b_2047_frames; - __le64 tx_2048b_4095b_frames; - __le64 tx_4096b_9216b_frames; - __le64 tx_9217b_16383b_frames; - __le64 tx_good_frames; - __le64 tx_total_frames; - __le64 tx_ucast_frames; - __le64 tx_mcast_frames; - __le64 tx_bcast_frames; - __le64 tx_pause_frames; - __le64 tx_pfc_frames; - __le64 tx_jabber_frames; - __le64 tx_fcs_err_frames; - __le64 tx_control_frames; - __le64 tx_oversz_frames; - __le64 tx_single_dfrl_frames; - __le64 tx_multi_dfrl_frames; - __le64 tx_single_coll_frames; - __le64 tx_multi_coll_frames; - __le64 tx_late_coll_frames; - __le64 tx_excessive_coll_frames; - __le64 tx_frag_frames; - __le64 tx_err; - __le64 tx_tagged_frames; - __le64 tx_dbl_tagged_frames; - __le64 tx_runt_frames; - __le64 tx_fifo_underruns; - __le64 tx_pfc_ena_frames_pri0; - __le64 tx_pfc_ena_frames_pri1; - __le64 tx_pfc_ena_frames_pri2; - __le64 tx_pfc_ena_frames_pri3; - __le64 tx_pfc_ena_frames_pri4; - __le64 tx_pfc_ena_frames_pri5; - __le64 tx_pfc_ena_frames_pri6; - __le64 tx_pfc_ena_frames_pri7; - __le64 tx_eee_lpi_events; - __le64 tx_eee_lpi_duration; - __le64 tx_llfc_logical_msgs; - __le64 tx_hcfc_msgs; - __le64 tx_total_collisions; - __le64 tx_bytes; - __le64 tx_xthol_frames; - __le64 tx_stat_discard; - __le64 tx_stat_error; -}; - -/* Port Rx Statistics Formats (528 bytes) */ -struct rx_port_stats { - __le64 rx_64b_frames; - __le64 rx_65b_127b_frames; - __le64 rx_128b_255b_frames; - __le64 rx_256b_511b_frames; - __le64 rx_512b_1023b_frames; - __le64 rx_1024b_1518_frames; - __le64 rx_good_vlan_frames; - __le64 rx_1519b_2047b_frames; - __le64 rx_2048b_4095b_frames; - __le64 rx_4096b_9216b_frames; - __le64 rx_9217b_16383b_frames; - __le64 rx_total_frames; - __le64 rx_ucast_frames; - __le64 rx_mcast_frames; - __le64 rx_bcast_frames; - __le64 rx_fcs_err_frames; - __le64 rx_ctrl_frames; - __le64 rx_pause_frames; - __le64 rx_pfc_frames; - __le64 rx_unsupported_opcode_frames; - __le64 rx_unsupported_da_pausepfc_frames; - __le64 rx_wrong_sa_frames; - __le64 rx_align_err_frames; - __le64 rx_oor_len_frames; - __le64 rx_code_err_frames; - __le64 rx_false_carrier_frames; - __le64 rx_ovrsz_frames; - __le64 rx_jbr_frames; - __le64 rx_mtu_err_frames; - __le64 rx_match_crc_frames; - __le64 rx_promiscuous_frames; - __le64 rx_tagged_frames; - __le64 rx_double_tagged_frames; - __le64 rx_trunc_frames; - __le64 rx_good_frames; - __le64 rx_pfc_xon2xoff_frames_pri0; - __le64 rx_pfc_xon2xoff_frames_pri1; - __le64 rx_pfc_xon2xoff_frames_pri2; - __le64 rx_pfc_xon2xoff_frames_pri3; - __le64 rx_pfc_xon2xoff_frames_pri4; - __le64 rx_pfc_xon2xoff_frames_pri5; - __le64 rx_pfc_xon2xoff_frames_pri6; - __le64 rx_pfc_xon2xoff_frames_pri7; - __le64 rx_pfc_ena_frames_pri0; - __le64 rx_pfc_ena_frames_pri1; - __le64 rx_pfc_ena_frames_pri2; - __le64 rx_pfc_ena_frames_pri3; - __le64 rx_pfc_ena_frames_pri4; - __le64 rx_pfc_ena_frames_pri5; - __le64 rx_pfc_ena_frames_pri6; - __le64 rx_pfc_ena_frames_pri7; - __le64 rx_sch_crc_err_frames; - __le64 rx_undrsz_frames; - __le64 rx_frag_frames; - __le64 rx_eee_lpi_events; - __le64 rx_eee_lpi_duration; - __le64 rx_llfc_physical_msgs; - __le64 
rx_llfc_logical_msgs; - __le64 rx_llfc_msgs_with_crc_err; - __le64 rx_hcfc_msgs; - __le64 rx_hcfc_msgs_with_crc_err; - __le64 rx_bytes; - __le64 rx_runt_bytes; - __le64 rx_runt_frames; - __le64 rx_stat_discard; - __le64 rx_stat_err; -}; - -/* VXLAN IPv4 encapsulation structure (16 bytes) */ -struct hwrm_vxlan_ipv4_hdr { - u8 ver_hlen; - #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL - #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0 - #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL - #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4 - u8 tos; - __be16 ip_id; - __be16 flags_frag_offset; - u8 ttl; - u8 protocol; - __be32 src_ip_addr; - __be32 dest_ip_addr; -}; - -/* VXLAN IPv6 encapsulation structure (32 bytes) */ -struct hwrm_vxlan_ipv6_hdr { - __be32 ver_tc_flow_label; - #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL - #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL - #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL - #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL - #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL - #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL - __be16 payload_len; - u8 next_hdr; - u8 ttl; - __be32 src_ip_addr[4]; - __be32 dest_ip_addr[4]; -}; - -/* VXLAN encapsulation structure (72 bytes) */ -struct hwrm_cfa_encap_data_vxlan { - u8 src_mac_addr[6]; - __le16 unused_0; - u8 dst_mac_addr[6]; - u8 num_vlan_tags; - u8 unused_1; - __be16 ovlan_tpid; - __be16 ovlan_tci; - __be16 ivlan_tpid; - __be16 ivlan_tci; - __le32 l3[10]; - #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL - #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL - #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL - __be16 src_port; - __be16 dst_port; - __be32 vni; -}; - -/* Periodic Statistics Context DMA to host (160 bytes) */ -struct ctx_hw_stats { - __le64 rx_ucast_pkts; - __le64 rx_mcast_pkts; - __le64 rx_bcast_pkts; - __le64 rx_discard_pkts; - __le64 rx_drop_pkts; - __le64 rx_ucast_bytes; - __le64 rx_mcast_bytes; - __le64 rx_bcast_bytes; - __le64 tx_ucast_pkts; - __le64 tx_mcast_pkts; - __le64 tx_bcast_pkts; - __le64 tx_discard_pkts; - __le64 tx_drop_pkts; - __le64 tx_ucast_bytes; - __le64 tx_mcast_bytes; - __le64 tx_bcast_bytes; - __le64 tpa_pkts; - __le64 tpa_bytes; - __le64 tpa_events; - __le64 tpa_aborts; -}; - -/* Structure data header (16 bytes) */ -struct hwrm_struct_hdr { - __le16 struct_id; - #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL - #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL - #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL - #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL - #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL - #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL - #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL - #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL - #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL - #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL - __le16 len; - u8 version; - u8 count; - __le16 subtype; - __le16 next_offset; - #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL - __le16 unused_0[3]; -}; - -/* DCBX Application configuration structure (1057) (8 bytes) */ -struct hwrm_struct_data_dcbx_app { - __be16 protocol_id; - u8 protocol_selector; - #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL - #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL - #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL - #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL - u8 priority; - u8 valid; - u8 unused_0[3]; + __le16 error_code; + __le16 req_type; + 
__le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; }; -#endif +#endif /* _BNXT_HSI_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index c9617675f934..d87faad901fe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -135,7 +135,10 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id, ivi->vf = vf_id; vf = &bp->pf.vf[vf_id]; - memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN); + if (is_valid_ether_addr(vf->mac_addr)) + memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN); + else + memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN); ivi->max_tx_rate = vf->max_tx_rate; ivi->min_tx_rate = vf->min_tx_rate; ivi->vlan = vf->vlan; @@ -416,29 +419,126 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } -/* only call by PF to reserve resources for VF */ +/* Only called by PF to reserve resources for VFs, returns actual number of + * VFs configured, or < 0 on error. + */ +static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) +{ + struct hwrm_func_vf_resource_cfg_input req = {0}; + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + u16 vf_tx_rings, vf_rx_rings, vf_cp_rings; + u16 vf_stat_ctx, vf_vnics, vf_ring_grps; + struct bnxt_pf_info *pf = &bp->pf; + int i, rc = 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); + + vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings; + vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; + if (bp->flags & BNXT_FLAG_AGG_RINGS) + vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; + else + vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings; + vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings; + vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings; + vf_vnics = hw_resc->max_vnics - bp->nr_vnics; + vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); + + req.min_rsscos_ctx = cpu_to_le16(1); + req.max_rsscos_ctx = cpu_to_le16(1); + if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) { + req.min_cmpl_rings = cpu_to_le16(1); + req.min_tx_rings = cpu_to_le16(1); + req.min_rx_rings = cpu_to_le16(1); + req.min_l2_ctxs = cpu_to_le16(1); + req.min_vnics = cpu_to_le16(1); + req.min_stat_ctx = cpu_to_le16(1); + req.min_hw_ring_grps = cpu_to_le16(1); + } else { + vf_cp_rings /= num_vfs; + vf_tx_rings /= num_vfs; + vf_rx_rings /= num_vfs; + vf_vnics /= num_vfs; + vf_stat_ctx /= num_vfs; + vf_ring_grps /= num_vfs; + + req.min_cmpl_rings = cpu_to_le16(vf_cp_rings); + req.min_tx_rings = cpu_to_le16(vf_tx_rings); + req.min_rx_rings = cpu_to_le16(vf_rx_rings); + req.min_l2_ctxs = cpu_to_le16(4); + req.min_vnics = cpu_to_le16(vf_vnics); + req.min_stat_ctx = cpu_to_le16(vf_stat_ctx); + req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps); + } + req.max_cmpl_rings = cpu_to_le16(vf_cp_rings); + req.max_tx_rings = cpu_to_le16(vf_tx_rings); + req.max_rx_rings = cpu_to_le16(vf_rx_rings); + req.max_l2_ctxs = cpu_to_le16(4); + req.max_vnics = cpu_to_le16(vf_vnics); + req.max_stat_ctx = cpu_to_le16(vf_stat_ctx); + req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps); + + mutex_lock(&bp->hwrm_cmd_lock); + for (i = 0; i < num_vfs; i++) { + req.vf_id = cpu_to_le16(pf->first_vf_id + i); + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + if (rc) { + rc = -ENOMEM; + break; + } + pf->active_vfs = i + 1; + pf->vf[i].fw_fid = pf->first_vf_id + i; + } + mutex_unlock(&bp->hwrm_cmd_lock); + if (pf->active_vfs) { + u16 n = 1; + + if 
(pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL) + n = pf->active_vfs; + + hw_resc->max_tx_rings -= vf_tx_rings * n; + hw_resc->max_rx_rings -= vf_rx_rings * n; + hw_resc->max_hw_ring_grps -= vf_ring_grps * n; + hw_resc->max_cp_rings -= vf_cp_rings * n; + hw_resc->max_rsscos_ctxs -= pf->active_vfs; + hw_resc->max_stat_ctxs -= vf_stat_ctx * n; + hw_resc->max_vnics -= vf_vnics * n; + + rc = pf->active_vfs; + } + return rc; +} + +/* Only called by PF to reserve resources for VFs, returns actual number of + * VFs configured, or < 0 on error. + */ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) { u32 rc = 0, mtu, i; u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; - u16 vf_ring_grps; + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + u16 vf_ring_grps, max_stat_ctxs; struct hwrm_func_cfg_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; int total_vf_tx_rings = 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + max_stat_ctxs = hw_resc->max_stat_ctxs; + /* Remaining rings are distributed equally amongs VF's for now */ - vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; - vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; + vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs; + vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; if (bp->flags & BNXT_FLAG_AGG_RINGS) - vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) / + vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / num_vfs; else - vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs; - vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs; - vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs; - vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs; + vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) / + num_vfs; + vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs; + vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs; + vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs; vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU | @@ -485,22 +585,34 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) total_vf_tx_rings += vf_tx_rsvd; } mutex_unlock(&bp->hwrm_cmd_lock); - if (!rc) { - pf->max_tx_rings -= total_vf_tx_rings; - pf->max_rx_rings -= vf_rx_rings * num_vfs; - pf->max_hw_ring_grps -= vf_ring_grps * num_vfs; - pf->max_cp_rings -= vf_cp_rings * num_vfs; - pf->max_rsscos_ctxs -= num_vfs; - pf->max_stat_ctxs -= vf_stat_ctx * num_vfs; - pf->max_vnics -= vf_vnics * num_vfs; + if (rc) + rc = -ENOMEM; + if (pf->active_vfs) { + hw_resc->max_tx_rings -= total_vf_tx_rings; + hw_resc->max_rx_rings -= vf_rx_rings * num_vfs; + hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs; + hw_resc->max_cp_rings -= vf_cp_rings * num_vfs; + hw_resc->max_rsscos_ctxs -= num_vfs; + hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs; + hw_resc->max_vnics -= vf_vnics * num_vfs; + rc = pf->active_vfs; } return rc; } +static int bnxt_func_cfg(struct bnxt *bp, int num_vfs) +{ + if (bp->flags & BNXT_FLAG_NEW_RM) + return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs); + else + return bnxt_hwrm_func_cfg(bp, num_vfs); +} + static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) { int rc = 0, vfs_supported; int min_rx_rings, min_tx_rings, min_rss_ctxs; + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int tx_ok = 0, rx_ok = 0, rss_ok = 0; int avail_cp, avail_stat; @@ -510,8 +622,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) */ 
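/* Editor's note (illustrative aside, not part of the patch): the loop
 * that follows starts from the requested VF count and decrements
 * vfs_supported until the leftover PF resources cover the per-VF
 * minimums (one rx ring, one tx ring and one RSS context per VF, with
 * completion rings and stat contexts to match). Worked example for the
 * BNXT_FLAG_AGG_RINGS branch, with hypothetical numbers: if
 * hw_resc->max_rx_rings = 32 and bp->rx_nr_rings = 8, the PF keeps
 * 8 * 2 = 16 rx rings for itself (each rx ring has an aggregation
 * companion), leaving 16, so the rx check passes for at most 16 VFs.
 */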
vfs_supported = *num_vfs; - avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings; - avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs; + avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings; + avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; avail_cp = min_t(int, avail_cp, avail_stat); while (vfs_supported) { @@ -520,23 +632,24 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) min_rss_ctxs = vfs_supported; if (bp->flags & BNXT_FLAG_AGG_RINGS) { - if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >= + if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >= min_rx_rings) rx_ok = 1; } else { - if (bp->pf.max_rx_rings - bp->rx_nr_rings >= + if (hw_resc->max_rx_rings - bp->rx_nr_rings >= min_rx_rings) rx_ok = 1; } - if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings || + if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings || avail_cp < min_rx_rings) rx_ok = 0; - if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings && + if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings && avail_cp >= min_tx_rings) tx_ok = 1; - if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs) + if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >= + min_rss_ctxs) rss_ok = 1; if (tx_ok && rx_ok && rss_ok) @@ -561,9 +674,16 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) goto err_out1; /* Reserve resources for VFs */ - rc = bnxt_hwrm_func_cfg(bp, *num_vfs); - if (rc) - goto err_out2; + rc = bnxt_func_cfg(bp, *num_vfs); + if (rc != *num_vfs) { + if (rc <= 0) { + netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n"); + *num_vfs = 0; + goto err_out2; + } + netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc); + *num_vfs = rc; + } /* Register buffers for VFs */ rc = bnxt_hwrm_func_buf_rgtr(bp); @@ -766,17 +886,51 @@ exec_fwd_resp_exit: return rc; } +static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input); + struct hwrm_func_vf_cfg_input *req = + (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr; + + /* Only allow VF to set a valid MAC address if the PF assigned MAC + * address is zero + */ + if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) { + if (is_valid_ether_addr(req->dflt_mac_addr) && + !is_valid_ether_addr(vf->mac_addr)) { + ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr); + return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); + } + return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); + } + return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); +} + static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) { u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input); struct hwrm_cfa_l2_filter_alloc_input *req = (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr; + bool mac_ok = false; - if (!is_valid_ether_addr(vf->mac_addr) || - ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr)) + /* VF MAC address must first match PF MAC address, if it is valid. 
+ * Otherwise, it must match the VF MAC address if firmware spec >= + * 1.2.2 + */ + if (is_valid_ether_addr(vf->mac_addr)) { + if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr)) + mac_ok = true; + } else if (is_valid_ether_addr(vf->vf_mac_addr)) { + if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr)) + mac_ok = true; + } else if (bp->hwrm_spec_code < 0x10202) { + mac_ok = true; + } else { + mac_ok = true; + } + if (mac_ok) return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); - else - return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); + return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); } static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) @@ -838,6 +992,9 @@ static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) u32 req_type = le16_to_cpu(encap_req->req_type); switch (req_type) { + case HWRM_FUNC_VF_CFG: + rc = bnxt_vf_store_mac(bp, vf); + break; case HWRM_CFA_L2_FILTER_ALLOC: rc = bnxt_vf_validate_set_mac(bp, vf); break; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index d8fee26cd45e..fbe6e208e17b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -43,7 +43,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev) } /* Is dev a VF-rep? */ - if (dev != pf_bp->dev) + if (bnxt_dev_is_vf_rep(dev)) return bnxt_vf_rep_get_fid(dev); bp = netdev_priv(dev); @@ -54,12 +54,10 @@ static int bnxt_tc_parse_redir(struct bnxt *bp, struct bnxt_tc_actions *actions, const struct tc_action *tc_act) { - int ifindex = tcf_mirred_ifindex(tc_act); - struct net_device *dev; + struct net_device *dev = tcf_mirred_dev(tc_act); - dev = __dev_get_by_index(dev_net(bp->dev), ifindex); if (!dev) { - netdev_info(bp->dev, "no dev for ifindex=%d", ifindex); + netdev_info(bp->dev, "no dev in mirred action"); return -EINVAL; } @@ -148,9 +146,6 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, } } - if (rc) - return rc; - if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { /* dst_fid is PF's fid */ @@ -164,7 +159,7 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, } } - return rc; + return 0; } #define GET_KEY(flow_cmd, key_type) \ @@ -1417,11 +1412,7 @@ bnxt_tc_flow_stats_batch_prep(struct bnxt *bp, void *flow_node; int rc, i; - rc = rhashtable_walk_start(iter); - if (rc && rc != -EAGAIN) { - i = 0; - goto done; - } + rhashtable_walk_start(iter); rc = 0; for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) { @@ -1483,9 +1474,6 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, { int rc = 0; - if (cls_flower->common.chain_index) - return -EOPNOTSUPP; - switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: rc = bnxt_tc_add_flow(bp, src_fid, cls_flower); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 69186d188c43..26290403f38f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -124,7 +124,8 @@ static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type, struct bnxt *bp = vf_rep->bp; int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid; - if (!bnxt_tc_flower_enabled(vf_rep->bp) || !tc_can_offload(bp->dev)) + if (!bnxt_tc_flower_enabled(vf_rep->bp) || + !tc_cls_can_offload_and_chain0(bp->dev, type_data)) return -EOPNOTSUPP; switch (type) { @@ -241,6 +242,11 @@ static const struct net_device_ops bnxt_vf_rep_netdev_ops = { 
.ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name }; +bool bnxt_dev_is_vf_rep(struct net_device *dev) +{ + return dev->netdev_ops == &bnxt_vf_rep_netdev_ops; +} + /* Called when the parent PF interface is closed: * As the mode transition from SWITCHDEV to LEGACY * happens under the rtnl_lock() this routine is safe @@ -376,6 +382,26 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, ether_addr_copy(dev->dev_addr, dev->perm_addr); } +static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) +{ + struct pci_dev *pdev = bp->pdev; + int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); + u32 dw; + + if (!pos) { + netdev_info(bp->dev, "Unable do read adapter's DSN"); + return -EOPNOTSUPP; + } + + /* DSN (two dw) is at an offset of 4 from the cap pos */ + pos += 4; + pci_read_config_dword(pdev, pos, &dw); + put_unaligned_le32(dw, &dsn[0]); + pci_read_config_dword(pdev, pos + 4, &dw); + put_unaligned_le32(dw, &dsn[4]); + return 0; +} + static int bnxt_vf_reps_create(struct bnxt *bp) { u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev); @@ -440,6 +466,11 @@ static int bnxt_vf_reps_create(struct bnxt *bp) } } + /* Read the adapter's DSN to use as the eswitch switch_id */ + rc = bnxt_pcie_dsn_get(bp, bp->switch_id); + if (rc) + goto err; + /* publish cfa_code_map only after all VF-reps have been initialized */ bp->cfa_code_map = cfa_code_map; bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h index fb06bbe70e42..38b9a75ad724 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h @@ -28,6 +28,7 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev) return bp->pf.vf[vf_rep->vf_idx].fw_fid; } +bool bnxt_dev_is_vf_rep(struct net_device *dev); int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode); int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode); @@ -54,5 +55,10 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev) { return 0; } + +static inline bool bnxt_dev_is_vf_rep(struct net_device *dev) +{ + return false; +} #endif /* CONFIG_BNXT_SRIOV */ #endif /* BNXT_VFR_H */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 261e5847557a..1389ab5e05df 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -96,6 +96,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, xdp.data = *data_ptr; xdp_set_data_meta_invalid(&xdp); xdp.data_end = *data_ptr + *len; + xdp.rxq = &rxr->xdp_rxq; orig_data = xdp.data; mapping = rx_buf->mapping - bp->rx_dma_offset; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 24b4f4ceceef..b1e35a9accf1 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2527,9 +2527,10 @@ static void bcmgenet_irq_task(struct work_struct *work) spin_unlock_irq(&priv->lock); /* Link UP/DOWN event */ - if (status & UMAC_IRQ_LINK_EVENT) - phy_mac_interrupt(priv->dev->phydev, - !!(status & UMAC_IRQ_LINK_UP)); + if (status & UMAC_IRQ_LINK_EVENT) { + priv->dev->phydev->link = !!(status & UMAC_IRQ_LINK_UP); + phy_mac_interrupt(priv->dev->phydev); + } } /* bcmgenet_isr1: handle Rx and Tx priority queues */ diff --git a/drivers/net/ethernet/broadcom/tg3.c 
b/drivers/net/ethernet/broadcom/tg3.c index 8995cfefbfcf..a77ee2f8fb8d 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -3227,7 +3227,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp, return 0; } -#define NVRAM_CMD_TIMEOUT 5000 +#define NVRAM_CMD_TIMEOUT 10000 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) { @@ -14789,7 +14789,7 @@ static void tg3_get_5717_nvram_info(struct tg3 *tp) static void tg3_get_5720_nvram_info(struct tg3 *tp) { - u32 nvcfg1, nvmpinstrp; + u32 nvcfg1, nvmpinstrp, nv_status; nvcfg1 = tr32(NVRAM_CFG1); nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; @@ -14801,6 +14801,23 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp) } switch (nvmpinstrp) { + case FLASH_5762_MX25L_100: + case FLASH_5762_MX25L_200: + case FLASH_5762_MX25L_400: + case FLASH_5762_MX25L_800: + case FLASH_5762_MX25L_160_320: + tp->nvram_pagesize = 4096; + tp->nvram_jedecnum = JEDEC_MACRONIX; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + tg3_flag_set(tp, FLASH); + nv_status = tr32(NVRAM_AUTOSENSE_STATUS); + tp->nvram_size = + (1 << (nv_status >> AUTOSENSE_DEVID & + AUTOSENSE_DEVID_MASK) + << AUTOSENSE_SIZE_IN_MB); + return; + case FLASH_5762_EEPROM_HD: nvmpinstrp = FLASH_5720_EEPROM_HD; break; diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 1f0271fa7c74..47f51cc0566d 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -1863,7 +1863,7 @@ #define NVRAM_STAT 0x00007004 #define NVRAM_WRDATA 0x00007008 #define NVRAM_ADDR 0x0000700c -#define NVRAM_ADDR_MSK 0x00ffffff +#define NVRAM_ADDR_MSK 0x07ffffff #define NVRAM_RDDATA 0x00007010 #define NVRAM_CFG1 0x00007014 #define NVRAM_CFG1_FLASHIF_ENAB 0x00000001 @@ -1945,6 +1945,11 @@ #define FLASH_5720_EEPROM_LD 0x00000003 #define FLASH_5762_EEPROM_HD 0x02000001 #define FLASH_5762_EEPROM_LD 0x02000003 +#define FLASH_5762_MX25L_100 0x00800000 +#define FLASH_5762_MX25L_200 0x00800002 +#define FLASH_5762_MX25L_400 0x00800001 +#define FLASH_5762_MX25L_800 0x00800003 +#define FLASH_5762_MX25L_160_320 0x03800002 #define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000 #define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002 #define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001 @@ -2009,7 +2014,11 @@ /* 0x702c unused */ #define NVRAM_ADDR_LOCKOUT 0x00007030 -/* 0x7034 --> 0x7500 unused */ +#define NVRAM_AUTOSENSE_STATUS 0x00007038 +#define AUTOSENSE_DEVID 0x00000010 +#define AUTOSENSE_DEVID_MASK 0x00000007 +#define AUTOSENSE_SIZE_IN_MB 17 +/* 0x703c --> 0x7500 unused */ #define OTP_MODE 0x00007500 #define OTP_MODE_OTP_THRU_GRC 0x00000001 @@ -3378,6 +3387,7 @@ struct tg3 { #define JEDEC_ST 0x20 #define JEDEC_SAIFUN 0x4f #define JEDEC_SST 0xbf +#define JEDEC_MACRONIX 0xc2 #define ATMEL_AT24C02_CHIP_SIZE TG3_NVRAM_SIZE_2KB #define ATMEL_AT24C02_PAGE_SIZE (8) diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index c93f3a2dc6c1..86659823b259 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -13,6 +13,7 @@ #include <linux/phy.h> #include <linux/ptp_clock_kernel.h> #include <linux/net_tstamp.h> +#include <linux/interrupt.h> #if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP) #define MACB_EXT_DESC @@ -164,14 +165,38 @@ #define GEM_DCFG5 0x0290 /* Design Config 5 */ #define GEM_DCFG6 0x0294 /* Design Config 6 */ #define GEM_DCFG7 0x0298 /* Design Config 7 */ +#define GEM_DCFG8 0x029C /* Design Config 
8 */ #define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */ #define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */ +/* Screener Type 2 match registers */ +#define GEM_SCRT2 0x540 + +/* EtherType registers */ +#define GEM_ETHT 0x06E0 + +/* Type 2 compare registers */ +#define GEM_T2CMPW0 0x0700 +#define GEM_T2CMPW1 0x0704 +#define T2CMP_OFST(t2idx) (t2idx * 2) + +/* type 2 compare registers + * each location requires 3 compare regs + */ +#define GEM_IP4SRC_CMP(idx) (idx * 3) +#define GEM_IP4DST_CMP(idx) (idx * 3 + 1) +#define GEM_PORT_CMP(idx) (idx * 3 + 2) + +/* Which screening type 2 EtherType register will be used (0 - 7) */ +#define SCRT2_ETHT 0 + #define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2)) #define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2)) #define GEM_TBQPH(hw_q) (0x04C8) #define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2)) +#define GEM_RBQS(hw_q) (0x04A0 + ((hw_q) << 2)) +#define GEM_RBQPH(hw_q) (0x04D4) #define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2)) #define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2)) #define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2)) @@ -455,6 +480,16 @@ #define GEM_DAW64_OFFSET 23 #define GEM_DAW64_SIZE 1 +/* Bitfields in DCFG8. */ +#define GEM_T1SCR_OFFSET 24 +#define GEM_T1SCR_SIZE 8 +#define GEM_T2SCR_OFFSET 16 +#define GEM_T2SCR_SIZE 8 +#define GEM_SCR2ETH_OFFSET 8 +#define GEM_SCR2ETH_SIZE 8 +#define GEM_SCR2CMP_OFFSET 0 +#define GEM_SCR2CMP_SIZE 8 + /* Bitfields in TISUBN */ #define GEM_SUBNSINCR_OFFSET 0 #define GEM_SUBNSINCR_SIZE 16 @@ -483,6 +518,66 @@ #define GEM_RXTSMODE_OFFSET 4 /* RX Descriptor Timestamp Insertion mode */ #define GEM_RXTSMODE_SIZE 2 +/* Bitfields in SCRT2 */ +#define GEM_QUEUE_OFFSET 0 /* Queue Number */ +#define GEM_QUEUE_SIZE 4 +#define GEM_VLANPR_OFFSET 4 /* VLAN Priority */ +#define GEM_VLANPR_SIZE 3 +#define GEM_VLANEN_OFFSET 8 /* VLAN Enable */ +#define GEM_VLANEN_SIZE 1 +#define GEM_ETHT2IDX_OFFSET 9 /* Index to screener type 2 EtherType register */ +#define GEM_ETHT2IDX_SIZE 3 +#define GEM_ETHTEN_OFFSET 12 /* EtherType Enable */ +#define GEM_ETHTEN_SIZE 1 +#define GEM_CMPA_OFFSET 13 /* Compare A - Index to screener type 2 Compare register */ +#define GEM_CMPA_SIZE 5 +#define GEM_CMPAEN_OFFSET 18 /* Compare A Enable */ +#define GEM_CMPAEN_SIZE 1 +#define GEM_CMPB_OFFSET 19 /* Compare B - Index to screener type 2 Compare register */ +#define GEM_CMPB_SIZE 5 +#define GEM_CMPBEN_OFFSET 24 /* Compare B Enable */ +#define GEM_CMPBEN_SIZE 1 +#define GEM_CMPC_OFFSET 25 /* Compare C - Index to screener type 2 Compare register */ +#define GEM_CMPC_SIZE 5 +#define GEM_CMPCEN_OFFSET 30 /* Compare C Enable */ +#define GEM_CMPCEN_SIZE 1 + +/* Bitfields in ETHT */ +#define GEM_ETHTCMP_OFFSET 0 /* EtherType compare value */ +#define GEM_ETHTCMP_SIZE 16 + +/* Bitfields in T2CMPW0 */ +#define GEM_T2CMP_OFFSET 16 /* 0xFFFF0000 compare value */ +#define GEM_T2CMP_SIZE 16 +#define GEM_T2MASK_OFFSET 0 /* 0x0000FFFF compare value or mask */ +#define GEM_T2MASK_SIZE 16 + +/* Bitfields in T2CMPW1 */ +#define GEM_T2DISMSK_OFFSET 9 /* disable mask */ +#define GEM_T2DISMSK_SIZE 1 +#define GEM_T2CMPOFST_OFFSET 7 /* compare offset */ +#define GEM_T2CMPOFST_SIZE 2 +#define GEM_T2OFST_OFFSET 0 /* offset value */ +#define GEM_T2OFST_SIZE 7 + +/* Offset for screener type 2 compare values (T2CMPOFST). + * Note the offset is applied after the specified point, + * e.g. 
GEM_T2COMPOFST_ETYPE denotes the EtherType field, so an offset + * of 12 bytes from this would be the source IP address in an IP header + */ +#define GEM_T2COMPOFST_SOF 0 +#define GEM_T2COMPOFST_ETYPE 1 +#define GEM_T2COMPOFST_IPHDR 2 +#define GEM_T2COMPOFST_TCPUDP 3 + +/* offset from EtherType to IP address */ +#define ETYPE_SRCIP_OFFSET 12 +#define ETYPE_DSTIP_OFFSET 16 + +/* offset from IP header to port */ +#define IPHDR_SRCPORT_OFFSET 0 +#define IPHDR_DSTPORT_OFFSET 2 + /* Transmit DMA buffer descriptor Word 1 */ #define GEM_DMA_TXVALID_OFFSET 23 /* timestamp has been captured in the Buffer Descriptor */ #define GEM_DMA_TXVALID_SIZE 1 @@ -583,6 +678,8 @@ #define gem_writel(port, reg, value) (port)->macb_reg_writel((port), GEM_##reg, (value)) #define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg) #define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value)) +#define gem_readl_n(port, reg, idx) (port)->macb_reg_readl((port), GEM_##reg + idx * 4) +#define gem_writel_n(port, reg, idx, value) (port)->macb_reg_writel((port), GEM_##reg + idx * 4, (value)) #define PTP_TS_BUFFER_SIZE 128 /* must be power of 2 */ @@ -920,13 +1017,42 @@ static const struct gem_statistic gem_statistics[] = { #define GEM_STATS_LEN ARRAY_SIZE(gem_statistics) +#define QUEUE_STAT_TITLE(title) { \ + .stat_string = title, \ +} + +/* per queue statistics, each should be unsigned long type */ +struct queue_stats { + union { + unsigned long first; + unsigned long rx_packets; + }; + unsigned long rx_bytes; + unsigned long rx_dropped; + unsigned long tx_packets; + unsigned long tx_bytes; + unsigned long tx_dropped; +}; + +static const struct gem_statistic queue_statistics[] = { + QUEUE_STAT_TITLE("rx_packets"), + QUEUE_STAT_TITLE("rx_bytes"), + QUEUE_STAT_TITLE("rx_dropped"), + QUEUE_STAT_TITLE("tx_packets"), + QUEUE_STAT_TITLE("tx_bytes"), + QUEUE_STAT_TITLE("tx_dropped"), +}; + +#define QUEUE_STATS_LEN ARRAY_SIZE(queue_statistics) + struct macb; +struct macb_queue; struct macb_or_gem_ops { int (*mog_alloc_rx_buffers)(struct macb *bp); void (*mog_free_rx_buffers)(struct macb *bp); void (*mog_init_rings)(struct macb *bp); - int (*mog_rx)(struct macb *bp, int budget); + int (*mog_rx)(struct macb_queue *queue, int budget); }; /* MACB-PTP interface: adapt to platform needs. 
*/ @@ -968,6 +1094,9 @@ struct macb_queue { unsigned int IMR; unsigned int TBQP; unsigned int TBQPH; + unsigned int RBQS; + unsigned int RBQP; + unsigned int RBQPH; unsigned int tx_head, tx_tail; struct macb_dma_desc *tx_ring; @@ -975,6 +1104,16 @@ struct macb_queue { dma_addr_t tx_ring_dma; struct work_struct tx_error_task; + dma_addr_t rx_ring_dma; + dma_addr_t rx_buffers_dma; + unsigned int rx_tail; + unsigned int rx_prepared_head; + struct macb_dma_desc *rx_ring; + struct sk_buff **rx_skbuff; + void *rx_buffers; + struct napi_struct napi; + struct queue_stats stats; + #ifdef CONFIG_MACB_USE_HWSTAMP struct work_struct tx_ts_task; unsigned int tx_ts_head, tx_ts_tail; @@ -982,6 +1121,16 @@ struct macb_queue { #endif }; +struct ethtool_rx_fs_item { + struct ethtool_rx_flow_spec fs; + struct list_head list; +}; + +struct ethtool_rx_fs_list { + struct list_head list; + unsigned int count; +}; + struct macb { void __iomem *regs; bool native_io; @@ -990,11 +1139,6 @@ struct macb { u32 (*macb_reg_readl)(struct macb *bp, int offset); void (*macb_reg_writel)(struct macb *bp, int offset, u32 value); - unsigned int rx_tail; - unsigned int rx_prepared_head; - struct macb_dma_desc *rx_ring; - struct sk_buff **rx_skbuff; - void *rx_buffers; size_t rx_buffer_size; unsigned int rx_ring_size; @@ -1011,15 +1155,11 @@ struct macb { struct clk *tx_clk; struct clk *rx_clk; struct net_device *dev; - struct napi_struct napi; union { struct macb_stats macb; struct gem_stats gem; } hw_stats; - dma_addr_t rx_ring_dma; - dma_addr_t rx_buffers_dma; - struct macb_or_gem_ops macbgem_ops; struct mii_bus *mii_bus; @@ -1032,7 +1172,6 @@ struct macb { unsigned int dma_burst_length; phy_interface_t phy_interface; - struct gpio_desc *reset_gpio; /* AT91RM9200 transmit */ struct sk_buff *skb; /* holds skb until xmit interrupt completes */ @@ -1040,7 +1179,7 @@ int skb_length; /* saved skb length for pci_unmap_single */ unsigned int max_tx_length; - u64 ethtool_stats[GEM_STATS_LEN]; + u64 ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES]; unsigned int rx_frm_len_mask; unsigned int jumbo_max_len; @@ -1057,6 +1196,13 @@ struct macb { struct ptp_clock_info ptp_clock_info; struct tsu_incr tsu_incr; struct hwtstamp_config tstamp_config; + + /* RX queue filter rule set */ + struct ethtool_rx_fs_list rx_fs_list; + spinlock_t rx_fs_lock; + unsigned int max_tuples; + + struct tasklet_struct hresp_err_tasklet; }; #ifdef CONFIG_MACB_USE_HWSTAMP diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 72a67f74b97b..e84afcf1ecb5 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -194,17 +194,17 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index) return index & (bp->rx_ring_size - 1); } -static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) +static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index) { - index = macb_rx_ring_wrap(bp, index); - index = macb_adj_dma_desc_idx(bp, index); - return &bp->rx_ring[index]; + index = macb_rx_ring_wrap(queue->bp, index); + index = macb_adj_dma_desc_idx(queue->bp, index); + return &queue->rx_ring[index]; } -static void *macb_rx_buffer(struct macb *bp, unsigned int index) +static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index) { - return bp->rx_buffers + bp->rx_buffer_size * - macb_rx_ring_wrap(bp, index); + return queue->rx_buffers + queue->bp->rx_buffer_size *
macb_rx_ring_wrap(queue->bp, index); } /* I/O accessors */ @@ -759,7 +759,9 @@ static void macb_tx_error_task(struct work_struct *work) macb_tx_ring_wrap(bp, tail), skb->data); bp->dev->stats.tx_packets++; + queue->stats.tx_packets++; bp->dev->stats.tx_bytes += skb->len; + queue->stats.tx_bytes += skb->len; } } else { /* "Buffers exhausted mid-frame" errors may only happen @@ -859,7 +861,9 @@ static void macb_tx_interrupt(struct macb_queue *queue) macb_tx_ring_wrap(bp, tail), skb->data); bp->dev->stats.tx_packets++; + queue->stats.tx_packets++; bp->dev->stats.tx_bytes += skb->len; + queue->stats.tx_bytes += skb->len; } /* Now we can safely release resources */ @@ -881,24 +885,25 @@ static void macb_tx_interrupt(struct macb_queue *queue) netif_wake_subqueue(bp->dev, queue_index); } -static void gem_rx_refill(struct macb *bp) +static void gem_rx_refill(struct macb_queue *queue) { unsigned int entry; struct sk_buff *skb; dma_addr_t paddr; + struct macb *bp = queue->bp; struct macb_dma_desc *desc; - while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, - bp->rx_ring_size) > 0) { - entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head); + while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail, + bp->rx_ring_size) > 0) { + entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); /* Make hw descriptor updates visible to CPU */ rmb(); - bp->rx_prepared_head++; - desc = macb_rx_desc(bp, entry); + queue->rx_prepared_head++; + desc = macb_rx_desc(queue, entry); - if (!bp->rx_skbuff[entry]) { + if (!queue->rx_skbuff[entry]) { /* allocate sk_buff for this free entry in ring */ skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); if (unlikely(!skb)) { @@ -916,7 +921,7 @@ static void gem_rx_refill(struct macb *bp) break; } - bp->rx_skbuff[entry] = skb; + queue->rx_skbuff[entry] = skb; if (entry == bp->rx_ring_size - 1) paddr |= MACB_BIT(RX_WRAP); @@ -934,18 +939,18 @@ static void gem_rx_refill(struct macb *bp) /* Make descriptor updates visible to hardware */ wmb(); - netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", - bp->rx_prepared_head, bp->rx_tail); + netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", + queue, queue->rx_prepared_head, queue->rx_tail); } /* Mark DMA descriptors from begin up to and not including end as unused */ -static void discard_partial_frame(struct macb *bp, unsigned int begin, +static void discard_partial_frame(struct macb_queue *queue, unsigned int begin, unsigned int end) { unsigned int frag; for (frag = begin; frag != end; frag++) { - struct macb_dma_desc *desc = macb_rx_desc(bp, frag); + struct macb_dma_desc *desc = macb_rx_desc(queue, frag); desc->addr &= ~MACB_BIT(RX_USED); } @@ -959,8 +964,9 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin, */ } -static int gem_rx(struct macb *bp, int budget) +static int gem_rx(struct macb_queue *queue, int budget) { + struct macb *bp = queue->bp; unsigned int len; unsigned int entry; struct sk_buff *skb; @@ -972,8 +978,8 @@ static int gem_rx(struct macb *bp, int budget) dma_addr_t addr; bool rxused; - entry = macb_rx_ring_wrap(bp, bp->rx_tail); - desc = macb_rx_desc(bp, entry); + entry = macb_rx_ring_wrap(bp, queue->rx_tail); + desc = macb_rx_desc(queue, entry); /* Make hw descriptor updates visible to CPU */ rmb(); @@ -985,24 +991,26 @@ static int gem_rx(struct macb *bp, int budget) if (!rxused) break; - bp->rx_tail++; + queue->rx_tail++; count++; if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { netdev_err(bp->dev, "not whole frame pointed by descriptor\n"); 
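+ /* Account the drop in both the netdev-wide and the per-queue counters. */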
bp->dev->stats.rx_dropped++; + queue->stats.rx_dropped++; break; } - skb = bp->rx_skbuff[entry]; + skb = queue->rx_skbuff[entry]; if (unlikely(!skb)) { netdev_err(bp->dev, "inconsistent Rx descriptor chain\n"); bp->dev->stats.rx_dropped++; + queue->stats.rx_dropped++; break; } /* now everything is ready for receiving packet */ - bp->rx_skbuff[entry] = NULL; + queue->rx_skbuff[entry] = NULL; len = ctrl & bp->rx_frm_len_mask; netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); @@ -1019,7 +1027,9 @@ static int gem_rx(struct macb *bp, int budget) skb->ip_summed = CHECKSUM_UNNECESSARY; bp->dev->stats.rx_packets++; + queue->stats.rx_packets++; bp->dev->stats.rx_bytes += skb->len; + queue->stats.rx_bytes += skb->len; gem_ptp_do_rxstamp(bp, skb, desc); @@ -1035,12 +1045,12 @@ static int gem_rx(struct macb *bp, int budget) netif_receive_skb(skb); } - gem_rx_refill(bp); + gem_rx_refill(queue); return count; } -static int macb_rx_frame(struct macb *bp, unsigned int first_frag, +static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag, unsigned int last_frag) { unsigned int len; @@ -1048,8 +1058,9 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, unsigned int offset; struct sk_buff *skb; struct macb_dma_desc *desc; + struct macb *bp = queue->bp; - desc = macb_rx_desc(bp, last_frag); + desc = macb_rx_desc(queue, last_frag); len = desc->ctrl & bp->rx_frm_len_mask; netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", @@ -1068,7 +1079,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, if (!skb) { bp->dev->stats.rx_dropped++; for (frag = first_frag; ; frag++) { - desc = macb_rx_desc(bp, frag); + desc = macb_rx_desc(queue, frag); desc->addr &= ~MACB_BIT(RX_USED); if (frag == last_frag) break; @@ -1096,10 +1107,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, frag_len = len - offset; } skb_copy_to_linear_data_offset(skb, offset, - macb_rx_buffer(bp, frag), + macb_rx_buffer(queue, frag), frag_len); offset += bp->rx_buffer_size; - desc = macb_rx_desc(bp, frag); + desc = macb_rx_desc(queue, frag); desc->addr &= ~MACB_BIT(RX_USED); if (frag == last_frag) @@ -1121,32 +1132,34 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, return 0; } -static inline void macb_init_rx_ring(struct macb *bp) +static inline void macb_init_rx_ring(struct macb_queue *queue) { + struct macb *bp = queue->bp; dma_addr_t addr; struct macb_dma_desc *desc = NULL; int i; - addr = bp->rx_buffers_dma; + addr = queue->rx_buffers_dma; for (i = 0; i < bp->rx_ring_size; i++) { - desc = macb_rx_desc(bp, i); + desc = macb_rx_desc(queue, i); macb_set_addr(bp, desc, addr); desc->ctrl = 0; addr += bp->rx_buffer_size; } desc->addr |= MACB_BIT(RX_WRAP); - bp->rx_tail = 0; + queue->rx_tail = 0; } -static int macb_rx(struct macb *bp, int budget) +static int macb_rx(struct macb_queue *queue, int budget) { + struct macb *bp = queue->bp; bool reset_rx_queue = false; int received = 0; unsigned int tail; int first_frag = -1; - for (tail = bp->rx_tail; budget > 0; tail++) { - struct macb_dma_desc *desc = macb_rx_desc(bp, tail); + for (tail = queue->rx_tail; budget > 0; tail++) { + struct macb_dma_desc *desc = macb_rx_desc(queue, tail); u32 ctrl; /* Make hw descriptor updates visible to CPU */ @@ -1159,7 +1172,7 @@ static int macb_rx(struct macb *bp, int budget) if (ctrl & MACB_BIT(RX_SOF)) { if (first_frag != -1) - discard_partial_frame(bp, first_frag, tail); + discard_partial_frame(queue, first_frag, tail); first_frag = tail; } @@ -1171,7 
+1184,7 @@ static int macb_rx(struct macb *bp, int budget) continue; } - dropped = macb_rx_frame(bp, first_frag, tail); + dropped = macb_rx_frame(queue, first_frag, tail); first_frag = -1; if (unlikely(dropped < 0)) { reset_rx_queue = true; @@ -1195,8 +1208,8 @@ static int macb_rx(struct macb *bp, int budget) ctrl = macb_readl(bp, NCR); macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); - macb_init_rx_ring(bp); - macb_writel(bp, RBQP, bp->rx_ring_dma); + macb_init_rx_ring(queue); + queue_writel(queue, RBQP, queue->rx_ring_dma); macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); @@ -1205,16 +1218,17 @@ static int macb_rx(struct macb *bp, int budget) } if (first_frag != -1) - bp->rx_tail = first_frag; + queue->rx_tail = first_frag; else - bp->rx_tail = tail; + queue->rx_tail = tail; return received; } static int macb_poll(struct napi_struct *napi, int budget) { - struct macb *bp = container_of(napi, struct macb, napi); + struct macb_queue *queue = container_of(napi, struct macb_queue, napi); + struct macb *bp = queue->bp; int work_done; u32 status; @@ -1224,7 +1238,7 @@ static int macb_poll(struct napi_struct *napi, int budget) netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", (unsigned long)status, budget); - work_done = bp->macbgem_ops.mog_rx(bp, budget); + work_done = bp->macbgem_ops.mog_rx(queue, budget); if (work_done < budget) { napi_complete_done(napi, work_done); @@ -1232,10 +1246,10 @@ static int macb_poll(struct napi_struct *napi, int budget) status = macb_readl(bp, RSR); if (status) { if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) - macb_writel(bp, ISR, MACB_BIT(RCOMP)); + queue_writel(queue, ISR, MACB_BIT(RCOMP)); napi_reschedule(napi); } else { - macb_writel(bp, IER, MACB_RX_INT_FLAGS); + queue_writel(queue, IER, MACB_RX_INT_FLAGS); } } @@ -1244,6 +1258,57 @@ static int macb_poll(struct napi_struct *napi, int budget) return work_done; } +static void macb_hresp_error_task(unsigned long data) +{ + struct macb *bp = (struct macb *)data; + struct net_device *dev = bp->dev; + struct macb_queue *queue = bp->queues; + unsigned int q; + u32 ctrl; + + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + queue_writel(queue, IDR, MACB_RX_INT_FLAGS | + MACB_TX_INT_FLAGS | + MACB_BIT(HRESP)); + } + ctrl = macb_readl(bp, NCR); + ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); + macb_writel(bp, NCR, ctrl); + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + + bp->macbgem_ops.mog_init_rings(bp); + + /* Initialize TX and RX buffers */ + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + if (bp->hw_dma_cap & HW_DMA_CAP_64B) + queue_writel(queue, RBQPH, + upper_32_bits(queue->rx_ring_dma)); +#endif + queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + if (bp->hw_dma_cap & HW_DMA_CAP_64B) + queue_writel(queue, TBQPH, + upper_32_bits(queue->tx_ring_dma)); +#endif + + /* Enable interrupts */ + queue_writel(queue, IER, + MACB_RX_INT_FLAGS | + MACB_TX_INT_FLAGS | + MACB_BIT(HRESP)); + } + + ctrl |= MACB_BIT(RE) | MACB_BIT(TE); + macb_writel(bp, NCR, ctrl); + + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); +} + static irqreturn_t macb_interrupt(int irq, void *dev_id) { struct macb_queue *queue = dev_id; @@ -1282,9 +1347,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, MACB_BIT(RCOMP)); - if (napi_schedule_prep(&bp->napi)) { + if 
(napi_schedule_prep(&queue->napi)) { netdev_vdbg(bp->dev, "scheduling RX softirq\n"); - __napi_schedule(&bp->napi); + __napi_schedule(&queue->napi); } } @@ -1333,10 +1398,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) } if (status & MACB_BIT(HRESP)) { - /* TODO: Reset the hardware, and maybe move the - * netdev_err to a lower-priority context as well - * (work queue?) - */ + tasklet_schedule(&bp->hresp_err_tasklet); netdev_err(dev, "DMA bus error: HRESP not OK\n"); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) @@ -1708,38 +1770,44 @@ static void gem_free_rx_buffers(struct macb *bp) { struct sk_buff *skb; struct macb_dma_desc *desc; + struct macb_queue *queue; dma_addr_t addr; + unsigned int q; int i; - if (!bp->rx_skbuff) - return; + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + if (!queue->rx_skbuff) + continue; - for (i = 0; i < bp->rx_ring_size; i++) { - skb = bp->rx_skbuff[i]; + for (i = 0; i < bp->rx_ring_size; i++) { + skb = queue->rx_skbuff[i]; - if (!skb) - continue; + if (!skb) + continue; - desc = macb_rx_desc(bp, i); - addr = macb_get_addr(bp, desc); + desc = macb_rx_desc(queue, i); + addr = macb_get_addr(bp, desc); - dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, - DMA_FROM_DEVICE); - dev_kfree_skb_any(skb); - skb = NULL; - } + dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + skb = NULL; + } - kfree(bp->rx_skbuff); - bp->rx_skbuff = NULL; + kfree(queue->rx_skbuff); + queue->rx_skbuff = NULL; + } } static void macb_free_rx_buffers(struct macb *bp) { - if (bp->rx_buffers) { + struct macb_queue *queue = &bp->queues[0]; + + if (queue->rx_buffers) { dma_free_coherent(&bp->pdev->dev, bp->rx_ring_size * bp->rx_buffer_size, - bp->rx_buffers, bp->rx_buffers_dma); - bp->rx_buffers = NULL; + queue->rx_buffers, queue->rx_buffers_dma); + queue->rx_buffers = NULL; } } @@ -1748,11 +1816,12 @@ static void macb_free_consistent(struct macb *bp) struct macb_queue *queue; unsigned int q; + queue = &bp->queues[0]; bp->macbgem_ops.mog_free_rx_buffers(bp); - if (bp->rx_ring) { + if (queue->rx_ring) { dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp), - bp->rx_ring, bp->rx_ring_dma); - bp->rx_ring = NULL; + queue->rx_ring, queue->rx_ring_dma); + queue->rx_ring = NULL; } for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { @@ -1768,32 +1837,37 @@ static void macb_free_consistent(struct macb *bp) static int gem_alloc_rx_buffers(struct macb *bp) { + struct macb_queue *queue; + unsigned int q; int size; - size = bp->rx_ring_size * sizeof(struct sk_buff *); - bp->rx_skbuff = kzalloc(size, GFP_KERNEL); - if (!bp->rx_skbuff) - return -ENOMEM; - else - netdev_dbg(bp->dev, - "Allocated %d RX struct sk_buff entries at %p\n", - bp->rx_ring_size, bp->rx_skbuff); + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + size = bp->rx_ring_size * sizeof(struct sk_buff *); + queue->rx_skbuff = kzalloc(size, GFP_KERNEL); + if (!queue->rx_skbuff) + return -ENOMEM; + else + netdev_dbg(bp->dev, + "Allocated %d RX struct sk_buff entries at %p\n", + bp->rx_ring_size, queue->rx_skbuff); + } return 0; } static int macb_alloc_rx_buffers(struct macb *bp) { + struct macb_queue *queue = &bp->queues[0]; int size; size = bp->rx_ring_size * bp->rx_buffer_size; - bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, - &bp->rx_buffers_dma, GFP_KERNEL); - if (!bp->rx_buffers) + queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, + &queue->rx_buffers_dma, GFP_KERNEL); + if 
(!queue->rx_buffers) return -ENOMEM; netdev_dbg(bp->dev, "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", - size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); + size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers); return 0; } @@ -1819,17 +1893,16 @@ static int macb_alloc_consistent(struct macb *bp) queue->tx_skb = kmalloc(size, GFP_KERNEL); if (!queue->tx_skb) goto out_err; - } - - size = RX_RING_BYTES(bp); - bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, - &bp->rx_ring_dma, GFP_KERNEL); - if (!bp->rx_ring) - goto out_err; - netdev_dbg(bp->dev, - "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", - size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); + size = RX_RING_BYTES(bp); + queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, + &queue->rx_ring_dma, GFP_KERNEL); + if (!queue->rx_ring) + goto out_err; + netdev_dbg(bp->dev, + "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", + size, (unsigned long)queue->rx_ring_dma, queue->rx_ring); + } if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) goto out_err; @@ -1856,12 +1929,13 @@ static void gem_init_rings(struct macb *bp) desc->ctrl |= MACB_BIT(TX_WRAP); queue->tx_head = 0; queue->tx_tail = 0; - } - bp->rx_tail = 0; - bp->rx_prepared_head = 0; + queue->rx_tail = 0; + queue->rx_prepared_head = 0; + + gem_rx_refill(queue); + } - gem_rx_refill(bp); } static void macb_init_rings(struct macb *bp) @@ -1869,7 +1943,7 @@ static void macb_init_rings(struct macb *bp) int i; struct macb_dma_desc *desc = NULL; - macb_init_rx_ring(bp); + macb_init_rx_ring(&bp->queues[0]); for (i = 0; i < bp->tx_ring_size; i++) { desc = macb_tx_desc(&bp->queues[0], i); @@ -1978,11 +2052,20 @@ static u32 macb_dbw(struct macb *bp) */ static void macb_configure_dma(struct macb *bp) { + struct macb_queue *queue; + u32 buffer_size; + unsigned int q; u32 dmacfg; + buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; if (macb_is_gem(bp)) { dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); - dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE); + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + if (q) + queue_writel(queue, RBQS, buffer_size); + else + dmacfg |= GEM_BF(RXBS, buffer_size); + } if (bp->dma_burst_length) dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); @@ -2051,12 +2134,12 @@ static void macb_init_hw(struct macb *bp) macb_configure_dma(bp); /* Initialize TX and RX buffers */ - macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma)); + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - if (bp->hw_dma_cap & HW_DMA_CAP_64B) - macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma)); + if (bp->hw_dma_cap & HW_DMA_CAP_64B) + queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma)); #endif - for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT if (bp->hw_dma_cap & HW_DMA_CAP_64B) @@ -2197,6 +2280,8 @@ static int macb_open(struct net_device *dev) { struct macb *bp = netdev_priv(dev); size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; + struct macb_queue *queue; + unsigned int q; int err; netdev_dbg(bp->dev, "open\n"); @@ -2218,11 +2303,12 @@ static int macb_open(struct net_device *dev) return err; } - napi_enable(&bp->napi); - bp->macbgem_ops.mog_init_rings(bp); macb_init_hw(bp); + for (q = 0, 
queue = bp->queues; q < bp->num_queues; ++q, ++queue) + napi_enable(&queue->napi); + /* schedule a link state check */ phy_start(dev->phydev); @@ -2237,10 +2323,14 @@ static int macb_open(struct net_device *dev) static int macb_close(struct net_device *dev) { struct macb *bp = netdev_priv(dev); + struct macb_queue *queue; unsigned long flags; + unsigned int q; netif_tx_stop_all_queues(dev); - napi_disable(&bp->napi); + + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) + napi_disable(&queue->napi); if (dev->phydev) phy_stop(dev->phydev); @@ -2270,7 +2360,10 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu) static void gem_update_stats(struct macb *bp) { - unsigned int i; + struct macb_queue *queue; + unsigned int i, q, idx; + unsigned long *stat; + u32 *p = &bp->hw_stats.gem.tx_octets_31_0; for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { @@ -2287,6 +2380,11 @@ static void gem_update_stats(struct macb *bp) *(++p) += val; } } + + idx = GEM_STATS_LEN; + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) + for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat) + bp->ethtool_stats[idx++] = *stat; } static struct net_device_stats *gem_get_stats(struct macb *bp) @@ -2334,14 +2432,17 @@ static void gem_get_ethtool_stats(struct net_device *dev, bp = netdev_priv(dev); gem_update_stats(bp); - memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN); + memcpy(data, &bp->ethtool_stats, sizeof(u64) + * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES)); } static int gem_get_sset_count(struct net_device *dev, int sset) { + struct macb *bp = netdev_priv(dev); + switch (sset) { case ETH_SS_STATS: - return GEM_STATS_LEN; + return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; default: return -EOPNOTSUPP; } @@ -2349,13 +2450,25 @@ static int gem_get_sset_count(struct net_device *dev, int sset) static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) { + char stat_string[ETH_GSTRING_LEN]; + struct macb *bp = netdev_priv(dev); + struct macb_queue *queue; unsigned int i; + unsigned int q; switch (sset) { case ETH_SS_STATS: for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN) memcpy(p, gem_statistics[i].stat_string, ETH_GSTRING_LEN); + + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) { + snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s", + q, queue_statistics[i].stat_string); + memcpy(p, stat_string, ETH_GSTRING_LEN); + } + } break; } } @@ -2603,6 +2716,307 @@ static int macb_get_ts_info(struct net_device *netdev, return ethtool_op_get_ts_info(netdev, info); } +static void gem_enable_flow_filters(struct macb *bp, bool enable) +{ + struct ethtool_rx_fs_item *item; + u32 t2_scr; + int num_t2_scr; + + num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); + + list_for_each_entry(item, &bp->rx_fs_list.list, list) { + struct ethtool_rx_flow_spec *fs = &item->fs; + struct ethtool_tcpip4_spec *tp4sp_m; + + if (fs->location >= num_t2_scr) + continue; + + t2_scr = gem_readl_n(bp, SCRT2, fs->location); + + /* enable/disable screener regs for the flow entry */ + t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr); + + /* only enable fields with no masking */ + tp4sp_m = &(fs->m_u.tcp_ip4_spec); + + if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF)) + t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr); + else + t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr); + + if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF)) + t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr); + else + t2_scr = GEM_BFINS(CMPBEN, 
0, t2_scr); + + if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF))) + t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr); + else + t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr); + + gem_writel_n(bp, SCRT2, fs->location, t2_scr); + } +} + +static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; + uint16_t index = fs->location; + u32 w0, w1, t2_scr; + bool cmp_a = false; + bool cmp_b = false; + bool cmp_c = false; + + tp4sp_v = &(fs->h_u.tcp_ip4_spec); + tp4sp_m = &(fs->m_u.tcp_ip4_spec); + + /* ignore field if any masking set */ + if (tp4sp_m->ip4src == 0xFFFFFFFF) { + /* 1st compare reg - IP source address */ + w0 = 0; + w1 = 0; + w0 = tp4sp_v->ip4src; + w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ + w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); + w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1); + gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0); + gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1); + cmp_a = true; + } + + /* ignore field if any masking set */ + if (tp4sp_m->ip4dst == 0xFFFFFFFF) { + /* 2nd compare reg - IP destination address */ + w0 = 0; + w1 = 0; + w0 = tp4sp_v->ip4dst; + w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ + w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); + w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1); + gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0); + gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1); + cmp_b = true; + } + + /* ignore both port fields if masking set in both */ + if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) { + /* 3rd compare reg - source port, destination port */ + w0 = 0; + w1 = 0; + w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1); + if (tp4sp_m->psrc == tp4sp_m->pdst) { + w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0); + w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); + w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ + w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); + } else { + /* only one port definition */ + w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */ + w0 = GEM_BFINS(T2MASK, 0xFFFF, w0); + if (tp4sp_m->psrc == 0xFFFF) { /* src port */ + w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0); + w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); + } else { /* dst port */ + w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); + w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1); + } + } + gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0); + gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1); + cmp_c = true; + } + + t2_scr = 0; + t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr); + t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr); + if (cmp_a) + t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr); + if (cmp_b) + t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr); + if (cmp_c) + t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr); + gem_writel_n(bp, SCRT2, index, t2_scr); +} + +static int gem_add_flow_filter(struct net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + struct macb *bp = netdev_priv(netdev); + struct ethtool_rx_flow_spec *fs = &cmd->fs; + struct ethtool_rx_fs_item *item, *newfs; + unsigned long flags; + int ret = -EINVAL; + bool added = false; + + newfs = kmalloc(sizeof(*newfs), GFP_KERNEL); + if (newfs == NULL) + return -ENOMEM; + memcpy(&newfs->fs, fs, sizeof(newfs->fs)); + + netdev_dbg(netdev, + "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", + fs->flow_type, 
(int)fs->ring_cookie, fs->location, + htonl(fs->h_u.tcp_ip4_spec.ip4src), + htonl(fs->h_u.tcp_ip4_spec.ip4dst), + htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst)); + + spin_lock_irqsave(&bp->rx_fs_lock, flags); + + /* find correct place to add in list */ + list_for_each_entry(item, &bp->rx_fs_list.list, list) { + if (item->fs.location > newfs->fs.location) { + list_add_tail(&newfs->list, &item->list); + added = true; + break; + } else if (item->fs.location == fs->location) { + netdev_err(netdev, "Rule not added: location %d not free!\n", + fs->location); + ret = -EBUSY; + goto err; + } + } + if (!added) + list_add_tail(&newfs->list, &bp->rx_fs_list.list); + + gem_prog_cmp_regs(bp, fs); + bp->rx_fs_list.count++; + /* enable filtering if NTUPLE on */ + if (netdev->features & NETIF_F_NTUPLE) + gem_enable_flow_filters(bp, 1); + + spin_unlock_irqrestore(&bp->rx_fs_lock, flags); + return 0; + +err: + spin_unlock_irqrestore(&bp->rx_fs_lock, flags); + kfree(newfs); + return ret; +} + +static int gem_del_flow_filter(struct net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + struct macb *bp = netdev_priv(netdev); + struct ethtool_rx_fs_item *item; + struct ethtool_rx_flow_spec *fs; + unsigned long flags; + + spin_lock_irqsave(&bp->rx_fs_lock, flags); + + list_for_each_entry(item, &bp->rx_fs_list.list, list) { + if (item->fs.location == cmd->fs.location) { + /* disable screener regs for the flow entry */ + fs = &(item->fs); + netdev_dbg(netdev, + "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", + fs->flow_type, (int)fs->ring_cookie, fs->location, + htonl(fs->h_u.tcp_ip4_spec.ip4src), + htonl(fs->h_u.tcp_ip4_spec.ip4dst), + htons(fs->h_u.tcp_ip4_spec.psrc), + htons(fs->h_u.tcp_ip4_spec.pdst)); + + gem_writel_n(bp, SCRT2, fs->location, 0); + + list_del(&item->list); + bp->rx_fs_list.count--; + spin_unlock_irqrestore(&bp->rx_fs_lock, flags); + kfree(item); + return 0; + } + } + + spin_unlock_irqrestore(&bp->rx_fs_lock, flags); + return -EINVAL; +} + +static int gem_get_flow_entry(struct net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + struct macb *bp = netdev_priv(netdev); + struct ethtool_rx_fs_item *item; + + list_for_each_entry(item, &bp->rx_fs_list.list, list) { + if (item->fs.location == cmd->fs.location) { + memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); + return 0; + } + } + return -EINVAL; +} + +static int gem_get_all_flow_entries(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct macb *bp = netdev_priv(netdev); + struct ethtool_rx_fs_item *item; + uint32_t cnt = 0; + + list_for_each_entry(item, &bp->rx_fs_list.list, list) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = item->fs.location; + cnt++; + } + cmd->data = bp->max_tuples; + cmd->rule_cnt = cnt; + + return 0; +} + +static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct macb *bp = netdev_priv(netdev); + int ret = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = bp->num_queues; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = bp->rx_fs_list.count; + break; + case ETHTOOL_GRXCLSRULE: + ret = gem_get_flow_entry(netdev, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = gem_get_all_flow_entries(netdev, cmd, rule_locs); + break; + default: + netdev_err(netdev, + "Command parameter %d is not supported\n", cmd->cmd); + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct 
macb *bp = netdev_priv(netdev); + int ret; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + if ((cmd->fs.location >= bp->max_tuples) + || (cmd->fs.ring_cookie >= bp->num_queues)) { + ret = -EINVAL; + break; + } + ret = gem_add_flow_filter(netdev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = gem_del_flow_filter(netdev, cmd); + break; + default: + netdev_err(netdev, + "Command parameter %d is not supported\n", cmd->cmd); + ret = -EOPNOTSUPP; + } + + return ret; +} + static const struct ethtool_ops macb_ethtool_ops = { .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, @@ -2628,6 +3042,8 @@ static const struct ethtool_ops gem_ethtool_ops = { .set_link_ksettings = phy_ethtool_set_link_ksettings, .get_ringparam = macb_get_ringparam, .set_ringparam = macb_set_ringparam, + .get_rxnfc = gem_get_rxnfc, + .set_rxnfc = gem_set_rxnfc, }; static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) @@ -2685,6 +3101,12 @@ static int macb_set_features(struct net_device *netdev, gem_writel(bp, NCFGR, netcfg); } + /* RX Flow Filters */ + if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) { + bool turn_on = features & NETIF_F_NTUPLE; + + gem_enable_flow_filters(bp, turn_on); + } return 0; } @@ -2850,7 +3272,7 @@ static int macb_init(struct platform_device *pdev) struct macb *bp = netdev_priv(dev); struct macb_queue *queue; int err; - u32 val; + u32 val, reg; bp->tx_ring_size = DEFAULT_TX_RING_SIZE; bp->rx_ring_size = DEFAULT_RX_RING_SIZE; @@ -2865,15 +3287,20 @@ static int macb_init(struct platform_device *pdev) queue = &bp->queues[q]; queue->bp = bp; + netif_napi_add(dev, &queue->napi, macb_poll, 64); if (hw_q) { queue->ISR = GEM_ISR(hw_q - 1); queue->IER = GEM_IER(hw_q - 1); queue->IDR = GEM_IDR(hw_q - 1); queue->IMR = GEM_IMR(hw_q - 1); queue->TBQP = GEM_TBQP(hw_q - 1); + queue->RBQP = GEM_RBQP(hw_q - 1); + queue->RBQS = GEM_RBQS(hw_q - 1); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - if (bp->hw_dma_cap & HW_DMA_CAP_64B) + if (bp->hw_dma_cap & HW_DMA_CAP_64B) { queue->TBQPH = GEM_TBQPH(hw_q - 1); + queue->RBQPH = GEM_RBQPH(hw_q - 1); + } #endif } else { /* queue0 uses legacy registers */ @@ -2882,9 +3309,12 @@ static int macb_init(struct platform_device *pdev) queue->IDR = MACB_IDR; queue->IMR = MACB_IMR; queue->TBQP = MACB_TBQP; + queue->RBQP = MACB_RBQP; #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - if (bp->hw_dma_cap & HW_DMA_CAP_64B) + if (bp->hw_dma_cap & HW_DMA_CAP_64B) { queue->TBQPH = MACB_TBQPH; + queue->RBQPH = MACB_RBQPH; + } #endif } @@ -2908,7 +3338,6 @@ static int macb_init(struct platform_device *pdev) } dev->netdev_ops = &macb_netdev_ops; - netif_napi_add(dev, &bp->napi, macb_poll, 64); /* setup appropriated routines according to adapter type */ if (macb_is_gem(bp)) { @@ -2941,6 +3370,30 @@ static int macb_init(struct platform_device *pdev) dev->hw_features &= ~NETIF_F_SG; dev->features = dev->hw_features; + /* Check RX Flow Filters support. 
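+ * A 4-tuple here means the IPv4 source and destination addresses plus + * the TCP/UDP source and destination ports.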
+ * Max Rx flows set by availability of screeners & compare regs: + * each 4-tuple define requires 1 T2 screener reg + 3 compare regs + */ + reg = gem_readl(bp, DCFG8); + bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), + GEM_BFEXT(T2SCR, reg)); + if (bp->max_tuples > 0) { + /* also needs one ethtype match to check IPv4 */ + if (GEM_BFEXT(SCR2ETH, reg) > 0) { + /* program this reg now */ + reg = 0; + reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg); + gem_writel_n(bp, ETHT, SCRT2_ETHT, reg); + /* Filtering is supported in hw but don't enable it in kernel now */ + dev->hw_features |= NETIF_F_NTUPLE; + /* init Rx flow definitions */ + INIT_LIST_HEAD(&bp->rx_fs_list.list); + bp->rx_fs_list.count = 0; + spin_lock_init(&bp->rx_fs_lock); + } else + bp->max_tuples = 0; + } + if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { val = 0; if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) @@ -2977,34 +3430,35 @@ static int macb_init(struct platform_device *pdev) static int at91ether_start(struct net_device *dev) { struct macb *lp = netdev_priv(dev); + struct macb_queue *q = &lp->queues[0]; struct macb_dma_desc *desc; dma_addr_t addr; u32 ctl; int i; - lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, + q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, (AT91ETHER_MAX_RX_DESCR * macb_dma_desc_get_size(lp)), - &lp->rx_ring_dma, GFP_KERNEL); - if (!lp->rx_ring) + &q->rx_ring_dma, GFP_KERNEL); + if (!q->rx_ring) return -ENOMEM; - lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, + q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, - &lp->rx_buffers_dma, GFP_KERNEL); - if (!lp->rx_buffers) { + &q->rx_buffers_dma, GFP_KERNEL); + if (!q->rx_buffers) { dma_free_coherent(&lp->pdev->dev, AT91ETHER_MAX_RX_DESCR * macb_dma_desc_get_size(lp), - lp->rx_ring, lp->rx_ring_dma); - lp->rx_ring = NULL; + q->rx_ring, q->rx_ring_dma); + q->rx_ring = NULL; return -ENOMEM; } - addr = lp->rx_buffers_dma; + addr = q->rx_buffers_dma; for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { - desc = macb_rx_desc(lp, i); + desc = macb_rx_desc(q, i); macb_set_addr(lp, desc, addr); desc->ctrl = 0; addr += AT91ETHER_MAX_RBUFF_SZ; @@ -3014,10 +3468,10 @@ static int at91ether_start(struct net_device *dev) desc->addr |= MACB_BIT(RX_WRAP); /* Reset buffer index */ - lp->rx_tail = 0; + q->rx_tail = 0; /* Program address of descriptor list in Rx Buffer Queue register */ - macb_writel(lp, RBQP, lp->rx_ring_dma); + macb_writel(lp, RBQP, q->rx_ring_dma); /* Enable Receive and Transmit */ ctl = macb_readl(lp, NCR); @@ -3064,6 +3518,7 @@ static int at91ether_open(struct net_device *dev) static int at91ether_close(struct net_device *dev) { struct macb *lp = netdev_priv(dev); + struct macb_queue *q = &lp->queues[0]; u32 ctl; /* Disable Receiver and Transmitter */ @@ -3084,13 +3539,13 @@ static int at91ether_close(struct net_device *dev) dma_free_coherent(&lp->pdev->dev, AT91ETHER_MAX_RX_DESCR * macb_dma_desc_get_size(lp), - lp->rx_ring, lp->rx_ring_dma); - lp->rx_ring = NULL; + q->rx_ring, q->rx_ring_dma); + q->rx_ring = NULL; dma_free_coherent(&lp->pdev->dev, AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, - lp->rx_buffers, lp->rx_buffers_dma); - lp->rx_buffers = NULL; + q->rx_buffers, q->rx_buffers_dma); + q->rx_buffers = NULL; return 0; } @@ -3134,14 +3589,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) static void at91ether_rx(struct net_device *dev) { struct macb *lp = netdev_priv(dev); + struct macb_queue *q = &lp->queues[0]; struct macb_dma_desc *desc; unsigned char 
*p_recv; struct sk_buff *skb; unsigned int pktlen; - desc = macb_rx_desc(lp, lp->rx_tail); + desc = macb_rx_desc(q, q->rx_tail); while (desc->addr & MACB_BIT(RX_USED)) { - p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ; + p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ; pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); skb = netdev_alloc_skb(dev, pktlen + 2); if (skb) { @@ -3163,12 +3619,12 @@ static void at91ether_rx(struct net_device *dev) desc->addr &= ~MACB_BIT(RX_USED); /* wrap after last buffer */ - if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) - lp->rx_tail = 0; + if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) + q->rx_tail = 0; else - lp->rx_tail++; + q->rx_tail++; - desc = macb_rx_desc(lp, lp->rx_tail); + desc = macb_rx_desc(q, q->rx_tail); } } @@ -3394,7 +3850,6 @@ static int macb_probe(struct platform_device *pdev) = macb_config->clk_init; int (*init)(struct platform_device *) = macb_config->init; struct device_node *np = pdev->dev.of_node; - struct device_node *phy_node; struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; unsigned int queue_mask, num_queues; struct macb_platform_data *pdata; @@ -3500,18 +3955,6 @@ static int macb_probe(struct platform_device *pdev) else macb_get_hwaddr(bp); - /* Power up the PHY if there is a GPIO reset */ - phy_node = of_get_next_available_child(np, NULL); - if (phy_node) { - int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0); - - if (gpio_is_valid(gpio)) { - bp->reset_gpio = gpio_to_desc(gpio); - gpiod_direction_output(bp->reset_gpio, 1); - } - } - of_node_put(phy_node); - err = of_get_phy_mode(np); if (err < 0) { pdata = dev_get_platdata(&pdev->dev); @@ -3542,6 +3985,9 @@ static int macb_probe(struct platform_device *pdev) goto err_out_unregister_mdio; } + tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task, + (unsigned long)bp); + phy_attached_info(phydev); netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", @@ -3558,10 +4004,6 @@ err_out_unregister_mdio: of_phy_deregister_fixed_link(np); mdiobus_free(bp->mii_bus); - /* Shutdown the PHY if there is a GPIO reset */ - if (bp->reset_gpio) - gpiod_set_value(bp->reset_gpio, 0); - err_out_free_netdev: free_netdev(dev); @@ -3592,10 +4034,6 @@ static int macb_remove(struct platform_device *pdev) dev->phydev = NULL; mdiobus_free(bp->mii_bus); - /* Shutdown the PHY if there is a GPIO reset */ - if (bp->reset_gpio) - gpiod_set_value(bp->reset_gpio, 0); - unregister_netdev(dev); clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 63be75eb34d2..043e3c11c42b 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -27,6 +27,7 @@ config THUNDER_NIC_PF config THUNDER_NIC_VF tristate "Thunder Virtual function driver" + imply CAVIUM_PTP depends on 64BIT ---help--- This driver supports Thunder's NIC virtual function @@ -50,6 +51,18 @@ config THUNDER_NIC_RGX This driver supports configuring XCV block of RGX interface present on CN81XX chip. +config CAVIUM_PTP + tristate "Cavium PTP coprocessor as PTP clock" + depends on 64BIT + imply PTP_1588_CLOCK + default y + ---help--- + This driver adds support for the Precision Time Protocol Clocks and + Timestamping coprocessor (PTP) found on Cavium processors. + PTP provides timestamping mechanism that is suitable for use in IEEE 1588 + Precision Time Protocol or other purposes. Timestamps can be used in + BGX, TNS, GTI, and NIC blocks. 
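Because THUNDER_NIC_VF only does `imply CAVIUM_PTP`, the PTP driver may be disabled or modular while the NIC driver is built in, so consumers must cope with the clock being absent; that is what the IS_ENABLED(CONFIG_CAVIUM_PTP) stubs in cavium_ptp.h further down are for. A minimal sketch of the consumer pattern, assuming only the cavium_ptp_get()/cavium_ptp_put() API added by this patch (the function name and fallback policy here are illustrative, not part of the patch):

#include <linux/err.h>
#include "cavium_ptp.h"

/* Bind to the PTP coprocessor if present; run without hardware
 * timestamping otherwise. The CONFIG_CAVIUM_PTP=n stub returns
 * ERR_PTR(-ENODEV), so the same code builds and runs with the PTP
 * driver compiled out. */
static int example_bind_ptp_clock(struct cavium_ptp **clk)
{
	struct cavium_ptp *ptp = cavium_ptp_get();

	if (IS_ERR(ptp)) {
		/* Device exists but its probe has not run yet */
		if (PTR_ERR(ptp) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		ptp = NULL; /* no usable PHC: disable HW timestamps */
	}
	*clk = ptp;
	return 0;
}

On teardown, a non-NULL pointer must be released with cavium_ptp_put(), which drops the PCI device reference taken by cavium_ptp_get().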
+ config LIQUIDIO tristate "Cavium LiquidIO support" depends on 64BIT diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile index 872da9f7c31a..946bba84e81d 100644 --- a/drivers/net/ethernet/cavium/Makefile +++ b/drivers/net/ethernet/cavium/Makefile @@ -1,6 +1,7 @@ # # Makefile for the Cavium ethernet device drivers. # +obj-$(CONFIG_NET_VENDOR_CAVIUM) += common/ obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/ obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/ obj-$(CONFIG_NET_VENDOR_CAVIUM) += octeon/ diff --git a/drivers/net/ethernet/cavium/common/Makefile b/drivers/net/ethernet/cavium/common/Makefile new file mode 100644 index 000000000000..dd8561b8060b --- /dev/null +++ b/drivers/net/ethernet/cavium/common/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_CAVIUM_PTP) += cavium_ptp.o diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c new file mode 100644 index 000000000000..c87c9c684a33 --- /dev/null +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: GPL-2.0 +/* cavium_ptp.c - PTP 1588 clock on Cavium hardware + * Copyright (c) 2003-2015, 2017 Cavium, Inc. + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/timecounter.h> +#include <linux/pci.h> + +#include "cavium_ptp.h" + +#define DRV_NAME "Cavium PTP Driver" + +#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C +#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E + +#define PCI_PTP_BAR_NO 0 +#define PCI_RST_BAR_NO 0 + +#define PTP_CLOCK_CFG 0xF00ULL +#define PTP_CLOCK_CFG_PTP_EN BIT(0) +#define PTP_CLOCK_LO 0xF08ULL +#define PTP_CLOCK_HI 0xF10ULL +#define PTP_CLOCK_COMP 0xF18ULL + +#define RST_BOOT 0x1600ULL +#define CLOCK_BASE_RATE 50000000ULL + +static u64 ptp_cavium_clock_get(void) +{ + struct pci_dev *pdev; + void __iomem *base; + u64 ret = CLOCK_BASE_RATE * 16; + + pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_CAVIUM_RST, NULL); + if (!pdev) + goto error; + + base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO); + if (!base) + goto error_put_pdev; + + ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT) >> 33) & 0x3f); + + iounmap(base); + +error_put_pdev: + pci_dev_put(pdev); + +error: + return ret; +} + +struct cavium_ptp *cavium_ptp_get(void) +{ + struct cavium_ptp *ptp; + struct pci_dev *pdev; + + pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_CAVIUM_PTP, NULL); + if (!pdev) + return ERR_PTR(-ENODEV); + + ptp = pci_get_drvdata(pdev); + if (!ptp) + ptp = ERR_PTR(-EPROBE_DEFER); + if (IS_ERR(ptp)) + pci_dev_put(pdev); + + return ptp; +} +EXPORT_SYMBOL(cavium_ptp_get); + +void cavium_ptp_put(struct cavium_ptp *ptp) +{ + pci_dev_put(ptp->pdev); +} +EXPORT_SYMBOL(cavium_ptp_put); + +/** + * cavium_ptp_adjfine() - Adjust ptp frequency + * @ptp: PTP clock info + * @scaled_ppm: how much to adjust by, in parts per million, but with a + * 16 bit binary fractional field + */ +static int cavium_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) +{ + struct cavium_ptp *clock = + container_of(ptp_info, struct cavium_ptp, ptp_info); + unsigned long flags; + u64 comp; + u64 adj; + bool neg_adj = false; + + if (scaled_ppm < 0) { + neg_adj = true; + scaled_ppm = -scaled_ppm; + } + + /* The hardware adds the clock compensation value to the PTP clock + * on every coprocessor clock cycle. The typical convention is that it + * represents the number of nanoseconds between each cycle. In this + * convention the compensation value is in 64-bit fixed-point + * representation where the upper 32 bits are the number of nanoseconds + * and the lower 32 bits are fractions of a nanosecond. + * The scaled_ppm represents the ratio, in parts per million with a + * 16 bit binary fractional field, by which the + * compensation value should be corrected. + * To calculate the new compensation value we use 64-bit fixed-point + * arithmetic on the following formula + * comp = tbase + tbase * scaled_ppm / (1M * 2^16) + * where tbase is the basic compensation value calculated initially + * in cavium_ptp_probe() -> tbase = 1/Hz. The result is then written + * to the PTP_CLOCK_COMP register. + */ + comp = ((u64)1000000000ull << 32) / clock->clock_rate; + adj = comp * scaled_ppm; + adj >>= 16; + adj = div_u64(adj, 1000000ull); + comp = neg_adj ? comp - adj : comp + adj; + + spin_lock_irqsave(&clock->spin_lock, flags); + writeq(comp, clock->reg_base + PTP_CLOCK_COMP); + spin_unlock_irqrestore(&clock->spin_lock, flags); + + return 0; +} + +/** + * cavium_ptp_adjtime() - Adjust ptp time + * @ptp: PTP clock info + * @delta: how much to adjust by, in nanosecs + */ +static int cavium_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) +{ + struct cavium_ptp *clock = + container_of(ptp_info, struct cavium_ptp, ptp_info); + unsigned long flags; + + spin_lock_irqsave(&clock->spin_lock, flags); + timecounter_adjtime(&clock->time_counter, delta); + spin_unlock_irqrestore(&clock->spin_lock, flags); + + /* Sync, for network driver to get latest value */ + smp_mb(); + + return 0; +} + +/** + * cavium_ptp_gettime() - Get hardware clock time with adjustment + * @ptp: PTP clock info + * @ts: timespec + */ +static int cavium_ptp_gettime(struct ptp_clock_info *ptp_info, + struct timespec64 *ts) +{ + struct cavium_ptp *clock = + container_of(ptp_info, struct cavium_ptp, ptp_info); + unsigned long flags; + u64 nsec; + + spin_lock_irqsave(&clock->spin_lock, flags); + nsec = timecounter_read(&clock->time_counter); + spin_unlock_irqrestore(&clock->spin_lock, flags); + + *ts = ns_to_timespec64(nsec); + + return 0; +} + +/** + * cavium_ptp_settime() - Set hardware clock time. Reset adjustment + * @ptp: PTP clock info + * @ts: timespec + */ +static int cavium_ptp_settime(struct ptp_clock_info *ptp_info, + const struct timespec64 *ts) +{ + struct cavium_ptp *clock = + container_of(ptp_info, struct cavium_ptp, ptp_info); + unsigned long flags; + u64 nsec; + + nsec = timespec64_to_ns(ts); + + spin_lock_irqsave(&clock->spin_lock, flags); + timecounter_init(&clock->time_counter, &clock->cycle_counter, nsec); + spin_unlock_irqrestore(&clock->spin_lock, flags); + + return 0; +} + +/** + * cavium_ptp_enable() - Request to enable or disable an ancillary feature.
+ * @ptp: PTP clock info + * @rq: request + * @on: is it on + */ +static int cavium_ptp_enable(struct ptp_clock_info *ptp_info, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +static u64 cavium_ptp_cc_read(const struct cyclecounter *cc) +{ + struct cavium_ptp *clock = + container_of(cc, struct cavium_ptp, cycle_counter); + + return readq(clock->reg_base + PTP_CLOCK_HI); +} + +static int cavium_ptp_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct cavium_ptp *clock; + struct cyclecounter *cc; + u64 clock_cfg; + u64 clock_comp; + int err; + + clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL); + if (!clock) { + err = -ENOMEM; + goto error; + } + + clock->pdev = pdev; + + err = pcim_enable_device(pdev); + if (err) + goto error_free; + + err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev)); + if (err) + goto error_free; + + clock->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO]; + + spin_lock_init(&clock->spin_lock); + + cc = &clock->cycle_counter; + cc->read = cavium_ptp_cc_read; + cc->mask = CYCLECOUNTER_MASK(64); + cc->mult = 1; + cc->shift = 0; + + timecounter_init(&clock->time_counter, &clock->cycle_counter, + ktime_to_ns(ktime_get_real())); + + clock->clock_rate = ptp_cavium_clock_get(); + + clock->ptp_info = (struct ptp_clock_info) { + .owner = THIS_MODULE, + .name = "ThunderX PTP", + .max_adj = 1000000000ull, + .n_ext_ts = 0, + .n_pins = 0, + .pps = 0, + .adjfine = cavium_ptp_adjfine, + .adjtime = cavium_ptp_adjtime, + .gettime64 = cavium_ptp_gettime, + .settime64 = cavium_ptp_settime, + .enable = cavium_ptp_enable, + }; + + clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG); + clock_cfg |= PTP_CLOCK_CFG_PTP_EN; + writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG); + + clock_comp = ((u64)1000000000ull << 32) / clock->clock_rate; + writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP); + + clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev); + if (!clock->ptp_clock) { + err = -ENODEV; + goto error_stop; + } + if (IS_ERR(clock->ptp_clock)) { + err = PTR_ERR(clock->ptp_clock); + goto error_stop; + } + + pci_set_drvdata(pdev, clock); + return 0; + +error_stop: + clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG); + clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN; + writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG); + pcim_iounmap_regions(pdev, 1 << PCI_PTP_BAR_NO); + +error_free: + devm_kfree(dev, clock); + +error: + /* For `cavium_ptp_get()` we need to differentiate between the case + * when the core has not tried to probe this device and the case when + * the probe failed. In the latter case we pretend that the + * initialization was successful and keep the error in + * `dev->driver_data`.
+ */ + pci_set_drvdata(pdev, ERR_PTR(err)); + return 0; +} + +static void cavium_ptp_remove(struct pci_dev *pdev) +{ + struct cavium_ptp *clock = pci_get_drvdata(pdev); + u64 clock_cfg; + + if (IS_ERR_OR_NULL(clock)) + return; + + ptp_clock_unregister(clock->ptp_clock); + + clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG); + clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN; + writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG); +} + +static const struct pci_device_id cavium_ptp_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP) }, + { 0, } +}; + +static struct pci_driver cavium_ptp_driver = { + .name = DRV_NAME, + .id_table = cavium_ptp_id_table, + .probe = cavium_ptp_probe, + .remove = cavium_ptp_remove, +}; + +static int __init cavium_ptp_init_module(void) +{ + return pci_register_driver(&cavium_ptp_driver); +} + +static void __exit cavium_ptp_cleanup_module(void) +{ + pci_unregister_driver(&cavium_ptp_driver); +} + +module_init(cavium_ptp_init_module); +module_exit(cavium_ptp_cleanup_module); + +MODULE_DESCRIPTION(DRV_NAME); +MODULE_AUTHOR("Cavium Networks <support@cavium.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(pci, cavium_ptp_id_table); diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h new file mode 100644 index 000000000000..be2bafc7beeb --- /dev/null +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +/* cavium_ptp.h - PTP 1588 clock on Cavium hardware + * Copyright (c) 2003-2015, 2017 Cavium, Inc. + */ + +#ifndef CAVIUM_PTP_H +#define CAVIUM_PTP_H + +#include <linux/ptp_clock_kernel.h> +#include <linux/timecounter.h> + +struct cavium_ptp { + struct pci_dev *pdev; + + /* Serialize access to cycle_counter, time_counter and hw_registers */ + spinlock_t spin_lock; + struct cyclecounter cycle_counter; + struct timecounter time_counter; + void __iomem *reg_base; + + u32 clock_rate; + + struct ptp_clock_info ptp_info; + struct ptp_clock *ptp_clock; +}; + +#if IS_ENABLED(CONFIG_CAVIUM_PTP) + +struct cavium_ptp *cavium_ptp_get(void); +void cavium_ptp_put(struct cavium_ptp *ptp); + +static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp) +{ + unsigned long flags; + u64 ret; + + spin_lock_irqsave(&ptp->spin_lock, flags); + ret = timecounter_cyc2time(&ptp->time_counter, tstamp); + spin_unlock_irqrestore(&ptp->spin_lock, flags); + + return ret; +} + +static inline int cavium_ptp_clock_index(struct cavium_ptp *clock) +{ + return ptp_clock_index(clock->ptp_clock); +} + +#else + +static inline struct cavium_ptp *cavium_ptp_get(void) +{ + return ERR_PTR(-ENODEV); +} + +static inline void cavium_ptp_put(struct cavium_ptp *ptp) {} + +static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp) +{ + return 0; +} + +static inline int cavium_ptp_clock_index(struct cavium_ptp *clock) +{ + return -1; +} + +#endif + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 2c615ab09e64..f38abf626412 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -702,12 +702,10 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id, size = octdevsize + priv_size + configsize + (sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE); - buf = vmalloc(size); + buf = vzalloc(size); if (!buf) return NULL; - memset(buf, 0, size); - oct = (struct octeon_device *)buf; oct->priv = 
(void *)(buf + octdevsize); oct->chip = (void *)(buf + octdevsize + priv_size); @@ -840,10 +838,9 @@ octeon_allocate_ioq_vector(struct octeon_device *oct) size = sizeof(struct octeon_ioq_vector) * num_ioqs; - oct->ioq_vector = vmalloc(size); + oct->ioq_vector = vzalloc(size); if (!oct->ioq_vector) return 1; - memset(oct->ioq_vector, 0, size); for (i = 0; i < num_ioqs; i++) { ioq_vector = &oct->ioq_vector[i]; ioq_vector->oct_dev = oct; diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 4a02e618e318..4cacce5d2b16 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -263,6 +263,8 @@ struct nicvf_drv_stats { struct u64_stats_sync syncp; }; +struct cavium_ptp; + struct nicvf { struct nicvf *pnicvf; struct net_device *netdev; @@ -312,6 +314,33 @@ struct nicvf { struct tasklet_struct qs_err_task; struct work_struct reset_task; + /* PTP timestamp */ + struct cavium_ptp *ptp_clock; + /* Inbound timestamping is on */ + bool hw_rx_tstamp; + /* When the packet that requires timestamping is sent, hardware inserts + * two entries to the completion queue. First is the regular + * CQE_TYPE_SEND entry that signals that the packet was sent. + * The second is CQE_TYPE_SEND_PTP that contains the actual timestamp + * for that packet. + * `ptp_skb` is initialized in the handler for the CQE_TYPE_SEND + * entry and is used and zeroed in the handler for the CQE_TYPE_SEND_PTP + * entry. + * So `ptp_skb` is used to hold the pointer to the packet between + * the calls to CQE_TYPE_SEND and CQE_TYPE_SEND_PTP handlers. + */ + struct sk_buff *ptp_skb; + /* `tx_ptp_skbs` is set when the hardware is sending a packet that + * requires timestamping. Cavium hardware can not process more than one + * such packet at once so this is set each time the driver submits + * a packet that requires timestamping to the send queue and clears + * each time it receives the entry on the completion queue saying + * that such packet was sent. + * So `tx_ptp_skbs` prevents driver from submitting more than one + * packet that requires timestamping to the hardware for transmitting. 
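+ * (It is an atomic_t because the transmit path sets it and the + * completion path clears it, potentially concurrently on different CPUs.)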
+ */ + atomic_t tx_ptp_skbs; + /* Interrupt coalescing settings */ u32 cq_coalesce_usecs; u32 msg_enable; @@ -371,6 +400,7 @@ struct nicvf { #define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */ #define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */ #define NIC_MBOX_MSG_PFC 0x18 /* Pause frame control */ +#define NIC_MBOX_MSG_PTP_CFG 0x19 /* HW packet timestamp */ #define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */ #define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */ @@ -521,6 +551,11 @@ struct pfc { u8 fc_tx; }; +struct set_ptp { + u8 msg; + bool enable; +}; + /* 128 bit shared memory between PF and each VF */ union nic_mbx { struct { u8 msg; } msg; @@ -540,6 +575,7 @@ union nic_mbx { struct set_loopback lbk; struct reset_stat_cfg reset_stat; struct pfc pfc; + struct set_ptp ptp; }; #define NIC_NODE_ID_MASK 0x03 diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 8f1dd55b3e08..7ff66a8194e2 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -18,7 +18,7 @@ #include "q_struct.h" #include "thunder_bgx.h" -#define DRV_NAME "thunder-nic" +#define DRV_NAME "nicpf" #define DRV_VERSION "1.0" struct hw_info { @@ -426,13 +426,22 @@ static void nic_init_hw(struct nicpf *nic) /* Enable backpressure */ nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03); - /* TNS and TNS bypass modes are present only on 88xx */ + /* TNS and TNS bypass modes are present only on 88xx + * Also offset of this CSR has changed in 81xx and 83xx. + */ if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) { /* Disable TNS mode on both interfaces */ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, - (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK); + (NIC_TNS_BYPASS_MODE << 7) | + BGX0_BLOCK | (1ULL << 16)); nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), - (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK); + (NIC_TNS_BYPASS_MODE << 7) | + BGX1_BLOCK | (1ULL << 16)); + } else { + /* Configure timestamp generation timeout to 10us */ + for (i = 0; i < nic->hw->bgx_cnt; i++) + nic_reg_write(nic, NIC_PF_INTFX_SEND_CFG | (i << 3), + (1ULL << 16)); } nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG, @@ -880,6 +889,44 @@ static void nic_pause_frame(struct nicpf *nic, int vf, struct pfc *cfg) } } +/* Enable or disable HW timestamping by BGX for pkts received on a LMAC */ +static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp) +{ + struct pkind_cfg *pkind; + u8 lmac, bgx_idx; + u64 pkind_val, pkind_idx; + + if (vf >= nic->num_vf_en) + return; + + bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + + pkind_idx = lmac + bgx_idx * MAX_LMAC_PER_BGX; + pkind_val = nic_reg_read(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3)); + pkind = (struct pkind_cfg *)&pkind_val; + + if (ptp->enable && !pkind->hdr_sl) { + /* Skiplen to exclude 8byte timestamp while parsing pkt + * If not configured, will result in L2 errors. 
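+ * hdr_sl counts 2-byte units, so a value of 4 skips the 8-byte + * timestamp; note maxlen below is adjusted by hdr_sl * 2 to match.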
+ */ + pkind->hdr_sl = 4; + /* Adjust max packet length allowed */ + pkind->maxlen += (pkind->hdr_sl * 2); + bgx_config_timestamping(nic->node, bgx_idx, lmac, true); + nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3), + (ETYPE_ALG_ENDPARSE << 16) | ETH_P_1588); + } else if (!ptp->enable && pkind->hdr_sl) { + pkind->maxlen -= (pkind->hdr_sl * 2); + pkind->hdr_sl = 0; + bgx_config_timestamping(nic->node, bgx_idx, lmac, false); + nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3), + (ETYPE_ALG_SKIP << 16) | ETH_P_8021Q); + } + + nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val); +} + /* Interrupt handler to handle mailbox messages from VFs */ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) { @@ -1022,6 +1069,9 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) case NIC_MBOX_MSG_PFC: nic_pause_frame(nic, vf, &mbx.pfc); goto unlock; + case NIC_MBOX_MSG_PTP_CFG: + nic_config_timestamp(nic, vf, &mbx.ptp); + break; default: dev_err(&nic->pdev->dev, "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h index 80d46337cf29..a16c48a1ebb2 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_reg.h +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h @@ -99,6 +99,7 @@ #define NIC_PF_ECC3_DBE_INT_W1S (0x2708) #define NIC_PF_ECC3_DBE_ENA_W1C (0x2710) #define NIC_PF_ECC3_DBE_ENA_W1S (0x2718) +#define NIC_PF_INTFX_SEND_CFG (0x4000) #define NIC_PF_MCAM_0_191_ENA (0x100000) #define NIC_PF_MCAM_0_191_M_0_5_DATA (0x110000) #define NIC_PF_MCAM_CTRL (0x120000) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index b9ece9cbf98b..5603f5ab1fee 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -9,14 +9,16 @@ /* ETHTOOL Support for VNIC_VF Device*/ #include <linux/pci.h> +#include <linux/net_tstamp.h> #include "nic_reg.h" #include "nic.h" #include "nicvf_queues.h" #include "q_struct.h" #include "thunder_bgx.h" +#include "../common/cavium_ptp.h" -#define DRV_NAME "thunder-nicvf" +#define DRV_NAME "nicvf" #define DRV_VERSION "1.0" struct nicvf_stat { @@ -824,6 +826,31 @@ static int nicvf_set_pauseparam(struct net_device *dev, return 0; } +static int nicvf_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct nicvf *nic = netdev_priv(netdev); + + if (!nic->ptp_clock) + return ethtool_op_get_ts_info(netdev, info); + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->phc_index = cavium_ptp_clock_index(nic->ptp_clock); + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + return 0; +} + static const struct ethtool_ops nicvf_ethtool_ops = { .get_link = nicvf_get_link, .get_drvinfo = nicvf_get_drvinfo, @@ -847,7 +874,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = { .set_channels = nicvf_set_channels, .get_pauseparam = nicvf_get_pauseparam, .set_pauseparam = nicvf_set_pauseparam, - .get_ts_info = ethtool_op_get_ts_info, + .get_ts_info = nicvf_get_ts_info, .get_link_ksettings = nicvf_get_link_ksettings, }; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 
a063c36c4c58..b68cde9f17d2 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -20,13 +20,15 @@ #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/filter.h> +#include <linux/net_tstamp.h> #include "nic_reg.h" #include "nic.h" #include "nicvf_queues.h" #include "thunder_bgx.h" +#include "../common/cavium_ptp.h" -#define DRV_NAME "thunder-nicvf" +#define DRV_NAME "nicvf" #define DRV_VERSION "1.0" /* Supported devices */ @@ -65,6 +67,11 @@ module_param(cpi_alg, int, S_IRUGO); MODULE_PARM_DESC(cpi_alg, "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); +struct nicvf_xdp_tx { + u64 dma_addr; + u8 qidx; +}; + static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) { if (nic->sqs_mode) @@ -500,14 +507,29 @@ static int nicvf_init_resources(struct nicvf *nic) return 0; } +static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr) +{ + /* Check if it's a recycled page; if not, unmap the DMA mapping. + * A recycled page holds an extra reference. + */ + if (page_ref_count(page) == 1) { + dma_addr &= PAGE_MASK; + dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, + RCV_FRAG_LEN + XDP_HEADROOM, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + } +} + static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, struct cqe_rx_t *cqe_rx, struct snd_queue *sq, - struct sk_buff **skb) + struct rcv_queue *rq, struct sk_buff **skb) { struct xdp_buff xdp; struct page *page; + struct nicvf_xdp_tx *xdp_tx = NULL; u32 action; - u16 len, offset = 0; + u16 len, err, offset = 0; u64 dma_addr, cpu_addr; void *orig_data; @@ -521,10 +543,11 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, cpu_addr = (u64)phys_to_virt(cpu_addr); page = virt_to_page((void *)cpu_addr); - xdp.data_hard_start = page_address(page); + xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM; xdp.data = (void *)cpu_addr; xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; + xdp.rxq = &rq->xdp_rxq; orig_data = xdp.data; rcu_read_lock(); @@ -540,18 +563,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, switch (action) { case XDP_PASS: - /* Check if it's a recycled page, if not - * unmap the DMA mapping. - * - * Recycled page holds an extra reference. - */ - if (page_ref_count(page) == 1) { - dma_addr &= PAGE_MASK; - dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, - RCV_FRAG_LEN + XDP_PACKET_HEADROOM, - DMA_FROM_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); - } + nicvf_unmap_page(nic, page, dma_addr); /* Build SKB and pass on packet to network stack */ *skb = build_skb(xdp.data, @@ -564,6 +576,20 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, case XDP_TX: nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len); return true; + case XDP_REDIRECT: + /* Save DMA address for use while transmitting */ + xdp_tx = (struct nicvf_xdp_tx *)page_address(page); + xdp_tx->dma_addr = dma_addr; + xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx); + + err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog); + if (!err) + return true; + + /* Free the page on error */ + nicvf_unmap_page(nic, page, dma_addr); + put_page(page); + break; default: bpf_warn_invalid_xdp_action(action); /* fall through */ @@ -571,24 +597,51 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, trace_xdp_exception(nic->netdev, prog, action); /* fall through */ case XDP_DROP: - /* Check if it's a recycled page, if not - * unmap the DMA mapping.
- * - * Recycled page holds an extra reference. - */ - if (page_ref_count(page) == 1) { - dma_addr &= PAGE_MASK; - dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, - RCV_FRAG_LEN + XDP_PACKET_HEADROOM, - DMA_FROM_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); - } + nicvf_unmap_page(nic, page, dma_addr); put_page(page); return true; } return false; } +static void nicvf_snd_ptp_handler(struct net_device *netdev, + struct cqe_send_t *cqe_tx) +{ + struct nicvf *nic = netdev_priv(netdev); + struct skb_shared_hwtstamps ts; + u64 ns; + + nic = nic->pnicvf; + + /* Sync for 'ptp_skb' */ + smp_rmb(); + + /* A new timestamp request can be queued now */ + atomic_set(&nic->tx_ptp_skbs, 0); + + /* Check for a timestamp-requested skb */ + if (!nic->ptp_skb) + return; + + /* Check if timestamping timed out; the timeout is set to 10us */ + if (cqe_tx->send_status == CQ_TX_ERROP_TSTMP_TIMEOUT || + cqe_tx->send_status == CQ_TX_ERROP_TSTMP_CONFLICT) + goto no_tstamp; + + /* Get the timestamp */ + memset(&ts, 0, sizeof(ts)); + ns = cavium_ptp_tstamp2time(nic->ptp_clock, cqe_tx->ptp_timestamp); + ts.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(nic->ptp_skb, &ts); + +no_tstamp: + /* Free the original skb */ + dev_kfree_skb_any(nic->ptp_skb); + nic->ptp_skb = NULL; + /* Sync 'ptp_skb' */ + smp_wmb(); +} + static void nicvf_snd_pkt_handler(struct net_device *netdev, struct cqe_send_t *cqe_tx, int budget, int *subdesc_cnt, @@ -645,7 +698,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, prefetch(skb); (*tx_pkts)++; *tx_bytes += skb->len; - napi_consume_skb(skb, budget); + /* If a timestamp is requested for this skb, don't free it */ + if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS && + !nic->pnicvf->ptp_skb) + nic->pnicvf->ptp_skb = skb; + else + napi_consume_skb(skb, budget); sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL; } else { /* In case of SW TSO on 88xx, only last segment will have @@ -684,9 +742,25 @@ static inline void nicvf_set_rxhash(struct net_device *netdev, skb_set_hash(skb, hash, hash_type); } +static inline void nicvf_set_rxtstamp(struct nicvf *nic, struct sk_buff *skb) +{ + u64 ns; + + if (!nic->ptp_clock || !nic->hw_rx_tstamp) + return; + + /* The first 8 bytes are the timestamp */ + ns = cavium_ptp_tstamp2time(nic->ptp_clock, + be64_to_cpu(*(__be64 *)skb->data)); + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); + + __skb_pull(skb, 8); +} + static void nicvf_rcv_pkt_handler(struct net_device *netdev, struct napi_struct *napi, - struct cqe_rx_t *cqe_rx, struct snd_queue *sq) + struct cqe_rx_t *cqe_rx, + struct snd_queue *sq, struct rcv_queue *rq) { struct sk_buff *skb = NULL; struct nicvf *nic = netdev_priv(netdev); @@ -712,7 +786,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, /* For XDP, ignore pkts spanning multiple pages */ if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) { /* Packet consumed by XDP */ - if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb)) + if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb)) return; } else { skb = nicvf_get_rcv_skb(snic, cqe_rx, @@ -734,6 +808,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, return; } + nicvf_set_rxtstamp(nic, skb); nicvf_set_rxhash(netdev, cqe_rx, skb); skb_record_rx_queue(skb, rq_idx); @@ -769,6 +844,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, struct cqe_rx_t *cq_desc; struct netdev_queue *txq; struct snd_queue *sq = &qs->sq[cq_idx]; + struct rcv_queue *rq = &qs->rq[cq_idx]; unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx; spin_lock_bh(&cq->lock); @@ -799,7 +875,7 @@ loop:
switch (cq_desc->cqe_type) { case CQE_TYPE_RX: - nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq); + nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq); work_done++; break; case CQE_TYPE_SEND: @@ -808,10 +884,12 @@ loop: &tx_pkts, &tx_bytes); tx_done++; break; + case CQE_TYPE_SEND_PTP: + nicvf_snd_ptp_handler(netdev, (void *)cq_desc); + break; case CQE_TYPE_INVALID: case CQE_TYPE_RX_SPLIT: case CQE_TYPE_RX_TCP: - case CQE_TYPE_SEND_PTP: /* Ignore for now */ break; } @@ -1307,12 +1385,28 @@ int nicvf_stop(struct net_device *netdev) nicvf_free_cq_poll(nic); + /* Free any pending SKB saved to receive timestamp */ + if (nic->ptp_skb) { + dev_kfree_skb_any(nic->ptp_skb); + nic->ptp_skb = NULL; + } + /* Clear multiqset info */ nic->pnicvf = nic; return 0; } +static int nicvf_config_hw_rx_tstamp(struct nicvf *nic, bool enable) +{ + union nic_mbx mbx = {}; + + mbx.ptp.msg = NIC_MBOX_MSG_PTP_CFG; + mbx.ptp.enable = enable; + + return nicvf_send_msg_to_pf(nic, &mbx); +} + static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) { union nic_mbx mbx = {}; @@ -1382,6 +1476,12 @@ int nicvf_open(struct net_device *netdev) if (nic->sqs_mode) nicvf_get_primary_vf_struct(nic); + /* Configure PTP timestamp */ + if (nic->ptp_clock) + nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp); + atomic_set(&nic->tx_ptp_skbs, 0); + nic->ptp_skb = NULL; + /* Configure receive side scaling and MTU */ if (!nic->sqs_mode) { nicvf_rss_init(nic); @@ -1764,6 +1864,117 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) } } +static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp) +{ + struct nicvf *nic = netdev_priv(netdev); + struct nicvf *snic = nic; + struct nicvf_xdp_tx *xdp_tx; + struct snd_queue *sq; + struct page *page; + int err, qidx; + + if (!netif_running(netdev) || !nic->xdp_prog) + return -EINVAL; + + page = virt_to_page(xdp->data); + xdp_tx = (struct nicvf_xdp_tx *)page_address(page); + qidx = xdp_tx->qidx; + + if (xdp_tx->qidx >= nic->xdp_tx_queues) + return -EINVAL; + + /* Get secondary Qset's info */ + if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) { + qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS; + snic = (struct nicvf *)nic->snicvf[qidx - 1]; + if (!snic) + return -EINVAL; + qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS; + } + + sq = &snic->qs->sq[qidx]; + err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data, + xdp_tx->dma_addr, + xdp->data_end - xdp->data); + if (err) + return -ENOMEM; + + nicvf_xdp_sq_doorbell(snic, sq, qidx); + return 0; +} + +static void nicvf_xdp_flush(struct net_device *dev) +{ + return; +} + +static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) +{ + struct hwtstamp_config config; + struct nicvf *nic = netdev_priv(netdev); + + if (!nic->ptp_clock) + return -ENODEV; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + nic->hw_rx_tstamp = false; + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + 
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + nic->hw_rx_tstamp = true; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + if (netif_running(netdev)) + nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp); + + if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) + return -EFAULT; + + return 0; +} + +static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +{ + switch (cmd) { + case SIOCSHWTSTAMP: + return nicvf_config_hwtstamp(netdev, req); + default: + return -EOPNOTSUPP; + } +} + static const struct net_device_ops nicvf_netdev_ops = { .ndo_open = nicvf_open, .ndo_stop = nicvf_stop, @@ -1775,6 +1986,9 @@ static const struct net_device_ops nicvf_netdev_ops = { .ndo_fix_features = nicvf_fix_features, .ndo_set_features = nicvf_set_features, .ndo_bpf = nicvf_xdp, + .ndo_xdp_xmit = nicvf_xdp_xmit, + .ndo_xdp_flush = nicvf_xdp_flush, + .ndo_do_ioctl = nicvf_ioctl, }; static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -1784,6 +1998,16 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct nicvf *nic; int err, qcount; u16 sdevid; + struct cavium_ptp *ptp_clock; + + ptp_clock = cavium_ptp_get(); + if (IS_ERR(ptp_clock)) { + if (PTR_ERR(ptp_clock) == -ENODEV) + /* In a virtualized environment we proceed without PTP */ + ptp_clock = NULL; + else + return PTR_ERR(ptp_clock); + } err = pci_enable_device(pdev); if (err) { @@ -1833,6 +2057,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nic->pdev = pdev; nic->pnicvf = nic; nic->max_queues = qcount; + /* If the number of CPUs is too low, there won't be any queues left + * for XDP_TX, hence double it.
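+ * (The extra queues can then serve as dedicated XDP_TX transmit + * queues; see the nic->xdp_tx_queues check in nicvf_xdp_xmit().)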
+ */ + if (!nic->t88) + nic->max_queues *= 2; + nic->ptp_clock = ptp_clock; /* MAP VF's configuration registers */ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); @@ -1946,6 +2176,7 @@ static void nicvf_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); if (nic->drv_stats) free_percpu(nic->drv_stats); + cavium_ptp_put(nic->ptp_clock); free_netdev(netdev); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index a3d12dbde95b..3eae9ff9b53a 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr, /* Reserve space for header modifications by BPF program */ if (rbdr->is_xdp) - buf_len += XDP_PACKET_HEADROOM; + buf_len += XDP_HEADROOM; /* Check if it's recycled */ if (pgcache) @@ -224,8 +224,9 @@ ret: nic->rb_page = NULL; return -ENOMEM; } + if (pgcache) - pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM; + pgcache->dma_addr = *rbuf + XDP_HEADROOM; nic->rb_page_offset += buf_len; } @@ -759,6 +760,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, if (!rq->enable) { nicvf_reclaim_rcv_queue(nic, qs, qidx); + xdp_rxq_info_unreg(&rq->xdp_rxq); return; } @@ -771,6 +773,9 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, /* all writes of RBDR data to be loaded into L2 Cache as well*/ rq->caching = 1; + /* The driver has no proper error path for a failed XDP RX-queue info reg */ + WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx) < 0); + /* Send a mailbox msg to PF to config RQ */ mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; mbx.rq.qs_num = qs->vnic_id; @@ -977,6 +982,9 @@ void nicvf_qset_config(struct nicvf *nic, bool enable) qs_cfg->be = 1; #endif qs_cfg->vnic = qs->vnic_id; + /* Enable Tx timestamping capability */ + if (nic->ptp_clock) + qs_cfg->send_tstmp_ena = 1; } nicvf_send_msg_to_pf(nic, &mbx); } @@ -1236,7 +1244,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq, int qentry; if (subdesc_cnt > sq->xdp_free_cnt) - return 0; + return -1; qentry = nicvf_get_sq_desc(sq, subdesc_cnt); @@ -1247,7 +1255,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq, sq->xdp_desc_cnt += subdesc_cnt; - return 0; + return 0; } /* Calculate the number of SQ subdescriptors needed to transmit all @@ -1384,6 +1392,29 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, hdr->inner_l3_offset = skb_network_offset(skb) - 2; this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); } + + /* Check if a timestamp is requested */ + if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + skb_tx_timestamp(skb); + return; + } + + /* Tx timestamping is not supported along with TSO, so ignore the request */ + if (skb_shinfo(skb)->gso_size) + return; + + /* HW supports only a single outstanding packet to timestamp */ + if (!atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1)) + return; + + /* Mark the SKB for later reference */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + /* Finally enable timestamp generation. + * Since 'post_cqe' is also set, two CQEs will be posted + * for this packet, i.e. CQE_TYPE_SEND and CQE_TYPE_SEND_PTP.
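+ * The CQE_TYPE_SEND_PTP entry is consumed by nicvf_snd_ptp_handler(), + * which converts the raw counter with cavium_ptp_tstamp2time() and + * delivers the result to the socket via skb_tstamp_tx().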
+ */ + hdr->tstmp = 1; } /* SQ GATHER subdescriptor @@ -1625,7 +1656,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr, if (page_ref_count(page) != 1) return; - len += XDP_PACKET_HEADROOM; + len += XDP_HEADROOM; /* Receive buffers in XDP mode are mapped from page start */ dma_addr &= PAGE_MASK; } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 67d1a3230773..7d1e4e2aaad0 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -11,6 +11,8 @@ #include <linux/netdevice.h> #include <linux/iommu.h> +#include <linux/bpf.h> +#include <net/xdp.h> #include "q_struct.h" #define MAX_QUEUE_SET 128 @@ -92,6 +94,9 @@ #define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define RCV_BUF_HEADROOM 128 /* To store dma address for XDP redirect */ +#define XDP_HEADROOM (XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM) + #define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ MAX_CQE_PER_PKT_XMIT) @@ -251,6 +256,7 @@ struct rcv_queue { u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */ u8 caching; struct rx_tx_queue_stats stats; + struct xdp_rxq_info xdp_rxq; } ____cacheline_aligned_in_smp; struct cmp_queue { diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 5e5c4d7796b8..91d34ea40e2c 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -21,7 +21,7 @@ #include "nic.h" #include "thunder_bgx.h" -#define DRV_NAME "thunder-BGX" +#define DRV_NAME "thunder_bgx" #define DRV_VERSION "1.0" struct lmac { @@ -245,6 +245,35 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) } EXPORT_SYMBOL(bgx_lmac_rx_tx_enable); +/* Enables or disables timestamp insertion by BGX for Rx packets */ +void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable) +{ + struct bgx *bgx = get_bgx(node, bgx_idx); + struct lmac *lmac; + u64 csr_offset, cfg; + + if (!bgx) + return; + + lmac = &bgx->lmac[lmacid]; + + if (lmac->lmac_type == BGX_MODE_SGMII || + lmac->lmac_type == BGX_MODE_QSGMII || + lmac->lmac_type == BGX_MODE_RGMII) + csr_offset = BGX_GMP_GMI_RXX_FRM_CTL; + else + csr_offset = BGX_SMUX_RX_FRM_CTL; + + cfg = bgx_reg_read(bgx, lmacid, csr_offset); + + if (enable) + cfg |= BGX_PKT_RX_PTP_EN; + else + cfg &= ~BGX_PKT_RX_PTP_EN; + bgx_reg_write(bgx, lmacid, csr_offset, cfg); +} +EXPORT_SYMBOL(bgx_config_timestamping); + void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause) { struct pfc *pfc = (struct pfc *)pause; diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index 23acdc5ab896..5a7567d31138 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -122,6 +122,8 @@ #define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29) #define BGX_SMUX_RX_INT 0x20000 +#define BGX_SMUX_RX_FRM_CTL 0x20020 +#define BGX_PKT_RX_PTP_EN BIT_ULL(12) #define BGX_SMUX_RX_JABBER 0x20030 #define BGX_SMUX_RX_CTL 0x20048 #define SMU_RX_CTL_STATUS (3ull << 0) @@ -172,6 +174,7 @@ #define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8) #define GMI_PORT_CFG_RX_IDLE BIT_ULL(12) #define GMI_PORT_CFG_TX_IDLE BIT_ULL(13) +#define BGX_GMP_GMI_RXX_FRM_CTL 0x38028 #define BGX_GMP_GMI_RXX_JABBER 0x38038 #define BGX_GMP_GMI_TXX_THRESH 0x38210 
#define BGX_GMP_GMI_TXX_APPEND 0x38218 @@ -223,6 +226,7 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac); void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status); void bgx_lmac_internal_loopback(int node, int bgx_idx, int lmac_idx, bool enable); +void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable); void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause); void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c index 578c7f8f11bf..2d5e8dab1f70 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c @@ -20,7 +20,7 @@ #include "nic.h" #include "thunder_bgx.h" -#define DRV_NAME "thunder-xcv" +#define DRV_NAME "thunder_xcv" #define DRV_VERSION "1.0" /* Register offsets */ diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index 5713e83be08c..e2cdfa75673f 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -69,6 +69,7 @@ config CHELSIO_T4 depends on PCI && (IPV6 || IPV6=n) select FW_LOADER select MDIO + select ZLIB_DEFLATE ---help--- This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet adapter and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 6a015362c340..185fe8df7628 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -3304,6 +3304,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->ethtool_ops = &cxgb_ethtool_ops; netdev->min_mtu = 81; netdev->max_mtu = ETH_MAX_MTU; + netdev->dev_port = pi->port_id; } pci_set_drvdata(pdev, adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index 8c9c6b0d2e5d..53b6a02c778e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -8,7 +8,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \ cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \ cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \ - cudbg_common.o cudbg_lib.o + cudbg_common.o cudbg_lib.o cudbg_zlib.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c index f78ba1743b5a..8edc49827af0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c @@ -19,7 +19,8 @@ #include "cudbg_if.h" #include "cudbg_lib_common.h" -int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size, +int cudbg_get_buff(struct cudbg_init *pdbg_init, + struct cudbg_buffer *pdbg_buff, u32 size, struct cudbg_buffer *pin_buff) { u32 offset; @@ -28,17 +29,30 @@ int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size, if (offset + size > pdbg_buff->size) return CUDBG_STATUS_NO_MEM; + if (pdbg_init->compress_type != CUDBG_COMPRESSION_NONE) { + if (size > pdbg_init->compress_buff_size) + return CUDBG_STATUS_NO_MEM; + + pin_buff->data = (char *)pdbg_init->compress_buff; + pin_buff->offset = 0; + pin_buff->size = 
size; + return 0; + } + pin_buff->data = (char *)pdbg_buff->data + offset; pin_buff->offset = offset; pin_buff->size = size; - pdbg_buff->size -= size; return 0; } -void cudbg_put_buff(struct cudbg_buffer *pin_buff, - struct cudbg_buffer *pdbg_buff) +void cudbg_put_buff(struct cudbg_init *pdbg_init, + struct cudbg_buffer *pin_buff) { - pdbg_buff->size += pin_buff->size; + /* Clear compression buffer for re-use */ + if (pdbg_init->compress_type != CUDBG_COMPRESSION_NONE) + memset(pdbg_init->compress_buff, 0, + pdbg_init->compress_buff_size); + pin_buff->data = NULL; pin_buff->offset = 0; pin_buff->size = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 605689957496..b57acb8dc35b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -18,17 +18,15 @@ #ifndef __CUDBG_ENTITY_H__ #define __CUDBG_ENTITY_H__ -#define EDC0_FLAG 3 -#define EDC1_FLAG 4 +#define EDC0_FLAG 0 +#define EDC1_FLAG 1 +#define MC_FLAG 2 +#define MC0_FLAG 3 +#define MC1_FLAG 4 +#define HMA_FLAG 5 #define CUDBG_ENTITY_SIGNATURE 0xCCEDB001 -struct card_mem { - u16 size_edc0; - u16 size_edc1; - u16 mem_flag; -}; - struct cudbg_mbox_log { struct mbox_cmd entry; u32 hi[MBOX_LEN / 8]; @@ -87,6 +85,48 @@ struct cudbg_tp_la { u8 data[0]; }; +static const char * const cudbg_region[] = { + "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", + "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", + "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", + "TDDP region:", "TPT region:", "STAG region:", "RQ region:", + "RQUDP region:", "PBL region:", "TXPBL region:", + "DBVFIFO region:", "ULPRX state:", "ULPTX state:", + "On-chip queues:" +}; + +/* Memory region info relative to current memory (i.e. wrt 0). */ +struct cudbg_region_info { + bool exist; /* Does the region exist in current memory?
*/ + u32 start; /* Start wrt 0 */ + u32 end; /* End wrt 0 */ +}; + +struct cudbg_mem_desc { + u32 base; + u32 limit; + u32 idx; +}; + +struct cudbg_meminfo { + struct cudbg_mem_desc avail[4]; + struct cudbg_mem_desc mem[ARRAY_SIZE(cudbg_region) + 3]; + u32 avail_c; + u32 mem_c; + u32 up_ram_lo; + u32 up_ram_hi; + u32 up_extmem2_lo; + u32 up_extmem2_hi; + u32 rx_pages_data[3]; + u32 tx_pages_data[4]; + u32 p_structs; + u32 reserved[12]; + u32 port_used[4]; + u32 port_alloc[4]; + u32 loopback_used[NCHAN]; + u32 loopback_alloc[NCHAN]; +}; + struct cudbg_cim_pif_la { int size; u8 data[0]; @@ -145,6 +185,7 @@ struct cudbg_tid_info_region_rev1 { u32 reserved[16]; }; +#define CUDBG_LOWMEM_MAX_CTXT_QIDS 256 #define CUDBG_MAX_FL_QIDS 1024 struct cudbg_ch_cntxt { @@ -334,6 +375,25 @@ static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = { {0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */ }; +#define CUDBG_NUM_PCIE_CONFIG_REGS 0x61 + +static const u32 t5_pcie_config_array[][2] = { + {0x0, 0x34}, + {0x3c, 0x40}, + {0x50, 0x64}, + {0x70, 0x80}, + {0x94, 0xa0}, + {0xb0, 0xb8}, + {0xd0, 0xd4}, + {0x100, 0x128}, + {0x140, 0x148}, + {0x150, 0x164}, + {0x170, 0x178}, + {0x180, 0x194}, + {0x1a0, 0x1b8}, + {0x1c0, 0x208}, +}; + static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = { {0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */ {0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */ @@ -345,37 +405,55 @@ static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = { {0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */ }; -static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM] = { - {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */ - {0x7b50, 0x7b54, 0x2080, 0x1d}, /* up_cim_2080_to_20fc */ - {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */ - {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */ - {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */ - {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */ - {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */ - {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */ - {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */ - {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */ - {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */ - {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */ - {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */ - -}; - -static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM] = { - {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */ - {0x7b50, 0x7b54, 0x2080, 0x19}, /* up_cim_2080_to_20ec */ - {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */ - {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */ - {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */ - {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */ - {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */ - {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */ - {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */ - {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */ - {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */ - {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */ - {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */ +static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { + {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ + {0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */ + {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ + {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */ + {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */ + {0x7b50, 0x7b54, 0x200, 0x10, 
0}, /* up_cim_200_to_23c */ + {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ + {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */ + {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ + {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ + {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ + {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ + {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ + {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */ + {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */ + {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */ + {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */ + {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */ + {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */ + {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */ + {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */ + {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */ +}; + +static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { + {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ + {0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */ + {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ + {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */ + {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */ + {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */ + {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ + {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */ + {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ + {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ + {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ + {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ + {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ + {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */ + {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */ + {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */ + {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */ + {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */ + {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */ + {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */ + {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */ + {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */ + {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */ }; static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index e10ff1ee62c5..8568a51f6414 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -21,6 +21,7 @@ /* Error codes */ #define CUDBG_STATUS_NO_MEM -19 #define CUDBG_STATUS_ENTITY_NOT_FOUND -24 +#define CUDBG_STATUS_NOT_IMPLEMENTED -28 #define CUDBG_SYSTEM_ERROR -29 #define CUDBG_STATUS_CCLK_NOT_DEFINED -32 @@ -47,6 +48,8 @@ enum cudbg_dbg_entity_type { CUDBG_CIM_OBQ_NCSI = 17, CUDBG_EDC0 = 18, CUDBG_EDC1 = 19, + CUDBG_MC0 = 20, + CUDBG_MC1 = 21, CUDBG_RSS = 22, CUDBG_RSS_VF_CONF = 25, CUDBG_PATH_MTU = 27, @@ -56,6 +59,7 @@ enum cudbg_dbg_entity_type { CUDBG_SGE_INDIRECT = 37, CUDBG_ULPRX_LA = 41, CUDBG_TP_LA = 43, + CUDBG_MEMINFO = 44, CUDBG_CIM_PIF_LA = 45, CUDBG_CLK = 46, CUDBG_CIM_OBQ_RXQ0 = 47, @@ -63,6 +67,7 @@ enum cudbg_dbg_entity_type { CUDBG_PCIE_INDIRECT = 50, CUDBG_PM_INDIRECT = 51, 
CUDBG_TID_INFO = 54, + CUDBG_PCIE_CONFIG = 55, CUDBG_DUMP_CONTEXT = 56, CUDBG_MPS_TCAM = 57, CUDBG_VPD_DATA = 58, @@ -74,6 +79,7 @@ enum cudbg_dbg_entity_type { CUDBG_PBT_TABLE = 65, CUDBG_MBOX_LOG = 66, CUDBG_HMA_INDIRECT = 67, + CUDBG_HMA = 68, CUDBG_MAX_ENTITY = 70, }; @@ -81,6 +87,10 @@ struct cudbg_init { struct adapter *adap; /* Pointer to adapter structure */ void *outbuf; /* Output buffer */ u32 outbuf_size; /* Output buffer size */ + u8 compress_type; /* Type of compression to use */ + void *compress_buff; /* Compression buffer */ + u32 compress_buff_size; /* Compression buffer size */ + void *workspace; /* Workspace for zlib */ }; static inline unsigned int cudbg_mbytes_to_bytes(unsigned int size) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index d699bf88d18f..557fd8bfd54e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -15,18 +15,65 @@ * */ +#include <linux/sort.h> + #include "t4_regs.h" #include "cxgb4.h" #include "cudbg_if.h" #include "cudbg_lib_common.h" -#include "cudbg_lib.h" #include "cudbg_entity.h" +#include "cudbg_lib.h" +#include "cudbg_zlib.h" -static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff, - struct cudbg_buffer *dbg_buff) +static int cudbg_do_compression(struct cudbg_init *pdbg_init, + struct cudbg_buffer *pin_buff, + struct cudbg_buffer *dbg_buff) { - cudbg_update_buff(pin_buff, dbg_buff); - cudbg_put_buff(pin_buff, dbg_buff); + struct cudbg_buffer temp_in_buff = { 0 }; + int bytes_left, bytes_read, bytes; + u32 offset = dbg_buff->offset; + int rc; + + temp_in_buff.offset = pin_buff->offset; + temp_in_buff.data = pin_buff->data; + temp_in_buff.size = pin_buff->size; + + bytes_left = pin_buff->size; + bytes_read = 0; + while (bytes_left > 0) { + /* Do compression in smaller chunks */ + bytes = min_t(unsigned long, bytes_left, + (unsigned long)CUDBG_CHUNK_SIZE); + temp_in_buff.data = (char *)pin_buff->data + bytes_read; + temp_in_buff.size = bytes; + rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff); + if (rc) + return rc; + bytes_left -= bytes; + bytes_read += bytes; + } + + pin_buff->size = dbg_buff->offset - offset; + return 0; +} + +static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init, + struct cudbg_buffer *pin_buff, + struct cudbg_buffer *dbg_buff) +{ + int rc = 0; + + if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) { + cudbg_update_buff(pin_buff, dbg_buff); + } else { + rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff); + if (rc) + goto out; + } + +out: + cudbg_put_buff(pdbg_init, pin_buff); + return rc; } static int is_fw_attached(struct cudbg_init *pdbg_init) @@ -84,6 +131,277 @@ static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len, return 0; } +static int cudbg_mem_desc_cmp(const void *a, const void *b) +{ + return ((const struct cudbg_mem_desc *)a)->base - + ((const struct cudbg_mem_desc *)b)->base; +} + +int cudbg_fill_meminfo(struct adapter *padap, + struct cudbg_meminfo *meminfo_buff) +{ + struct cudbg_mem_desc *md; + u32 lo, hi, used, alloc; + int n, i; + + memset(meminfo_buff->avail, 0, + ARRAY_SIZE(meminfo_buff->avail) * + sizeof(struct cudbg_mem_desc)); + memset(meminfo_buff->mem, 0, + (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc)); + md = meminfo_buff->mem; + + for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) { + meminfo_buff->mem[i].limit = 0; + meminfo_buff->mem[i].idx = i; + } + + /* Find and sort the populated 
memory ranges */ + i = 0; + lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A); + if (lo & EDRAM0_ENABLE_F) { + hi = t4_read_reg(padap, MA_EDRAM0_BAR_A); + meminfo_buff->avail[i].base = + cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi)); + meminfo_buff->avail[i].limit = + meminfo_buff->avail[i].base + + cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi)); + meminfo_buff->avail[i].idx = 0; + i++; + } + + if (lo & EDRAM1_ENABLE_F) { + hi = t4_read_reg(padap, MA_EDRAM1_BAR_A); + meminfo_buff->avail[i].base = + cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi)); + meminfo_buff->avail[i].limit = + meminfo_buff->avail[i].base + + cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi)); + meminfo_buff->avail[i].idx = 1; + i++; + } + + if (is_t5(padap->params.chip)) { + if (lo & EXT_MEM0_ENABLE_F) { + hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A); + meminfo_buff->avail[i].base = + cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi)); + meminfo_buff->avail[i].limit = + meminfo_buff->avail[i].base + + cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi)); + meminfo_buff->avail[i].idx = 3; + i++; + } + + if (lo & EXT_MEM1_ENABLE_F) { + hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A); + meminfo_buff->avail[i].base = + cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi)); + meminfo_buff->avail[i].limit = + meminfo_buff->avail[i].base + + cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi)); + meminfo_buff->avail[i].idx = 4; + i++; + } + } else { + if (lo & EXT_MEM_ENABLE_F) { + hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A); + meminfo_buff->avail[i].base = + cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi)); + meminfo_buff->avail[i].limit = + meminfo_buff->avail[i].base + + cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi)); + meminfo_buff->avail[i].idx = 2; + i++; + } + + if (lo & HMA_MUX_F) { + hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A); + meminfo_buff->avail[i].base = + cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi)); + meminfo_buff->avail[i].limit = + meminfo_buff->avail[i].base + + cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi)); + meminfo_buff->avail[i].idx = 5; + i++; + } + } + + if (!i) /* no memory available */ + return CUDBG_STATUS_ENTITY_NOT_FOUND; + + meminfo_buff->avail_c = i; + sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc), + cudbg_mem_desc_cmp, NULL); + (md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A); + (md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A); + (md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A); + (md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A); + (md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A); + (md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A); + (md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A); + (md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A); + (md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A); + + /* the next few have explicit upper bounds */ + md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A); + md->limit = md->base - 1 + + t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) * + PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A)); + md++; + + md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A); + md->limit = md->base - 1 + + t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) * + PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A)); + md++; + + if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) { + if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) { + hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4; + md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A); + } else { + hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A); + md->base = t4_read_reg(padap, + LE_DB_HASH_TBL_BASE_ADDR_A); + } + md->limit = 0; + } else { + 
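/* LE hash table is not enabled */ +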
md->base = 0; + md->idx = ARRAY_SIZE(cudbg_region); /* hide it */ + } + md++; + +#define ulp_region(reg) do { \ + md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\ + (md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\ +} while (0) + + ulp_region(RX_ISCSI); + ulp_region(RX_TDDP); + ulp_region(TX_TPT); + ulp_region(RX_STAG); + ulp_region(RX_RQ); + ulp_region(RX_RQUDP); + ulp_region(RX_PBL); + ulp_region(TX_PBL); +#undef ulp_region + md->base = 0; + md->idx = ARRAY_SIZE(cudbg_region); + if (!is_t4(padap->params.chip)) { + u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A); + u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A); + u32 size = 0; + + if (is_t5(padap->params.chip)) { + if (sge_ctrl & VFIFO_ENABLE_F) + size = DBVFIFO_SIZE_G(fifo_size); + } else { + size = T6_DBVFIFO_SIZE_G(fifo_size); + } + + if (size) { + md->base = BASEADDR_G(t4_read_reg(padap, + SGE_DBVFIFO_BADDR_A)); + md->limit = md->base + (size << 2) - 1; + } + } + + md++; + + md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A); + md->limit = 0; + md++; + md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A); + md->limit = 0; + md++; + + md->base = padap->vres.ocq.start; + if (padap->vres.ocq.size) + md->limit = md->base + padap->vres.ocq.size - 1; + else + md->idx = ARRAY_SIZE(cudbg_region); /* hide it */ + md++; + + /* add any address-space holes, there can be up to 3 */ + for (n = 0; n < i - 1; n++) + if (meminfo_buff->avail[n].limit < + meminfo_buff->avail[n + 1].base) + (md++)->base = meminfo_buff->avail[n].limit; + + if (meminfo_buff->avail[n].limit) + (md++)->base = meminfo_buff->avail[n].limit; + + n = md - meminfo_buff->mem; + meminfo_buff->mem_c = n; + + sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc), + cudbg_mem_desc_cmp, NULL); + + lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A); + hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1; + meminfo_buff->up_ram_lo = lo; + meminfo_buff->up_ram_hi = hi; + + lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A); + hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1; + meminfo_buff->up_extmem2_lo = lo; + meminfo_buff->up_extmem2_hi = hi; + + lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A); + meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo); + meminfo_buff->rx_pages_data[1] = + t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10; + meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1; + + lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A); + hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A); + meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo); + meminfo_buff->tx_pages_data[1] = + hi >= (1 << 20) ? (hi >> 20) : (hi >> 10); + meminfo_buff->tx_pages_data[2] = + hi >= (1 << 20) ? 
'M' : 'K'; + meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo); + + meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A); + + for (i = 0; i < 4; i++) { + if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) + lo = t4_read_reg(padap, + MPS_RX_MAC_BG_PG_CNT0_A + i * 4); + else + lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4); + if (is_t5(padap->params.chip)) { + used = T5_USED_G(lo); + alloc = T5_ALLOC_G(lo); + } else { + used = USED_G(lo); + alloc = ALLOC_G(lo); + } + meminfo_buff->port_used[i] = used; + meminfo_buff->port_alloc[i] = alloc; + } + + for (i = 0; i < padap->params.arch.nchan; i++) { + if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) + lo = t4_read_reg(padap, + MPS_RX_LPBK_BG_PG_CNT0_A + i * 4); + else + lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4); + if (is_t5(padap->params.chip)) { + used = T5_USED_G(lo); + alloc = T5_ALLOC_G(lo); + } else { + used = USED_G(lo); + alloc = ALLOC_G(lo); + } + meminfo_buff->loopback_used[i] = used; + meminfo_buff->loopback_alloc[i] = alloc; + } + + return 0; +} + int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) @@ -98,12 +416,11 @@ int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init, else if (is_t5(padap->params.chip) || is_t6(padap->params.chip)) buf_size = T5_REGMAP_SIZE; - rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff); if (rc) return rc; t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size); - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init, @@ -122,7 +439,7 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init, } dparams = &padap->params.devlog; - rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff); if (rc) return rc; @@ -137,12 +454,11 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init, spin_unlock(&padap->win0_lock); if (rc) { cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_cim_la(struct cudbg_init *pdbg_init, @@ -163,14 +479,14 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init, } size += sizeof(cfg); - rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg); if (rc) { cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } @@ -180,11 +496,10 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init, NULL); if (rc < 0) { cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init, @@ -196,7 +511,7 @@ int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init, int size, rc; size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32); - rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return 
rc; @@ -204,8 +519,7 @@ int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init, (u32 *)temp_buff.data, (u32 *)((char *)temp_buff.data + 5 * CIM_MALA_SIZE)); - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init, @@ -217,7 +531,7 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init, struct cudbg_cim_qcfg *cim_qcfg_data; int rc; - rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg), + rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg), &temp_buff); if (rc) return rc; @@ -228,7 +542,7 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init, ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat); if (rc) { cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } @@ -237,14 +551,13 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init, cim_qcfg_data->obq_wr); if (rc) { cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size, cim_qcfg_data->thres); - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init, @@ -258,7 +571,7 @@ static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init, /* collect CIM IBQ */ qsize = CIM_IBQ_SIZE * 4 * sizeof(u32); - rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff); if (rc) return rc; @@ -272,11 +585,10 @@ static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init, else rc = no_of_read_words; cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init, @@ -343,7 +655,7 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init, /* collect CIM OBQ */ qsize = cudbg_cim_obq_size(padap, qid); - rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff); if (rc) return rc; @@ -357,11 +669,10 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init, else rc = no_of_read_words; cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init, @@ -420,23 +731,211 @@ int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init, return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7); } +static int cudbg_meminfo_get_mem_index(struct adapter *padap, + struct cudbg_meminfo *mem_info, + u8 mem_type, u8 *idx) +{ + u8 i, flag; + + switch (mem_type) { + case MEM_EDC0: + flag = EDC0_FLAG; + break; + case MEM_EDC1: + flag = EDC1_FLAG; + break; + case MEM_MC0: + /* Some T5 cards have both MC0 and MC1. */ + flag = is_t5(padap->params.chip) ? 
MC0_FLAG : MC_FLAG; + break; + case MEM_MC1: + flag = MC1_FLAG; + break; + case MEM_HMA: + flag = HMA_FLAG; + break; + default: + return CUDBG_STATUS_ENTITY_NOT_FOUND; + } + + for (i = 0; i < mem_info->avail_c; i++) { + if (mem_info->avail[i].idx == flag) { + *idx = i; + return 0; + } + } + + return CUDBG_STATUS_ENTITY_NOT_FOUND; +} + +/* Fetch the @region_name's start and end from @meminfo. */ +static int cudbg_get_mem_region(struct adapter *padap, + struct cudbg_meminfo *meminfo, + u8 mem_type, const char *region_name, + struct cudbg_mem_desc *mem_desc) +{ + u8 mc, found = 0; + u32 i, idx = 0; + int rc; + + rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc); + if (rc) + return rc; + + for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) { + if (!strcmp(cudbg_region[i], region_name)) { + found = 1; + idx = i; + break; + } + } + if (!found) + return -EINVAL; + + found = 0; + for (i = 0; i < meminfo->mem_c; i++) { + if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region)) + continue; /* Skip holes */ + + if (!(meminfo->mem[i].limit)) + meminfo->mem[i].limit = + i < meminfo->mem_c - 1 ? + meminfo->mem[i + 1].base - 1 : ~0; + + if (meminfo->mem[i].idx == idx) { + /* Check if the region exists in @mem_type memory */ + if (meminfo->mem[i].base < meminfo->avail[mc].base && + meminfo->mem[i].limit < meminfo->avail[mc].base) + return -EINVAL; + + if (meminfo->mem[i].base > meminfo->avail[mc].limit) + return -EINVAL; + + memcpy(mem_desc, &meminfo->mem[i], + sizeof(struct cudbg_mem_desc)); + found = 1; + break; + } + } + if (!found) + return -EINVAL; + + return 0; +} + +/* Fetch and update the start and end of the requested memory region w.r.t 0 + * in the corresponding EDC/MC/HMA. + */ +static int cudbg_get_mem_relative(struct adapter *padap, + struct cudbg_meminfo *meminfo, + u8 mem_type, u32 *out_base, u32 *out_end) +{ + u8 mc_idx; + int rc; + + rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx); + if (rc) + return rc; + + if (*out_base < meminfo->avail[mc_idx].base) + *out_base = 0; + else + *out_base -= meminfo->avail[mc_idx].base; + + if (*out_end > meminfo->avail[mc_idx].limit) + *out_end = meminfo->avail[mc_idx].limit; + else + *out_end -= meminfo->avail[mc_idx].base; + + return 0; +} + +/* Get TX and RX Payload region */ +static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type, + const char *region_name, + struct cudbg_region_info *payload) +{ + struct cudbg_mem_desc mem_desc = { 0 }; + struct cudbg_meminfo meminfo; + int rc; + + rc = cudbg_fill_meminfo(padap, &meminfo); + if (rc) + return rc; + + rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name, + &mem_desc); + if (rc) { + payload->exist = false; + return 0; + } + + payload->exist = true; + payload->start = mem_desc.base; + payload->end = mem_desc.limit; + + return cudbg_get_mem_relative(padap, &meminfo, mem_type, + &payload->start, &payload->end); +} + +#define CUDBG_YIELD_ITERATION 256 + static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, u8 mem_type, unsigned long tot_len, struct cudbg_error *cudbg_err) { + static const char * const region_name[] = { "Tx payload:", + "Rx payload:" }; unsigned long bytes, bytes_left, bytes_read = 0; struct adapter *padap = pdbg_init->adap; struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_region_info payload[2]; + u32 yield_count = 0; int rc = 0; + u8 i; + + /* Get TX/RX Payload region range if they exist */ + memset(payload, 0, sizeof(payload)); + for (i = 0; i < ARRAY_SIZE(region_name); i++) { + rc = 
cudbg_get_payload_range(padap, mem_type, region_name[i], + &payload[i]); + if (rc) + return rc; + + if (payload[i].exist) { + /* Align start and end to avoid wrap around */ + payload[i].start = roundup(payload[i].start, + CUDBG_CHUNK_SIZE); + payload[i].end = rounddown(payload[i].end, + CUDBG_CHUNK_SIZE); + } + } bytes_left = tot_len; while (bytes_left > 0) { + /* As the MC size is huge and is read through PIO access, this + * loop will hold the CPU for a long time. The OS may think that + * the process is hung and will generate CPU stall traces. + * So yield the CPU regularly. + */ + yield_count++; + if (!(yield_count % CUDBG_YIELD_ITERATION)) + schedule(); + bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE); - rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff); if (rc) return rc; + + for (i = 0; i < ARRAY_SIZE(payload); i++) + if (payload[i].exist && + bytes_read >= payload[i].start && + bytes_read + bytes <= payload[i].end) + /* TX and RX Payload regions can't overlap */ + goto skip_read; + spin_lock(&padap->win0_lock); rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type, bytes_read, bytes, @@ -445,37 +944,23 @@ static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init, spin_unlock(&padap->win0_lock); if (rc) { cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } + +skip_read: bytes_left -= bytes; bytes_read += bytes; - cudbg_write_and_release_buff(&temp_buff, dbg_buff); + rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff, + dbg_buff); + if (rc) { + cudbg_put_buff(pdbg_init, &temp_buff); + return rc; + } } return rc; } -static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init, - struct card_mem *mem_info) -{ - struct adapter *padap = pdbg_init->adap; - u32 value; - - value = t4_read_reg(padap, MA_EDRAM0_BAR_A); - value = EDRAM0_SIZE_G(value); - mem_info->size_edc0 = (u16)value; - - value = t4_read_reg(padap, MA_EDRAM1_BAR_A); - value = EDRAM1_SIZE_G(value); - mem_info->size_edc1 = (u16)value; - - value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A); - if (value & EDRAM0_ENABLE_F) - mem_info->mem_flag |= (1 << EDC0_FLAG); - if (value & EDRAM1_ENABLE_F) - mem_info->mem_flag |= (1 << EDC1_FLAG); -} - static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init, struct cudbg_error *cudbg_err) { @@ -495,37 +980,25 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init, struct cudbg_error *cudbg_err, u8 mem_type) { - struct card_mem mem_info = {0}; - unsigned long flag, size; + struct adapter *padap = pdbg_init->adap; + struct cudbg_meminfo mem_info; + unsigned long size; + u8 mc_idx; int rc; + memset(&mem_info, 0, sizeof(struct cudbg_meminfo)); + rc = cudbg_fill_meminfo(padap, &mem_info); + if (rc) + return rc; + cudbg_t4_fwcache(pdbg_init, cudbg_err); - cudbg_collect_mem_info(pdbg_init, &mem_info); - switch (mem_type) { - case MEM_EDC0: - flag = (1 << EDC0_FLAG); - size = cudbg_mbytes_to_bytes(mem_info.size_edc0); - break; - case MEM_EDC1: - flag = (1 << EDC1_FLAG); - size = cudbg_mbytes_to_bytes(mem_info.size_edc1); - break; - default: - rc = CUDBG_STATUS_ENTITY_NOT_FOUND; - goto err; - } + rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx); + if (rc) + return rc; - if (mem_info.mem_flag & flag) { - rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, - size, cudbg_err); - if (rc) - goto err; - } else { - rc = CUDBG_STATUS_ENTITY_NOT_FOUND; - goto err; - } -err: - return rc; + size = mem_info.avail[mc_idx].limit -
mem_info.avail[mc_idx].base; + return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size, + cudbg_err); } int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init, @@ -544,26 +1017,51 @@ int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init, MEM_EDC1); } +int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err, + MEM_MC0); +} + +int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err, + MEM_MC1); +} + +int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err, + MEM_HMA); +} + int cudbg_collect_rss(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; struct cudbg_buffer temp_buff = { 0 }; - int rc; + int rc, nentries; - rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff); + nentries = t4_chip_rss_size(padap); + rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16), + &temp_buff); if (rc) return rc; rc = t4_read_rss(padap, (u16 *)temp_buff.data); if (rc) { cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init, @@ -576,7 +1074,7 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init, int vf, rc, vf_count; vf_count = padap->params.arch.vfcount; - rc = cudbg_get_buff(dbg_buff, + rc = cudbg_get_buff(pdbg_init, dbg_buff, vf_count * sizeof(struct cudbg_rss_vf_conf), &temp_buff); if (rc) @@ -586,8 +1084,7 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init, for (vf = 0; vf < vf_count; vf++) t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl, &vfconf[vf].rss_vf_vfh, true); - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init, @@ -598,13 +1095,13 @@ int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init, struct cudbg_buffer temp_buff = { 0 }; int rc; - rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16), + &temp_buff); if (rc) return rc; t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL); - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init, @@ -616,7 +1113,7 @@ int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init, struct cudbg_pm_stats *pm_stats_buff; int rc; - rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats), + rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats), &temp_buff); if (rc) return rc; @@ -624,8 +1121,7 @@ int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init, pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data; t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc); t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc); - 
cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init, @@ -640,7 +1136,7 @@ int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init, if (!padap->params.vpd.cclk) return CUDBG_STATUS_CCLK_NOT_DEFINED; - rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched), + rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched), &temp_buff); hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data; hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A); @@ -649,8 +1145,7 @@ int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init, for (i = 0; i < NTX_SCHED; ++i) t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i], &hw_sched_buff->ipg[i], true); - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, @@ -674,7 +1169,7 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, n = n / (IREG_NUM_ELEM * sizeof(u32)); size = sizeof(struct ireg_buf) * n; - rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; @@ -763,8 +1258,7 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, tp_pio->ireg_local_offset, true); ch_tp_pio++; } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, @@ -776,7 +1270,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, struct ireg_buf *ch_sge_dbg; int i, rc; - rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(*ch_sge_dbg) * 2, + &temp_buff); if (rc) return rc; @@ -797,8 +1292,7 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, sge_pio->ireg_local_offset); ch_sge_dbg++; } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init, @@ -810,7 +1304,7 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init, struct cudbg_ulprx_la *ulprx_la_buff; int rc; - rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la), + rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la), &temp_buff); if (rc) return rc; @@ -818,8 +1312,7 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init, ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data; t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data); ulprx_la_buff->size = ULPRX_LA_SIZE; - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_tp_la(struct cudbg_init *pdbg_init, @@ -832,15 +1325,39 @@ int cudbg_collect_tp_la(struct cudbg_init *pdbg_init, int size, rc; size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64); - rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; tp_la_buff = (struct cudbg_tp_la *)temp_buff.data; tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A)); t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL); - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, 
dbg_buff); +} + +int cudbg_collect_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_meminfo *meminfo_buff; + int rc; + + rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_meminfo), + &temp_buff); + if (rc) + return rc; + + meminfo_buff = (struct cudbg_meminfo *)temp_buff.data; + rc = cudbg_fill_meminfo(padap, meminfo_buff); + if (rc) { + cudbg_err->sys_err = rc; + cudbg_put_buff(pdbg_init, &temp_buff); + return rc; + } + + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init, @@ -854,7 +1371,7 @@ int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init, size = sizeof(struct cudbg_cim_pif_la) + 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32); - rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; @@ -863,8 +1380,7 @@ int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init, t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data, (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE, NULL, NULL); - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_clk_info(struct cudbg_init *pdbg_init, @@ -880,7 +1396,7 @@ int cudbg_collect_clk_info(struct cudbg_init *pdbg_init, if (!padap->params.vpd.cclk) return CUDBG_STATUS_CCLK_NOT_DEFINED; - rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info), + rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_clk_info), &temp_buff); if (rc) return rc; @@ -912,8 +1428,7 @@ int cudbg_collect_clk_info(struct cudbg_init *pdbg_init, clk_info_buff->finwait2_timer = tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A); - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init, @@ -928,7 +1443,7 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init, n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); size = sizeof(struct ireg_buf) * n * 2; - rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; @@ -969,8 +1484,7 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init, pcie_pio->ireg_local_offset); ch_pcie++; } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, @@ -985,7 +1499,7 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32)); size = sizeof(struct ireg_buf) * n * 2; - rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; @@ -1026,8 +1540,7 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, pm_pio->ireg_local_offset); ch_pm++; } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_tid(struct cudbg_init *pdbg_init, @@ -1041,7 +1554,8 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init, u32 para[2], val[2]; int rc; - rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1), + rc = 
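/* cudbg_collect_meminfo() above follows the shape every collector in
 * this file now uses: reserve a slice of the output buffer, fill it,
 * and either return it on failure or commit it (compressing first
 * when zlib is enabled). A collector-agnostic skeleton, with the fill
 * step elided:
 */
static int cudbg_collect_skeleton(struct cudbg_init *pdbg_init,
                                  struct cudbg_buffer *dbg_buff,
                                  struct cudbg_error *cudbg_err)
{
        struct cudbg_buffer temp_buff = { 0 };
        int rc;

        rc = cudbg_get_buff(pdbg_init, dbg_buff, 0 /* entity size */,
                            &temp_buff);
        if (rc)
                return rc;

        rc = 0; /* ... fill temp_buff.data from hardware here ... */
        if (rc) {
                cudbg_err->sys_err = rc;
                cudbg_put_buff(pdbg_init, &temp_buff);
                return rc;
        }

        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}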
cudbg_get_buff(pdbg_init, dbg_buff, + sizeof(struct cudbg_tid_info_region_rev1), &temp_buff); if (rc) return rc; @@ -1053,6 +1567,12 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init, tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) - sizeof(struct cudbg_ver_hdr); + /* If firmware is not attached/alive, use backdoor register + * access to collect dump. + */ + if (!is_fw_attached(pdbg_init)) + goto fill_tid; + #define FW_PARAM_PFVF_A(param) \ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \ @@ -1064,7 +1584,7 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init, rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val); if (rc < 0) { cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } tid->uotid_base = val[0]; @@ -1083,13 +1603,16 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init, para, val); if (rc < 0) { cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } tid->hpftid_base = val[0]; tid->nhpftids = val[1] - val[0] + 1; } +#undef FW_PARAM_PFVF_A + +fill_tid: tid->ntids = padap->tids.ntids; tid->nstids = padap->tids.nstids; tid->stid_base = padap->tids.stid_base; @@ -1109,28 +1632,137 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init, tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A); tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A); -#undef FW_PARAM_PFVF_A + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); +} - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; +int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + u32 size, *value, j; + int i, rc, n; + + size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS; + n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32)); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); + if (rc) + return rc; + + value = (u32 *)temp_buff.data; + for (i = 0; i < n; i++) { + for (j = t5_pcie_config_array[i][0]; + j <= t5_pcie_config_array[i][1]; j += 4) { + t4_hw_pci_read_cfg4(padap, j, value); + value++; + } + } + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } -int cudbg_dump_context_size(struct adapter *padap) +static int cudbg_sge_ctxt_check_valid(u32 *buf, int type) +{ + int index, bit, bit_pos = 0; + + switch (type) { + case CTXT_EGRESS: + bit_pos = 176; + break; + case CTXT_INGRESS: + bit_pos = 141; + break; + case CTXT_FLM: + bit_pos = 89; + break; + } + index = bit_pos / 32; + bit = bit_pos % 32; + return buf[index] & (1U << bit); +} + +static int cudbg_get_ctxt_region_info(struct adapter *padap, + struct cudbg_region_info *ctx_info, + u8 *mem_type) { - u32 value, size; + struct cudbg_mem_desc mem_desc; + struct cudbg_meminfo meminfo; + u32 i, j, value, found; u8 flq; + int rc; + + rc = cudbg_fill_meminfo(padap, &meminfo); + if (rc) + return rc; + + /* Get EGRESS and INGRESS context region size */ + for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) { + found = 0; + memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc)); + for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) { + rc = cudbg_get_mem_region(padap, &meminfo, j, + cudbg_region[i], + &mem_desc); + if (!rc) { + found = 1; + rc = cudbg_get_mem_relative(padap, &meminfo, j, + &mem_desc.base, + &mem_desc.limit); + if (rc) { + ctx_info[i].exist = false; + 
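/* The valid-bit probe in cudbg_sge_ctxt_check_valid() above is plain
 * bit indexing into the raw context image: for example, the egress
 * valid bit at position 176 lives in word 176 / 32 = 5, bit
 * 176 % 32 = 16. Equivalently:
 */
static bool cudbg_ctxt_bit_set_sketch(const u32 *buf, int bit_pos)
{
        return buf[bit_pos / 32] & (1U << (bit_pos % 32));
}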
break; + } + ctx_info[i].exist = true; + ctx_info[i].start = mem_desc.base; + ctx_info[i].end = mem_desc.limit; + mem_type[i] = j; + break; + } + } + if (!found) + ctx_info[i].exist = false; + } + /* Get FLM and CNM max qid. */ value = t4_read_reg(padap, SGE_FLM_CFG_A); /* Get number of data freelist queues */ flq = HDRSTARTFLQ_G(value); - size = CUDBG_MAX_FL_QIDS >> flq; + ctx_info[CTXT_FLM].exist = true; + ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE; - /* Add extra space for congestion manager contexts. - * The number of CONM contexts are same as number of freelist + /* The number of CONM contexts is the same as the number of freelist * queues. */ - size += size; + ctx_info[CTXT_CNM].exist = true; + ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end; + + return 0; +} + +int cudbg_dump_context_size(struct adapter *padap) +{ + struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} }; + u8 mem_type[CTXT_INGRESS + 1] = { 0 }; + u32 i, size = 0; + int rc; + + /* Get max valid qid for each type of queue */ + rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type); + if (rc) + return rc; + + for (i = 0; i < CTXT_CNM; i++) { + if (!region_info[i].exist) { + if (i == CTXT_EGRESS || i == CTXT_INGRESS) + size += CUDBG_LOWMEM_MAX_CTXT_QIDS * + SGE_CTXT_SIZE; + continue; + } + + size += (region_info[i].end - region_info[i].start + 1) / + SGE_CTXT_SIZE; + } return size * sizeof(struct cudbg_ch_cntxt); } @@ -1153,44 +1785,144 @@ static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid, t4_sge_ctxt_rd_bd(padap, cid, ctype, data); } +static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid, + u8 ctxt_type, + struct cudbg_ch_cntxt **out_buff) +{ + struct cudbg_ch_cntxt *buff = *out_buff; + int rc; + u32 j; + + for (j = 0; j < max_qid; j++) { + cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data); + rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type); + if (!rc) + continue; + + buff->cntxt_type = ctxt_type; + buff->cntxt_id = j; + buff++; + if (ctxt_type == CTXT_FLM) { + cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data); + buff->cntxt_type = CTXT_CNM; + buff->cntxt_id = j; + buff++; + } + } + + *out_buff = buff; +} + int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) { + struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} }; struct adapter *padap = pdbg_init->adap; + u32 j, size, max_ctx_size, max_ctx_qid; + u8 mem_type[CTXT_INGRESS + 1] = { 0 }; struct cudbg_buffer temp_buff = { 0 }; struct cudbg_ch_cntxt *buff; - u32 size, i = 0; + u64 *dst_off, *src_off; + u8 *ctx_buf; + u8 i, k; int rc; + /* Get max valid qid for each type of queue */ + rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type); + if (rc) + return rc; + rc = cudbg_dump_context_size(padap); if (rc <= 0) return CUDBG_STATUS_ENTITY_NOT_FOUND; size = rc; - rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; + /* Get buffer with enough space to read the biggest context + * region in memory. 
+ */ + max_ctx_size = max(region_info[CTXT_EGRESS].end - + region_info[CTXT_EGRESS].start + 1, + region_info[CTXT_INGRESS].end - + region_info[CTXT_INGRESS].start + 1); + + ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL); + if (!ctx_buf) { + cudbg_put_buff(pdbg_init, &temp_buff); + return -ENOMEM; + } + buff = (struct cudbg_ch_cntxt *)temp_buff.data; - while (size > 0) { - buff->cntxt_type = CTXT_FLM; - buff->cntxt_id = i; - cudbg_read_sge_ctxt(pdbg_init, i, CTXT_FLM, buff->data); - buff++; - size -= sizeof(struct cudbg_ch_cntxt); - buff->cntxt_type = CTXT_CNM; - buff->cntxt_id = i; - cudbg_read_sge_ctxt(pdbg_init, i, CTXT_CNM, buff->data); - buff++; - size -= sizeof(struct cudbg_ch_cntxt); + /* Collect EGRESS and INGRESS context data. + * In case of failures, fall back to collecting via FW or + * backdoor access. + */ + for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) { + if (!region_info[i].exist) { + max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS; + cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i, + &buff); + continue; + } - i++; + max_ctx_size = region_info[i].end - region_info[i].start + 1; + max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE; + + /* If firmware is not attached/alive, use backdoor register + * access to collect dump. + */ + if (is_fw_attached(pdbg_init)) { + t4_sge_ctxt_flush(padap, padap->mbox, i); + + rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i], + region_info[i].start, max_ctx_size, + (__be32 *)ctx_buf, 1); + } + + if (rc || !is_fw_attached(pdbg_init)) { + max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS; + cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i, + &buff); + continue; + } + + for (j = 0; j < max_ctx_qid; j++) { + src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE); + dst_off = (u64 *)buff->data; + + /* The data is stored in 64-bit CPU order. Convert it + * to big endian before parsing. + */ + for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++) + dst_off[k] = cpu_to_be64(src_off[k]); + + rc = cudbg_sge_ctxt_check_valid(buff->data, i); + if (!rc) + continue; + + buff->cntxt_type = i; + buff->cntxt_id = j; + buff++; + } } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + kvfree(ctx_buf); + + /* Collect FREELIST and CONGESTION MANAGER contexts */ + max_ctx_size = region_info[CTXT_FLM].end - + region_info[CTXT_FLM].start + 1; + max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE; + /* Since FLM and CONM are 1-to-1 mapped, the below function + * will fetch both FLM and CONM contexts. + */ + cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff); + + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask) @@ -1228,9 +1960,10 @@ static void cudbg_mps_rpl_backdoor(struct adapter *padap, mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A)); } -static int cudbg_collect_tcam_index(struct adapter *padap, +static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init, struct cudbg_mps_tcam *tcam, u32 idx) { + struct adapter *padap = pdbg_init->adap; u64 tcamy, tcamx, val; u32 ctl, data2; int rc = 0; @@ -1315,12 +2048,22 @@ static int cudbg_collect_tcam_index(struct adapter *padap, htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) | FW_LDST_CMD_IDX_V(idx)); - rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd), - &ldst_cmd); - if (rc) + /* If firmware is not attached/alive, use backdoor register + * access to collect dump. 
+ */ + if (is_fw_attached(pdbg_init)) + rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, + sizeof(ldst_cmd), &ldst_cmd); + + if (rc || !is_fw_attached(pdbg_init)) { cudbg_mps_rpl_backdoor(padap, &mps_rplc); - else + /* Ignore error since we collected directly from + * reading registers. + */ + rc = 0; + } else { mps_rplc = ldst_cmd.u.mps.rplc; + } tcam->rplc[0] = ntohl(mps_rplc.rplc31_0); tcam->rplc[1] = ntohl(mps_rplc.rplc63_32); @@ -1351,16 +2094,16 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init, n = padap->params.arch.mps_tcam_size; size = sizeof(struct cudbg_mps_tcam) * n; - rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; tcam = (struct cudbg_mps_tcam *)temp_buff.data; for (i = 0; i < n; i++) { - rc = cudbg_collect_tcam_index(padap, tcam, i); + rc = cudbg_collect_tcam_index(pdbg_init, tcam, i); if (rc) { cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } total_size += sizeof(struct cudbg_mps_tcam); @@ -1370,11 +2113,10 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init, if (!total_size) { rc = CUDBG_SYSTEM_ERROR; cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, @@ -1425,7 +2167,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, if (rc) return rc; - rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data), + rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data), &temp_buff); if (rc) return rc; @@ -1441,8 +2183,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers); vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers); vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers); - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid, @@ -1593,7 +2334,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid; size += sizeof(struct cudbg_tcam); - rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; @@ -1605,7 +2346,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, rc = cudbg_read_tid(pdbg_init, i, tid_data); if (rc) { cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } @@ -1616,8 +2357,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, bytes += sizeof(struct cudbg_tid_data); } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_cctrl(struct cudbg_init *pdbg_init, @@ -1630,13 +2370,12 @@ int cudbg_collect_cctrl(struct cudbg_init *pdbg_init, int rc; size = sizeof(u16) * NMTUS * NCCTRL_WIN; - rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; t4_read_cong_tbl(padap, (void *)temp_buff.data); - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int 
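/* The MPS replication read above is one of several paths in this
 * patch (TID info, SGE contexts) that share the same fallback shape:
 * use the firmware mailbox only while firmware is attached, otherwise
 * read registers directly and clear rc, since the backdoor path does
 * not report failure. Sketched with hypothetical fw_read() and
 * backdoor_read() stand-ins for t4_wr_mbox() and the *_backdoor()
 * helpers:
 */
static int cudbg_fw_or_backdoor_sketch(struct cudbg_init *pdbg_init,
                                       struct adapter *padap, u32 *val)
{
        int rc = 0;

        if (is_fw_attached(pdbg_init))
                rc = fw_read(padap, val);       /* mailbox path */

        if (rc || !is_fw_attached(pdbg_init)) {
                backdoor_read(padap, val);      /* direct registers */
                rc = 0;
        }
        return rc;
}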
cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, @@ -1654,7 +2393,7 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32)); size = sizeof(struct ireg_buf) * n * 2; - rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; @@ -1690,8 +2429,7 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, } ma_indr++; } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, @@ -1704,7 +2442,7 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, u32 i, j; int rc; - rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la), + rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la), &temp_buff); if (rc) return rc; @@ -1725,8 +2463,7 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, t4_read_reg(padap, ULP_TX_LA_RDDATA_0_A + 0x10 * i); } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, @@ -1735,13 +2472,23 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, { struct adapter *padap = pdbg_init->adap; struct cudbg_buffer temp_buff = { 0 }; + u32 local_offset, local_range; struct ireg_buf *up_cim; + u32 size, j, iter; + u32 instance = 0; int i, rc, n; - u32 size; - n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32)); + if (is_t5(padap->params.chip)) + n = sizeof(t5_up_cim_reg_array) / + ((IREG_NUM_ELEM + 1) * sizeof(u32)); + else if (is_t6(padap->params.chip)) + n = sizeof(t6_up_cim_reg_array) / + ((IREG_NUM_ELEM + 1) * sizeof(u32)); + else + return CUDBG_STATUS_NOT_IMPLEMENTED; + size = sizeof(struct ireg_buf) * n; - rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; @@ -1757,6 +2504,7 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, t5_up_cim_reg_array[i][2]; up_cim_reg->ireg_offset_range = t5_up_cim_reg_array[i][3]; + instance = t5_up_cim_reg_array[i][4]; } else if (is_t6(padap->params.chip)) { up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0]; up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1]; @@ -1764,18 +2512,39 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, t6_up_cim_reg_array[i][2]; up_cim_reg->ireg_offset_range = t6_up_cim_reg_array[i][3]; + instance = t6_up_cim_reg_array[i][4]; } - rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset, - up_cim_reg->ireg_offset_range, buff); - if (rc) { - cudbg_put_buff(&temp_buff, dbg_buff); - return rc; + switch (instance) { + case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES: + iter = up_cim_reg->ireg_offset_range; + local_offset = 0x120; + local_range = 1; + break; + case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES: + iter = up_cim_reg->ireg_offset_range; + local_offset = 0x10; + local_range = 1; + break; + default: + iter = 1; + local_offset = 0; + local_range = up_cim_reg->ireg_offset_range; + break; + } + + for (j = 0; j < iter; j++, buff++) { + rc = t4_cim_read(padap, + up_cim_reg->ireg_local_offset + + (j * local_offset), local_range, buff); + if (rc) { + cudbg_put_buff(pdbg_init, &temp_buff); + return rc; + } } up_cim++; } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, 
&temp_buff, dbg_buff); } int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init, @@ -1788,7 +2557,8 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init, int i, rc; u32 addr; - rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables), + rc = cudbg_get_buff(pdbg_init, dbg_buff, + sizeof(struct cudbg_pbt_tables), &temp_buff); if (rc) return rc; @@ -1801,7 +2571,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init, &pbt->pbt_dynamic[i]); if (rc) { cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } } @@ -1814,7 +2584,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init, &pbt->pbt_static[i]); if (rc) { cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } } @@ -1826,7 +2596,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init, &pbt->lrf_table[i]); if (rc) { cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } } @@ -1838,12 +2608,11 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init, &pbt->pbt_data[i]); if (rc) { cudbg_err->sys_err = rc; - cudbg_put_buff(&temp_buff, dbg_buff); + cudbg_put_buff(pdbg_init, &temp_buff); return rc; } } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, @@ -1864,7 +2633,7 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, log = padap->mbox_log; mbox_cmds = padap->mbox_log->size; size = sizeof(struct cudbg_mbox_log) * mbox_cmds; - rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; @@ -1887,8 +2656,7 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, } mboxlog++; } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, @@ -1906,7 +2674,7 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32)); size = sizeof(struct ireg_buf) * n; - rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc; @@ -1924,6 +2692,5 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, hma_fli->ireg_local_offset); hma_indr++; } - cudbg_write_and_release_buff(&temp_buff, dbg_buff); - return rc; + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index caeee8e33e86..eebefe7cd18e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -75,6 +75,12 @@ int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init, int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_rss(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); @@ -102,6 +108,9 @@ int 
cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init, int cudbg_collect_tp_la(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); @@ -123,6 +132,9 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, int cudbg_collect_tid(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); @@ -156,6 +168,9 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i); void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, @@ -163,7 +178,8 @@ void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, u32 cudbg_cim_obq_size(struct adapter *padap, int qid); int cudbg_dump_context_size(struct adapter *padap); -struct cudbg_tcam; +int cudbg_fill_meminfo(struct adapter *padap, + struct cudbg_meminfo *meminfo_buff); void cudbg_fill_le_tcam_info(struct adapter *padap, struct cudbg_tcam *tcam_region); #endif /* __CUDBG_LIB_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h index 24b33f28e548..8150ea85d6a5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h @@ -26,6 +26,7 @@ enum cudbg_dump_type { enum cudbg_compression_type { CUDBG_COMPRESSION_NONE = 1, + CUDBG_COMPRESSION_ZLIB, }; struct cudbg_hdr { @@ -78,10 +79,11 @@ struct cudbg_error { #define CDUMP_MAX_COMP_BUF_SIZE ((64 * 1024) - 1) #define CUDBG_CHUNK_SIZE ((CDUMP_MAX_COMP_BUF_SIZE / 1024) * 1024) -int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size, +int cudbg_get_buff(struct cudbg_init *pdbg_init, + struct cudbg_buffer *pdbg_buff, u32 size, struct cudbg_buffer *pin_buff); -void cudbg_put_buff(struct cudbg_buffer *pin_buff, - struct cudbg_buffer *pdbg_buff); +void cudbg_put_buff(struct cudbg_init *pdbg_init, + struct cudbg_buffer *pin_buff); void cudbg_update_buff(struct cudbg_buffer *pin_buff, struct cudbg_buffer *pout_buff); #endif /* __CUDBG_LIB_COMMON_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c new file mode 100644 index 000000000000..25cc06d75cff --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2018 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include <linux/zlib.h> + +#include "cxgb4.h" +#include "cudbg_if.h" +#include "cudbg_lib_common.h" +#include "cudbg_zlib.h" + +static int cudbg_get_compress_hdr(struct cudbg_buffer *pdbg_buff, + struct cudbg_buffer *pin_buff) +{ + if (pdbg_buff->offset + sizeof(struct cudbg_compress_hdr) > + pdbg_buff->size) + return CUDBG_STATUS_NO_MEM; + + pin_buff->data = (char *)pdbg_buff->data + pdbg_buff->offset; + pin_buff->offset = 0; + pin_buff->size = sizeof(struct cudbg_compress_hdr); + pdbg_buff->offset += sizeof(struct cudbg_compress_hdr); + return 0; +} + +int cudbg_compress_buff(struct cudbg_init *pdbg_init, + struct cudbg_buffer *pin_buff, + struct cudbg_buffer *pout_buff) +{ + struct cudbg_buffer temp_buff = { 0 }; + struct z_stream_s compress_stream; + struct cudbg_compress_hdr *c_hdr; + int rc; + + /* Write compression header to output buffer before compression */ + rc = cudbg_get_compress_hdr(pout_buff, &temp_buff); + if (rc) + return rc; + + c_hdr = (struct cudbg_compress_hdr *)temp_buff.data; + c_hdr->compress_id = CUDBG_ZLIB_COMPRESS_ID; + + memset(&compress_stream, 0, sizeof(struct z_stream_s)); + compress_stream.workspace = pdbg_init->workspace; + rc = zlib_deflateInit2(&compress_stream, Z_DEFAULT_COMPRESSION, + Z_DEFLATED, CUDBG_ZLIB_WIN_BITS, + CUDBG_ZLIB_MEM_LVL, Z_DEFAULT_STRATEGY); + if (rc != Z_OK) + return CUDBG_SYSTEM_ERROR; + + compress_stream.next_in = pin_buff->data; + compress_stream.avail_in = pin_buff->size; + compress_stream.next_out = pout_buff->data + pout_buff->offset; + compress_stream.avail_out = pout_buff->size - pout_buff->offset; + + rc = zlib_deflate(&compress_stream, Z_FINISH); + if (rc != Z_STREAM_END) + return CUDBG_SYSTEM_ERROR; + + rc = zlib_deflateEnd(&compress_stream); + if (rc != Z_OK) + return CUDBG_SYSTEM_ERROR; + + c_hdr->compress_size = compress_stream.total_out; + c_hdr->decompress_size = pin_buff->size; + pout_buff->offset += compress_stream.total_out; + + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h new file mode 100644 index 000000000000..60d23805dfc3 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2018 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + +#ifndef __CUDBG_ZLIB_H__ +#define __CUDBG_ZLIB_H__ + +#include <linux/zlib.h> + +#define CUDBG_ZLIB_COMPRESS_ID 17 +#define CUDBG_ZLIB_WIN_BITS 12 +#define CUDBG_ZLIB_MEM_LVL 4 + +struct cudbg_compress_hdr { + u32 compress_id; + u64 decompress_size; + u64 compress_size; + u64 rsvd[32]; +}; + +static inline int cudbg_get_workspace_size(void) +{ + return zlib_deflate_workspacesize(CUDBG_ZLIB_WIN_BITS, + CUDBG_ZLIB_MEM_LVL); +} + +int cudbg_compress_buff(struct cudbg_init *pdbg_init, + struct cudbg_buffer *pin_buff, + struct cudbg_buffer *pout_buff); +#endif /* __CUDBG_ZLIB_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 351f4bf37ca9..9040e13ce4b7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -84,7 +84,8 @@ enum { MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, - MEM_MC1 + MEM_MC1, + MEM_HMA, }; enum { @@ -318,6 +319,7 @@ struct vpd_params { }; struct pci_params { + unsigned int vpd_cap_addr; unsigned char speed; unsigned char width; }; @@ -826,12 +828,17 @@ struct vf_info { unsigned char vf_mac_addr[ETH_ALEN]; unsigned int tx_rate; bool pf_set_mac; + u16 vlan; }; struct mbox_list { struct list_head list; }; +struct mps_encap_entry { + atomic_t refcnt; +}; + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -846,6 +853,10 @@ struct adapter { enum chip_type chip; int msg_enable; + __be16 vxlan_port; + u8 vxlan_port_cnt; + __be16 geneve_port; + u8 geneve_port_cnt; struct adapter_params params; struct cxgb4_virt_res vres; @@ -875,7 +886,10 @@ struct adapter { unsigned int clipt_start; unsigned int clipt_end; struct clip_tbl *clipt; + unsigned int rawf_start; + unsigned int rawf_cnt; struct smt_data *smt; + struct mps_encap_entry *mps_encap; struct cxgb4_uld_info *uld; void *uld_handle[CXGB4_ULD_MAX]; unsigned int num_uld; @@ -1317,6 +1331,7 @@ void t4_sge_start(struct adapter *adap); void t4_sge_stop(struct adapter *adap); void cxgb4_set_ethtool_ops(struct net_device *netdev); int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); +enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb); extern int dbfifo_int_thresh; #define for_each_port(adapter, iter) \ @@ -1435,6 +1450,21 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, q->size = size; } +/** + * t4_is_inserted_mod_type - is a plugged-in Firmware Module Type + * @fw_mod_type: the Firmware Module Type + * + * Return whether the Firmware Module Type represents a real Transceiver + * Module/Cable Module Type which has been inserted. 
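/* Each compressed chunk in the dump is preceded by a
 * cudbg_compress_hdr, so chunks can be inflated independently. A
 * sketch of the inverse of cudbg_compress_buff() using the kernel
 * zlib API; the driver does not ship this, and real dumps are
 * normally inflated by userspace tools.
 */
static int cudbg_decompress_chunk_sketch(struct cudbg_compress_hdr *c_hdr,
                                         void *out, void *workspace)
{
        struct z_stream_s s = { 0 };
        int rc;

        if (c_hdr->compress_id != CUDBG_ZLIB_COMPRESS_ID)
                return -EINVAL;

        /* workspace must be zlib_inflate_workspacesize() bytes */
        s.workspace = workspace;
        if (zlib_inflateInit2(&s, CUDBG_ZLIB_WIN_BITS) != Z_OK)
                return -EIO;

        s.next_in = (u8 *)(c_hdr + 1);  /* payload follows the header */
        s.avail_in = c_hdr->compress_size;
        s.next_out = out;
        s.avail_out = c_hdr->decompress_size;

        rc = zlib_inflate(&s, Z_FINISH);
        zlib_inflateEnd(&s);
        return rc == Z_STREAM_END ? 0 : -EIO;
}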
+ */ +static inline bool t4_is_inserted_mod_type(unsigned int fw_mod_type) +{ + return (fw_mod_type != FW_PORT_MOD_TYPE_NONE && + fw_mod_type != FW_PORT_MOD_TYPE_NOTSUPPORTED && + fw_mod_type != FW_PORT_MOD_TYPE_UNKNOWN && + fw_mod_type != FW_PORT_MOD_TYPE_ERROR); +} + void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, const u32 *vals, unsigned int nregs, unsigned int start_idx); @@ -1524,6 +1554,7 @@ int t4_init_portinfo(struct port_info *pi, int mbox, int port, int pf, int vf, u8 mac[]); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); void t4_fatal_err(struct adapter *adapter); +unsigned int t4_chip_rss_size(struct adapter *adapter); int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, int start, int n, const u16 *rspq, unsigned int nrspq); int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, @@ -1633,6 +1664,12 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, int mtu, int promisc, int all_multi, int bcast, int vlanex, bool sleep_ok); +int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int idx, + u8 lookup_type, u8 port_id, bool sleep_ok); +int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int idx, + u8 lookup_type, u8 port_id, bool sleep_ok); int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid, bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); @@ -1665,7 +1702,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); -int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox); +int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type); void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl); int t4_update_port_info(struct port_info *pi); int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, @@ -1708,6 +1745,9 @@ void t4_uld_mem_free(struct adapter *adap); int t4_uld_mem_alloc(struct adapter *adap); void t4_uld_clean_up(struct adapter *adap); void t4_register_netevent_notifier(void); +int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port, + unsigned int devid, unsigned int offset, + unsigned int len, u8 *buf); void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl); void free_tx_desc(struct adapter *adap, struct sge_txq *q, unsigned int n, bool unmap); @@ -1722,4 +1762,6 @@ void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q, struct ulptx_sgl *sgl, u64 *end, unsigned int start, const dma_addr_t *addr); void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n); +int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf, + u16 vlan); #endif /* __CXGB4_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 29cc625e9833..30485f9a598f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -18,11 +18,14 @@ #include "t4_regs.h" #include "cxgb4.h" #include "cxgb4_cudbg.h" -#include "cudbg_entity.h" +#include "cudbg_zlib.h" static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = 
{ { CUDBG_EDC0, cudbg_collect_edc0_meminfo }, { CUDBG_EDC1, cudbg_collect_edc1_meminfo }, + { CUDBG_MC0, cudbg_collect_mc0_meminfo }, + { CUDBG_MC1, cudbg_collect_mc1_meminfo }, + { CUDBG_HMA, cudbg_collect_hma_meminfo }, }; static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { @@ -53,6 +56,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect }, { CUDBG_ULPRX_LA, cudbg_collect_ulprx_la }, { CUDBG_TP_LA, cudbg_collect_tp_la }, + { CUDBG_MEMINFO, cudbg_collect_meminfo }, { CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la }, { CUDBG_CLK, cudbg_collect_clk_info }, { CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 }, @@ -60,6 +64,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect }, { CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect }, { CUDBG_TID_INFO, cudbg_collect_tid }, + { CUDBG_PCIE_CONFIG, cudbg_collect_pcie_config }, { CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context }, { CUDBG_MPS_TCAM, cudbg_collect_mps_tcam }, { CUDBG_VPD_DATA, cudbg_collect_vpd_data }, @@ -158,8 +163,24 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) } len = cudbg_mbytes_to_bytes(len); break; + case CUDBG_MC0: + value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (value & EXT_MEM0_ENABLE_F) { + value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); + len = EXT_MEM0_SIZE_G(value); + } + len = cudbg_mbytes_to_bytes(len); + break; + case CUDBG_MC1: + value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (value & EXT_MEM1_ENABLE_F) { + value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + len = EXT_MEM1_SIZE_G(value); + } + len = cudbg_mbytes_to_bytes(len); + break; case CUDBG_RSS: - len = RSS_NENTRIES * sizeof(u16); + len = t4_chip_rss_size(adap) * sizeof(u16); break; case CUDBG_RSS_VF_CONF: len = adap->params.arch.vfcount * @@ -201,6 +222,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) case CUDBG_TP_LA: len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64); break; + case CUDBG_MEMINFO: + len = sizeof(struct cudbg_meminfo); + break; case CUDBG_CIM_PIF_LA: len = sizeof(struct cudbg_cim_pif_la); len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32); @@ -219,6 +243,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) case CUDBG_TID_INFO: len = sizeof(struct cudbg_tid_info_region_rev1); break; + case CUDBG_PCIE_CONFIG: + len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS; + break; case CUDBG_DUMP_CONTEXT: len = cudbg_dump_context_size(adap); break; @@ -248,7 +275,13 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) len = sizeof(struct cudbg_ulptx_la); break; case CUDBG_UP_CIM_INDIRECT: - n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32)); + n = 0; + if (is_t5(adap->params.chip)) + n = sizeof(t5_up_cim_reg_array) / + ((IREG_NUM_ELEM + 1) * sizeof(u32)); + else if (is_t6(adap->params.chip)) + n = sizeof(t6_up_cim_reg_array) / + ((IREG_NUM_ELEM + 1) * sizeof(u32)); len = sizeof(struct ireg_buf) * n; break; case CUDBG_PBT_TABLE: @@ -264,6 +297,17 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) len = sizeof(struct ireg_buf) * n; } break; + case CUDBG_HMA: + value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (value & HMA_MUX_F) { + /* In T6, there's no MC1. So, HMA shares MC1 + * address space. 
+ */ + value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + len = EXT_MEM1_SIZE_G(value); + } + len = cudbg_mbytes_to_bytes(len); + break; default: break; } @@ -275,6 +319,7 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag) { u32 i, entity; u32 len = 0; + u32 wsize; if (flag & CXGB4_ETH_DUMP_HW) { for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) { @@ -290,6 +335,11 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag) } } + /* If compression is enabled, a smaller destination buffer is enough */ + wsize = cudbg_get_workspace_size(); + if (wsize && len > CUDBG_DUMP_BUFF_SIZE) + len = CUDBG_DUMP_BUFF_SIZE; + return len; } @@ -298,22 +348,14 @@ static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init, const struct cxgb4_collect_entity *e_arr, u32 arr_size, void *buf, u32 *tot_size) { - struct adapter *adap = pdbg_init->adap; struct cudbg_error cudbg_err = { 0 }; struct cudbg_entity_hdr *entity_hdr; - u32 entity_size, i; - u32 total_size = 0; + u32 i, total_size = 0; int ret; for (i = 0; i < arr_size; i++) { const struct cxgb4_collect_entity *e = &e_arr[i]; - /* Skip entities that won't fit in output buffer */ - entity_size = cxgb4_get_entity_length(adap, e->entity); - if (entity_size > - pdbg_init->outbuf_size - *tot_size - total_size) - continue; - entity_hdr = cudbg_get_entity_hdr(buf, e->entity); entity_hdr->entity_type = e->entity; entity_hdr->start_offset = dbg_buff->offset; @@ -339,16 +381,40 @@ static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init, *tot_size += total_size; } +static int cudbg_alloc_compress_buff(struct cudbg_init *pdbg_init) +{ + u32 workspace_size; + + workspace_size = cudbg_get_workspace_size(); + pdbg_init->compress_buff = vzalloc(CUDBG_COMPRESS_BUFF_SIZE + + workspace_size); + if (!pdbg_init->compress_buff) + return -ENOMEM; + + pdbg_init->compress_buff_size = CUDBG_COMPRESS_BUFF_SIZE; + pdbg_init->workspace = (u8 *)pdbg_init->compress_buff + + CUDBG_COMPRESS_BUFF_SIZE - workspace_size; + return 0; +} + +static void cudbg_free_compress_buff(struct cudbg_init *pdbg_init) +{ + if (pdbg_init->compress_buff) + vfree(pdbg_init->compress_buff); +} + int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size, u32 flag) { - struct cudbg_init cudbg_init = { 0 }; struct cudbg_buffer dbg_buff = { 0 }; u32 size, min_size, total_size = 0; + struct cudbg_init cudbg_init; struct cudbg_hdr *cudbg_hdr; + int rc; size = *buf_size; + memset(&cudbg_init, 0, sizeof(struct cudbg_init)); cudbg_init.adap = adap; cudbg_init.outbuf = buf; cudbg_init.outbuf_size = size; @@ -365,7 +431,6 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size, cudbg_hdr->max_entities = CUDBG_MAX_ENTITY; cudbg_hdr->chip_ver = adap->params.chip; cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI; - cudbg_hdr->compress_type = CUDBG_COMPRESSION_NONE; min_size = sizeof(struct cudbg_hdr) + sizeof(struct cudbg_entity_hdr) * @@ -373,6 +438,24 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size, if (size < min_size) return -ENOMEM; + rc = cudbg_get_workspace_size(); + if (rc) { + /* Zlib available. So, use zlib deflate */ + cudbg_init.compress_type = CUDBG_COMPRESSION_ZLIB; + rc = cudbg_alloc_compress_buff(&cudbg_init); + if (rc) { + /* Ignore error and continue without compression. */ + dev_warn(adap->pdev_dev, + "Fail allocating compression buffer ret: %d. 
Continuing without compression.\n", + rc); + cudbg_init.compress_type = CUDBG_COMPRESSION_NONE; + rc = 0; + } + } else { + cudbg_init.compress_type = CUDBG_COMPRESSION_NONE; + } + + cudbg_hdr->compress_type = cudbg_init.compress_type; dbg_buff.offset += min_size; total_size = dbg_buff.offset; @@ -390,8 +473,12 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size, buf, &total_size); + cudbg_free_compress_buff(&cudbg_init); cudbg_hdr->data_len = total_size; - *buf_size = total_size; + if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE) + *buf_size = size; + else + *buf_size = total_size; return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h index c099b5aa2214..ce1ac9a1c878 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h @@ -20,8 +20,12 @@ #include "cudbg_if.h" #include "cudbg_lib_common.h" +#include "cudbg_entity.h" #include "cudbg_lib.h" +#define CUDBG_DUMP_BUFF_SIZE (32 * 1024 * 1024) /* 32 MB */ +#define CUDBG_COMPRESS_BUFF_SIZE (4 * 1024 * 1024) /* 4 MB */ + typedef int (*cudbg_collect_callback_t)(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index cf471831ee71..2822bbff73e8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -45,6 +45,10 @@ #include "cxgb4_debugfs.h" #include "clip_tbl.h" #include "l2t.h" +#include "cudbg_if.h" +#include "cudbg_lib_common.h" +#include "cudbg_entity.h" +#include "cudbg_lib.h" /* generic seq_file support for showing a table of size rows x width. */ static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos) @@ -1739,7 +1743,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v) */ if (lookup_type && (lookup_type != DATALKPTYPE_M)) { /* Inner header VNI */ - vniy = ((data2 & DATAVIDH2_F) << 23) | + vniy = (data2 & DATAVIDH2_F) | (DATAVIDH1_G(data2) << 16) | VIDL_G(val); dip_hit = data2 & DATADIPHIT_F; } else { @@ -1749,6 +1753,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v) port_num = DATAPORTNUM_G(data2); /* Read tcamx. Change the control param */ + vnix = 0; ctl |= CTLXYBITSEL_V(1); t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl); val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A); @@ -1757,7 +1762,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v) data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A); if (lookup_type && (lookup_type != DATALKPTYPE_M)) { /* Inner header VNI mask */ - vnix = ((data2 & DATAVIDH2_F) << 23) | + vnix = (data2 & DATAVIDH2_F) | (DATAVIDH1_G(data2) << 16) | VIDL_G(val); } } else { @@ -1830,7 +1835,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v) addr[1], addr[2], addr[3], addr[4], addr[5], (unsigned long long)mask, - vniy, vnix, dip_hit ? 'Y' : 'N', + vniy, (vnix | vniy), + dip_hit ? 'Y' : 'N', port_num, (cls_lo & T6_SRAM_VLD_F) ? 
'Y' : 'N', PORTMAP_G(cls_hi), @@ -2017,11 +2023,12 @@ static int rss_show(struct seq_file *seq, void *v, int idx) static int rss_open(struct inode *inode, struct file *file) { - int ret; - struct seq_tab *p; struct adapter *adap = inode->i_private; + int ret, nentries; + struct seq_tab *p; - p = seq_open_tab(file, RSS_NENTRIES / 8, 8 * sizeof(u16), 0, rss_show); + nentries = t4_chip_rss_size(adap); + p = seq_open_tab(file, nentries / 8, 8 * sizeof(u16), 0, rss_show); if (!p) return -ENOMEM; @@ -2664,10 +2671,14 @@ static const struct file_operations mem_debugfs_fops = { static int tid_info_show(struct seq_file *seq, void *v) { + unsigned int tid_start = 0; struct adapter *adap = seq->private; const struct tid_info *t = &adap->tids; enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); + if (chip > CHELSIO_T5) + tid_start = t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A); + if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) { unsigned int sb; seq_printf(seq, "Connections in use: %u\n", @@ -2679,8 +2690,8 @@ static int tid_info_show(struct seq_file *seq, void *v) sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A); if (sb) { - seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1, - adap->tids.hash_base, + seq_printf(seq, "TID range: %u..%u/%u..%u", tid_start, + sb - 1, adap->tids.hash_base, t->ntids - 1); seq_printf(seq, ", in use: %u/%u\n", atomic_read(&t->tids_in_use), @@ -2705,7 +2716,8 @@ static int tid_info_show(struct seq_file *seq, void *v) seq_printf(seq, "Connections in use: %u\n", atomic_read(&t->conns_in_use)); - seq_printf(seq, "TID range: 0..%u", t->ntids - 1); + seq_printf(seq, "TID range: %u..%u", tid_start, + tid_start + t->ntids - 1); seq_printf(seq, ", in use: %u\n", atomic_read(&t->tids_in_use)); } @@ -2794,18 +2806,6 @@ static const struct file_operations blocked_fl_fops = { .llseek = generic_file_llseek, }; -struct mem_desc { - unsigned int base; - unsigned int limit; - unsigned int idx; -}; - -static int mem_desc_cmp(const void *a, const void *b) -{ - return ((const struct mem_desc *)a)->base - - ((const struct mem_desc *)b)->base; -} - static void mem_region_show(struct seq_file *seq, const char *name, unsigned int from, unsigned int to) { @@ -2819,250 +2819,60 @@ static void mem_region_show(struct seq_file *seq, const char *name, static int meminfo_show(struct seq_file *seq, void *v) { static const char * const memory[] = { "EDC0:", "EDC1:", "MC:", - "MC0:", "MC1:"}; - static const char * const region[] = { - "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", - "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", - "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", - "TDDP region:", "TPT region:", "STAG region:", "RQ region:", - "RQUDP region:", "PBL region:", "TXPBL region:", - "DBVFIFO region:", "ULPRX state:", "ULPTX state:", - "On-chip queues:" - }; - - int i, n; - u32 lo, hi, used, alloc; - struct mem_desc avail[4]; - struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */ - struct mem_desc *md = mem; + "MC0:", "MC1:", "HMA:"}; struct adapter *adap = seq->private; + struct cudbg_meminfo meminfo; + int i, rc; - for (i = 0; i < ARRAY_SIZE(mem); i++) { - mem[i].limit = 0; - mem[i].idx = i; - } - - /* Find and sort the populated memory ranges */ - i = 0; - lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); - if (lo & EDRAM0_ENABLE_F) { - hi = t4_read_reg(adap, MA_EDRAM0_BAR_A); - avail[i].base = EDRAM0_BASE_G(hi) << 20; - avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20); - avail[i].idx = 0; - i++; - } - if (lo & 
EDRAM1_ENABLE_F) { - hi = t4_read_reg(adap, MA_EDRAM1_BAR_A); - avail[i].base = EDRAM1_BASE_G(hi) << 20; - avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20); - avail[i].idx = 1; - i++; - } - - if (is_t5(adap->params.chip)) { - if (lo & EXT_MEM0_ENABLE_F) { - hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); - avail[i].base = EXT_MEM0_BASE_G(hi) << 20; - avail[i].limit = - avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20); - avail[i].idx = 3; - i++; - } - if (lo & EXT_MEM1_ENABLE_F) { - hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); - avail[i].base = EXT_MEM1_BASE_G(hi) << 20; - avail[i].limit = - avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20); - avail[i].idx = 4; - i++; - } - } else { - if (lo & EXT_MEM_ENABLE_F) { - hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); - avail[i].base = EXT_MEM_BASE_G(hi) << 20; - avail[i].limit = - avail[i].base + (EXT_MEM_SIZE_G(hi) << 20); - avail[i].idx = 2; - i++; - } - } - if (!i) /* no memory available */ - return 0; - sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL); - - (md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A); - (md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A); - (md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A); - (md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A); - (md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A); - (md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A); - (md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A); - (md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A); - (md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A); - - /* the next few have explicit upper bounds */ - md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A); - md->limit = md->base - 1 + - t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) * - PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A)); - md++; - - md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A); - md->limit = md->base - 1 + - t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) * - PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A)); - md++; - - if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) { - if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) { - hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4; - md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A); - } else { - hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A); - md->base = t4_read_reg(adap, - LE_DB_HASH_TBL_BASE_ADDR_A); - } - md->limit = 0; - } else { - md->base = 0; - md->idx = ARRAY_SIZE(region); /* hide it */ - } - md++; - -#define ulp_region(reg) do { \ - md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\ - (md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \ -} while (0) - - ulp_region(RX_ISCSI); - ulp_region(RX_TDDP); - ulp_region(TX_TPT); - ulp_region(RX_STAG); - ulp_region(RX_RQ); - ulp_region(RX_RQUDP); - ulp_region(RX_PBL); - ulp_region(TX_PBL); -#undef ulp_region - md->base = 0; - md->idx = ARRAY_SIZE(region); - if (!is_t4(adap->params.chip)) { - u32 size = 0; - u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A); - u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A); - - if (is_t5(adap->params.chip)) { - if (sge_ctrl & VFIFO_ENABLE_F) - size = DBVFIFO_SIZE_G(fifo_size); - } else { - size = T6_DBVFIFO_SIZE_G(fifo_size); - } - - if (size) { - md->base = BASEADDR_G(t4_read_reg(adap, - SGE_DBVFIFO_BADDR_A)); - md->limit = md->base + (size << 2) - 1; - } - } - - md++; - - md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A); - md->limit = 0; - md++; - md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A); - md->limit = 0; - md++; - - md->base = adap->vres.ocq.start; - if (adap->vres.ocq.size) - 
md->limit = md->base + adap->vres.ocq.size - 1; - else - md->idx = ARRAY_SIZE(region); /* hide it */ - md++; - - /* add any address-space holes, there can be up to 3 */ - for (n = 0; n < i - 1; n++) - if (avail[n].limit < avail[n + 1].base) - (md++)->base = avail[n].limit; - if (avail[n].limit) - (md++)->base = avail[n].limit; - - n = md - mem; - sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL); + memset(&meminfo, 0, sizeof(struct cudbg_meminfo)); + rc = cudbg_fill_meminfo(adap, &meminfo); + if (rc) + return -ENXIO; - for (lo = 0; lo < i; lo++) - mem_region_show(seq, memory[avail[lo].idx], avail[lo].base, - avail[lo].limit - 1); + for (i = 0; i < meminfo.avail_c; i++) + mem_region_show(seq, memory[meminfo.avail[i].idx], + meminfo.avail[i].base, + meminfo.avail[i].limit - 1); seq_putc(seq, '\n'); - for (i = 0; i < n; i++) { - if (mem[i].idx >= ARRAY_SIZE(region)) + for (i = 0; i < meminfo.mem_c; i++) { + if (meminfo.mem[i].idx >= ARRAY_SIZE(cudbg_region)) continue; /* skip holes */ - if (!mem[i].limit) - mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; - mem_region_show(seq, region[mem[i].idx], mem[i].base, - mem[i].limit); + if (!meminfo.mem[i].limit) + meminfo.mem[i].limit = + i < meminfo.mem_c - 1 ? + meminfo.mem[i + 1].base - 1 : ~0; + mem_region_show(seq, cudbg_region[meminfo.mem[i].idx], + meminfo.mem[i].base, meminfo.mem[i].limit); } seq_putc(seq, '\n'); - lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A); - hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1; - mem_region_show(seq, "uP RAM:", lo, hi); - - lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A); - hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1; - mem_region_show(seq, "uP Extmem2:", lo, hi); + mem_region_show(seq, "uP RAM:", meminfo.up_ram_lo, meminfo.up_ram_hi); + mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo, + meminfo.up_extmem2_hi); - lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A); seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n", - PMRXMAXPAGE_G(lo), - t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10, - (lo & PMRXNUMCHN_F) ? 2 : 1); + meminfo.rx_pages_data[0], meminfo.rx_pages_data[1], + meminfo.rx_pages_data[2]); - lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A); - hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A); seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n", - PMTXMAXPAGE_G(lo), - hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), - hi >= (1 << 20) ? 
'M' : 'K', 1 << PMTXNUMCHN_G(lo)); - seq_printf(seq, "%u p-structs\n\n", - t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A)); - - for (i = 0; i < 4; i++) { - if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) - lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4); - else - lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4); - if (is_t5(adap->params.chip)) { - used = T5_USED_G(lo); - alloc = T5_ALLOC_G(lo); - } else { - used = USED_G(lo); - alloc = ALLOC_G(lo); - } + meminfo.tx_pages_data[0], meminfo.tx_pages_data[1], + meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]); + + seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs); + + for (i = 0; i < 4; i++) /* For T6 these are MAC buffer groups */ seq_printf(seq, "Port %d using %u pages out of %u allocated\n", - i, used, alloc); - } - for (i = 0; i < adap->params.arch.nchan; i++) { - if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) - lo = t4_read_reg(adap, - MPS_RX_LPBK_BG_PG_CNT0_A + i * 4); - else - lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4); - if (is_t5(adap->params.chip)) { - used = T5_USED_G(lo); - alloc = T5_ALLOC_G(lo); - } else { - used = USED_G(lo); - alloc = ALLOC_G(lo); - } + i, meminfo.port_used[i], meminfo.port_alloc[i]); + + for (i = 0; i < adap->params.arch.nchan; i++) /* For T6 these are MAC buffer groups */ seq_printf(seq, "Loopback %d using %u pages out of %u allocated\n", - i, used, alloc); - } + i, meminfo.loopback_used[i], + meminfo.loopback_alloc[i]); + return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index eb338212f5af..7852d98bad75 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -517,7 +517,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type, else return PORT_OTHER; } else if (port_type == FW_PORT_TYPE_KR4_100G || - port_type == FW_PORT_TYPE_KR_SFP28) { + port_type == FW_PORT_TYPE_KR_SFP28 || + port_type == FW_PORT_TYPE_KR_XLAUI) { return PORT_NONE; } @@ -645,6 +646,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full); break; + case FW_PORT_TYPE_KR_XLAUI: + SET_LMM(Backplane); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); + FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full); + break; + case FW_PORT_TYPE_CR2_QSFP: SET_LMM(FIBRE); SET_LMM(50000baseSR2_Full); @@ -1396,6 +1404,101 @@ static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump, return 0; } +static int cxgb4_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct port_info *pi = netdev_priv(dev); + u8 sff8472_comp, sff_diag_type, sff_rev; + struct adapter *adapter = pi->adapter; + int ret; + + if (!t4_is_inserted_mod_type(pi->mod_type)) + return -EINVAL; + + switch (pi->port_type) { + case FW_PORT_TYPE_SFP: + case FW_PORT_TYPE_QSA: + case FW_PORT_TYPE_SFP28: + ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, + I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR, + SFF_8472_COMP_LEN, &sff8472_comp); + if (ret) + return ret; + ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, + I2C_DEV_ADDR_A0, SFP_DIAG_TYPE_ADDR, + SFP_DIAG_TYPE_LEN, &sff_diag_type); + if (ret) + return ret; + + if (!sff8472_comp || (sff_diag_type & 4)) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + break; + + case 
FW_PORT_TYPE_QSFP: + case FW_PORT_TYPE_QSFP_10G: + case FW_PORT_TYPE_CR_QSFP: + case FW_PORT_TYPE_CR2_QSFP: + case FW_PORT_TYPE_CR4_QSFP: + ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, + I2C_DEV_ADDR_A0, SFF_REV_ADDR, + SFF_REV_LEN, &sff_rev); + /* For QSFP type ports, revision value >= 3 + * means the SFP is 8636 compliant. + */ + if (ret) + return ret; + if (sff_rev >= 0x3) { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + } + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int cxgb4_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *eprom, u8 *data) +{ + int ret = 0, offset = eprom->offset, len = eprom->len; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + memset(data, 0, eprom->len); + if (offset + len <= I2C_PAGE_SIZE) + return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, + I2C_DEV_ADDR_A0, offset, len, data); + + /* offset + len spans 0xa0 and 0xa1 pages */ + if (offset <= I2C_PAGE_SIZE) { + /* read 0xa0 page */ + len = I2C_PAGE_SIZE - offset; + ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, + I2C_DEV_ADDR_A0, offset, len, data); + if (ret) + return ret; + offset = I2C_PAGE_SIZE; + /* Remaining bytes to be read from second page = + * Total length - bytes read from first page + */ + len = eprom->len - len; + } + /* Read additional optical diagnostics from page 0xa2 if supported */ + return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, I2C_DEV_ADDR_A2, + offset, len, &data[eprom->len - len]); +} + static const struct ethtool_ops cxgb_ethtool_ops = { .get_link_ksettings = get_link_ksettings, .set_link_ksettings = set_link_ksettings, @@ -1430,6 +1533,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .set_dump = set_dump, .get_dump_flag = get_dump_flag, .get_dump_data = get_dump_data, + .get_module_info = cxgb4_get_module_info, + .get_module_eeprom = cxgb4_get_module_eeprom, }; void cxgb4_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 5980f308a253..3177b0c9bd2d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -439,19 +439,32 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family) if (ftid >= t->nftids) ftid = -1; } else { - ftid = bitmap_find_free_region(t->ftid_bmap, t->nftids, 2); - if (ftid < 0) - goto out_unlock; + if (is_t6(adap->params.chip)) { + ftid = bitmap_find_free_region(t->ftid_bmap, + t->nftids, 1); + if (ftid < 0) + goto out_unlock; + + /* this is only a lookup, keep the found region + * unallocated + */ + bitmap_release_region(t->ftid_bmap, ftid, 1); + } else { + ftid = bitmap_find_free_region(t->ftid_bmap, + t->nftids, 2); + if (ftid < 0) + goto out_unlock; - /* this is only a lookup, keep the found region unallocated */ - bitmap_release_region(t->ftid_bmap, ftid, 2); + bitmap_release_region(t->ftid_bmap, ftid, 2); + } } out_unlock: spin_unlock_bh(&t->ftid_lock); return ftid; } -static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family) +static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family, + unsigned int chip_ver) { spin_lock_bh(&t->ftid_lock); @@ -460,22 +473,31 @@ static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family) return -EBUSY; } - if (family == PF_INET) + if (family == PF_INET) { __set_bit(fidx, 
t->ftid_bmap); - else - bitmap_allocate_region(t->ftid_bmap, fidx, 2); + } else { + if (chip_ver < CHELSIO_T6) + bitmap_allocate_region(t->ftid_bmap, fidx, 2); + else + bitmap_allocate_region(t->ftid_bmap, fidx, 1); + } spin_unlock_bh(&t->ftid_lock); return 0; } -static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family) +static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family, + unsigned int chip_ver) { spin_lock_bh(&t->ftid_lock); - if (family == PF_INET) + if (family == PF_INET) { __clear_bit(fidx, t->ftid_bmap); - else - bitmap_release_region(t->ftid_bmap, fidx, 2); + } else { + if (chip_ver < CHELSIO_T6) + bitmap_release_region(t->ftid_bmap, fidx, 2); + else + bitmap_release_region(t->ftid_bmap, fidx, 1); + } spin_unlock_bh(&t->ftid_lock); } @@ -694,7 +716,7 @@ void clear_filter(struct adapter *adap, struct filter_entry *f) if (f->smt) cxgb4_smt_release(f->smt); - if (f->fs.hash && f->fs.type) + if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type) cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1); /* The zeroing of the filter rule below clears the filter valid, @@ -1189,6 +1211,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, struct filter_ctx *ctx) { struct adapter *adapter = netdev2adap(dev); + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); unsigned int max_fidx, fidx; struct filter_entry *f; u32 iconf; @@ -1225,12 +1248,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, * insertion. */ if (fs->type == 0) { /* IPv4 */ - /* If our IPv4 filter isn't being written to a - * multiple of four filter index and there's an IPv6 - * filter at the multiple of 4 base slot, then we - * prevent insertion. + /* For T6, If our IPv4 filter isn't being written to a + * multiple of two filter index and there's an IPv6 + * filter at the multiple of 2 base slot, then we need + * to delete that IPv6 filter ... + * For adapters below T6, IPv6 filter occupies 4 entries. + * Hence we need to delete the filter in multiple of 4 slot. */ - fidx = filter_id & ~0x3; + if (chip_ver < CHELSIO_T6) + fidx = filter_id & ~0x3; + else + fidx = filter_id & ~0x1; + if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) { f = &adapter->tids.ftid_tab[fidx]; @@ -1242,23 +1271,42 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, } } } else { /* IPv6 */ - /* Ensure that the IPv6 filter is aligned on a - * multiple of 4 boundary. - */ - if (filter_id & 0x3) { - dev_err(adapter->pdev_dev, - "Invalid location. IPv6 must be aligned on a 4-slot boundary\n"); - return -EINVAL; - } + if (chip_ver < CHELSIO_T6) { + /* Ensure that the IPv6 filter is aligned on a + * multiple of 4 boundary. + */ + if (filter_id & 0x3) { + dev_err(adapter->pdev_dev, + "Invalid location. IPv6 must be aligned on a 4-slot boundary\n"); + return -EINVAL; + } - /* Check all except the base overlapping IPv4 filter slots. */ - for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) { + /* Check all except the base overlapping IPv4 filter + * slots. + */ + for (fidx = filter_id + 1; fidx < filter_id + 4; + fidx++) { + f = &adapter->tids.ftid_tab[fidx]; + if (f->valid) { + dev_err(adapter->pdev_dev, + "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n", + fidx); + return -EBUSY; + } + } + } else { + /* For T6, CLIP being enabled, IPv6 filter would occupy + * 2 entries. 
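A minimal sketch of the region-based ftid accounting used in the hunks above, assuming the driver's ftid bitmap and a chip-version flag: IPv6 filters claim an order-2 region (four slots) before T6 and an order-1 region (two slots) on T6, where CLIP compresses the IPv6 addresses.

#include <linux/bitmap.h>
#include <linux/errno.h>

/* Lookup-only sketch: find where an IPv6 filter would fit without
 * claiming the slots; "is_t6" stands in for the chip-version check.
 */
static int find_free_ipv6_ftid(unsigned long *bmap, unsigned int nftids,
                               bool is_t6)
{
        int order = is_t6 ? 1 : 2;      /* log2 of slots per IPv6 filter */
        int ftid;

        ftid = bitmap_find_free_region(bmap, nftids, order);
        if (ftid < 0)
                return -ENOMEM;

        /* Only a lookup, as in cxgb4_get_free_ftid(): release the region
         * so the caller can allocate it later under the same lock.
         */
        bitmap_release_region(bmap, ftid, order);
        return ftid;
}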
+ */ + if (filter_id & 0x1) + return -EINVAL; + /* Check overlapping IPv4 filter slot */ + fidx = filter_id + 1; f = &adapter->tids.ftid_tab[fidx]; if (f->valid) { - dev_err(adapter->pdev_dev, - "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n", - fidx); - return -EINVAL; + pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n", + __func__, fidx); + return -EBUSY; } } } @@ -1272,16 +1320,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, fidx = filter_id + adapter->tids.ftid_base; ret = cxgb4_set_ftid(&adapter->tids, filter_id, - fs->type ? PF_INET6 : PF_INET); + fs->type ? PF_INET6 : PF_INET, + chip_ver); if (ret) return ret; - /* Check to make sure the filter requested is writable ... */ + /* Check t make sure the filter requested is writable ... */ ret = writable_filter(f); if (ret) { /* Clear the bits we have set above */ cxgb4_clear_ftid(&adapter->tids, filter_id, - fs->type ? PF_INET6 : PF_INET); + fs->type ? PF_INET6 : PF_INET, + chip_ver); return ret; } @@ -1291,6 +1341,17 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, if (f->valid) clear_filter(adapter, f); + if (is_t6(adapter->params.chip) && fs->type && + ipv6_addr_type((const struct in6_addr *)fs->val.lip) != + IPV6_ADDR_ANY) { + ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1); + if (ret) { + cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6, + chip_ver); + return ret; + } + } + /* Convert the filter specification into our internal format. * We copy the PF/VF specification into the Outer VLAN field * here so the rest of the code -- including the interface to @@ -1316,7 +1377,8 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, ret = set_filter_wr(adapter, filter_id); if (ret) { cxgb4_clear_ftid(&adapter->tids, filter_id, - fs->type ? PF_INET6 : PF_INET); + fs->type ? PF_INET6 : PF_INET, + chip_ver); clear_filter(adapter, f); } @@ -1394,6 +1456,7 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id, struct filter_ctx *ctx) { struct adapter *adapter = netdev2adap(dev); + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); struct filter_entry *f; unsigned int max_fidx; int ret; @@ -1419,7 +1482,8 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id, if (f->valid) { f->ctx = ctx; cxgb4_clear_ftid(&adapter->tids, filter_id, - f->fs.type ? PF_INET6 : PF_INET); + f->fs.type ? PF_INET6 : PF_INET, + chip_ver); return del_filter_wr(adapter, filter_id); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 05a4abfd5ec1..1ca2a39ed0f8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -65,6 +65,7 @@ #include <net/addrconf.h> #include <linux/uaccess.h> #include <linux/crash_dump.h> +#include <net/udp_tunnel.h> #include "cxgb4.h" #include "cxgb4_filter.h" @@ -101,7 +102,9 @@ const char cxgb4_driver_version[] = DRV_VERSION; */ #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ static const struct pci_device_id cxgb4_pci_tbl[] = { -#define CH_PCI_DEVICE_ID_FUNCTION 0x4 +#define CXGB4_UNIFIED_PF 0x4 + +#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is * called for both. 
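Tying the filter-placement rules above together: an IPv4 filter must land clear of any IPv6 filter occupying its base region, so the base slot is derived by masking off the low index bits, two slots wide on T6 and four before it. A hedged helper sketch, with "is_t6" again shorthand for the chip-version check:

/* Sketch: base slot of the region containing filter_id. A conflicting
 * IPv6 filter at this base must be removed first, hence the -EBUSY
 * paths above.
 */
static inline unsigned int ipv4_filter_base_slot(unsigned int filter_id,
                                                 bool is_t6)
{
        return is_t6 ? (filter_id & ~0x1U) : (filter_id & ~0x3U);
}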
@@ -109,7 +112,7 @@ const char cxgb4_driver_version[] = DRV_VERSION; #define CH_PCI_DEVICE_ID_FUNCTION2 0x0 #define CH_PCI_ID_TABLE_ENTRY(devid) \ - {PCI_VDEVICE(CHELSIO, (devid)), 4} + {PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF} #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \ { 0, } \ @@ -1673,7 +1676,7 @@ int cxgb4_flush_eq_cache(struct net_device *dev) { struct adapter *adap = netdev2adap(dev); - return t4_sge_ctxt_flush(adap, adap->mbox); + return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS); } EXPORT_SYMBOL(cxgb4_flush_eq_cache); @@ -2604,7 +2607,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu) } #ifdef CONFIG_PCI_IOV -static int dummy_open(struct net_device *dev) +static int cxgb4_mgmt_open(struct net_device *dev) { /* Turn carrier off since we don't have to transmit anything on this * interface. @@ -2614,39 +2617,44 @@ static int dummy_open(struct net_device *dev) } /* Fill MAC address that will be assigned by the FW */ -static void fill_vf_station_mac_addr(struct adapter *adap) +static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap) { - unsigned int i; u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN]; + unsigned int i, vf, nvfs; + u16 a, b; int err; u8 *na; - u16 a, b; + adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev, + PCI_CAP_ID_VPD); err = t4_get_raw_vpd_params(adap, &adap->params.vpd); - if (!err) { - na = adap->params.vpd.na; - for (i = 0; i < ETH_ALEN; i++) - hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 + - hex2val(na[2 * i + 1])); - a = (hw_addr[0] << 8) | hw_addr[1]; - b = (hw_addr[1] << 8) | hw_addr[2]; - a ^= b; - a |= 0x0200; /* locally assigned Ethernet MAC address */ - a &= ~0x0100; /* not a multicast Ethernet MAC address */ - macaddr[0] = a >> 8; - macaddr[1] = a & 0xff; - - for (i = 2; i < 5; i++) - macaddr[i] = hw_addr[i + 1]; - - for (i = 0; i < adap->num_vfs; i++) { - macaddr[5] = adap->pf * 16 + i; - ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr); - } + if (err) + return; + + na = adap->params.vpd.na; + for (i = 0; i < ETH_ALEN; i++) + hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 + + hex2val(na[2 * i + 1])); + + a = (hw_addr[0] << 8) | hw_addr[1]; + b = (hw_addr[1] << 8) | hw_addr[2]; + a ^= b; + a |= 0x0200; /* locally assigned Ethernet MAC address */ + a &= ~0x0100; /* not a multicast Ethernet MAC address */ + macaddr[0] = a >> 8; + macaddr[1] = a & 0xff; + + for (i = 2; i < 5; i++) + macaddr[i] = hw_addr[i + 1]; + + for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev); + vf < nvfs; vf++) { + macaddr[5] = adap->pf * 16 + vf; + ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr); } } -static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac) +static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac) { struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; @@ -2668,8 +2676,8 @@ static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac) return ret; } -static int cxgb_get_vf_config(struct net_device *dev, - int vf, struct ifla_vf_info *ivi) +static int cxgb4_mgmt_get_vf_config(struct net_device *dev, + int vf, struct ifla_vf_info *ivi) { struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; @@ -2683,8 +2691,8 @@ static int cxgb_get_vf_config(struct net_device *dev, return 0; } -static int cxgb_get_phys_port_id(struct net_device *dev, - struct netdev_phys_item_id *ppid) +static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) { struct port_info *pi = netdev_priv(dev); unsigned int 
phy_port_id; @@ -2695,8 +2703,8 @@ static int cxgb_get_phys_port_id(struct net_device *dev, return 0; } -static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, - int max_tx_rate) +static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf, + int min_tx_rate, int max_tx_rate) { struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; @@ -2775,7 +2783,30 @@ static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, return 0; } -#endif +static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf, + u16 vlan, u8 qos, __be16 vlan_proto) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + int ret; + + if (vf >= adap->num_vfs || vlan > 4095 || qos > 7) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q) || qos != 0) + return -EPROTONOSUPPORT; + + ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan); + if (!ret) { + adap->vfinfo[vf].vlan = vlan; + return 0; + } + + dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n", + ret, (vlan ? "setting" : "clearing"), adap->pf, vf); + return ret; +} +#endif /* CONFIG_PCI_IOV */ static int cxgb_set_mac_addr(struct net_device *dev, void *p) { @@ -2897,9 +2928,6 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) static int cxgb_setup_tc_flower(struct net_device *dev, struct tc_cls_flower_offload *cls_flower) { - if (cls_flower->common.chain_index) - return -EOPNOTSUPP; - switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: return cxgb4_tc_flower_replace(dev, cls_flower); @@ -2915,9 +2943,6 @@ static int cxgb_setup_tc_flower(struct net_device *dev, static int cxgb_setup_tc_cls_u32(struct net_device *dev, struct tc_cls_u32_offload *cls_u32) { - if (cls_u32->common.chain_index) - return -EOPNOTSUPP; - switch (cls_u32->command) { case TC_CLSU32_NEW_KNODE: case TC_CLSU32_REPLACE_KNODE: @@ -2943,7 +2968,7 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, return -EINVAL; } - if (!tc_can_offload(dev)) + if (!tc_cls_can_offload_and_chain0(dev, type_data)) return -EOPNOTSUPP; switch (type) { @@ -2987,6 +3012,174 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, } } +static void cxgb_del_udp_tunnel(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); + u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; + int ret = 0, i; + + if (chip_ver < CHELSIO_T6) + return; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!adapter->vxlan_port_cnt || + adapter->vxlan_port != ti->port) + return; /* Invalid VxLAN destination port */ + + adapter->vxlan_port_cnt--; + if (adapter->vxlan_port_cnt) + return; + + adapter->vxlan_port = 0; + t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (!adapter->geneve_port_cnt || + adapter->geneve_port != ti->port) + return; /* Invalid GENEVE destination port */ + + adapter->geneve_port_cnt--; + if (adapter->geneve_port_cnt) + return; + + adapter->geneve_port = 0; + t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0); + default: + return; + } + + /* Matchall mac entries can be deleted only after all tunnel ports + * are brought down or removed. 
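The tunnel delete path above is guarded by a per-type use count because the stack can report the same UDP port for both IPv4 and IPv6; the MPS type register is cleared, and the match-all entries freed, only when the last reference goes away. A hedged sketch of that bookkeeping, reusing the adapter fields and register names from the hunk:

/* Sketch: drop one reference on the offloaded VXLAN port; returns true
 * when the hardware state was actually torn down.
 */
static bool vxlan_port_release(struct adapter *adap, __be16 port)
{
        if (!adap->vxlan_port_cnt || adap->vxlan_port != port)
                return false;           /* not the offloaded port */

        if (--adap->vxlan_port_cnt)
                return false;           /* still referenced */

        adap->vxlan_port = 0;
        t4_write_reg(adap, MPS_RX_VXLAN_TYPE_A, 0);
        return true;                    /* last user: free filters too */
}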
+ */ + if (!adapter->rawf_cnt) + return; + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + ret = t4_free_raw_mac_filt(adapter, pi->viid, + match_all_mac, match_all_mac, + adapter->rawf_start + + pi->port_id, + 1, pi->port_id, true); + if (ret < 0) { + netdev_info(netdev, "Failed to free mac filter entry, for port %d\n", + i); + return; + } + atomic_dec(&adapter->mps_encap[adapter->rawf_start + + pi->port_id].refcnt); + } +} + +static void cxgb_add_udp_tunnel(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); + u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; + int i, ret; + + if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt) + return; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + /* Callback for adding vxlan port can be called with the same + * port for both IPv4 and IPv6. We should not disable the + * offloading when the same port for both protocols is added + * and later one of them is removed. + */ + if (adapter->vxlan_port_cnt && + adapter->vxlan_port == ti->port) { + adapter->vxlan_port_cnt++; + return; + } + + /* We will support only one VxLAN port */ + if (adapter->vxlan_port_cnt) { + netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n", + be16_to_cpu(adapter->vxlan_port), + be16_to_cpu(ti->port)); + return; + } + + adapter->vxlan_port = ti->port; + adapter->vxlan_port_cnt = 1; + + t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, + VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (adapter->geneve_port_cnt && + adapter->geneve_port == ti->port) { + adapter->geneve_port_cnt++; + return; + } + + /* We will support only one GENEVE port */ + if (adapter->geneve_port_cnt) { + netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n", + be16_to_cpu(adapter->geneve_port), + be16_to_cpu(ti->port)); + return; + } + + adapter->geneve_port = ti->port; + adapter->geneve_port_cnt = 1; + + t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, + GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F); + default: + return; + } + + /* Create a 'match all' mac filter entry for inner mac, + * if raw mac interface is supported. Once the linux kernel provides + * driver entry points for adding/deleting the inner mac addresses, + * we will remove this 'match all' entry and fallback to adding + * exact match filters. 
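For the add side, a condensed sketch of the type dispatch, assuming the duplicate-port checks above have already run; each case programs its MPS type register and breaks out so control reaches the shared match-all filter setup that follows:

static void program_tunnel_type(struct adapter *adapter,
                                struct udp_tunnel_info *ti)
{
        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                adapter->vxlan_port = ti->port;
                adapter->vxlan_port_cnt = 1;
                t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
                             VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                adapter->geneve_port = ti->port;
                adapter->geneve_port_cnt = 1;
                t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
                             GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
                break;
        default:
                return;                 /* unsupported tunnel type */
        }
}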
+ */ + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + + ret = t4_alloc_raw_mac_filt(adapter, pi->viid, + match_all_mac, + match_all_mac, + adapter->rawf_start + + pi->port_id, + 1, pi->port_id, true); + if (ret < 0) { + netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n", + be16_to_cpu(ti->port)); + cxgb_del_udp_tunnel(netdev, ti); + return; + } + atomic_inc(&adapter->mps_encap[ret].refcnt); + } +} + +static netdev_features_t cxgb_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6) + return features; + + /* Check if hw supports offload for this packet */ + if (!skb->encapsulation || cxgb_encap_offload_supported(skb)) + return features; + + /* Offload is not supported for this encapsulated packet */ + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + static netdev_features_t cxgb_fix_features(struct net_device *dev, netdev_features_t features) { @@ -3018,20 +3211,25 @@ static const struct net_device_ops cxgb4_netdev_ops = { #endif /* CONFIG_CHELSIO_T4_FCOE */ .ndo_set_tx_maxrate = cxgb_set_tx_maxrate, .ndo_setup_tc = cxgb_setup_tc, + .ndo_udp_tunnel_add = cxgb_add_udp_tunnel, + .ndo_udp_tunnel_del = cxgb_del_udp_tunnel, + .ndo_features_check = cxgb_features_check, .ndo_fix_features = cxgb_fix_features, }; #ifdef CONFIG_PCI_IOV static const struct net_device_ops cxgb4_mgmt_netdev_ops = { - .ndo_open = dummy_open, - .ndo_set_vf_mac = cxgb_set_vf_mac, - .ndo_get_vf_config = cxgb_get_vf_config, - .ndo_set_vf_rate = cxgb_set_vf_rate, - .ndo_get_phys_port_id = cxgb_get_phys_port_id, + .ndo_open = cxgb4_mgmt_open, + .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac, + .ndo_get_vf_config = cxgb4_mgmt_get_vf_config, + .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate, + .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id, + .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan, }; #endif -static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +static void cxgb4_mgmt_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) { struct adapter *adapter = netdev2adap(dev); @@ -3043,7 +3241,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) } static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = { - .get_drvinfo = get_drvinfo, + .get_drvinfo = cxgb4_mgmt_get_drvinfo, }; void t4_fatal_err(struct adapter *adap) @@ -4759,7 +4957,7 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev) } #ifdef CONFIG_PCI_IOV -static void dummy_setup(struct net_device *dev) +static void cxgb4_mgmt_setup(struct net_device *dev) { dev->type = ARPHRD_NONE; dev->mtu = 0; @@ -4775,38 +4973,6 @@ static void dummy_setup(struct net_device *dev) dev->needs_free_netdev = true; } -static int config_mgmt_dev(struct pci_dev *pdev) -{ - struct adapter *adap = pci_get_drvdata(pdev); - struct net_device *netdev; - struct port_info *pi; - char name[IFNAMSIZ]; - int err; - - snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf); - netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN, - dummy_setup); - if (!netdev) - return -ENOMEM; - - pi = netdev_priv(netdev); - pi->adapter = adap; - pi->tx_chan = adap->pf % adap->params.nports; - SET_NETDEV_DEV(netdev, &pdev->dev); - - adap->port[0] = netdev; - pi->port_id = 0; - - err = register_netdev(adap->port[0]); - if (err) { - pr_info("Unable to register VF mgmt netdev %s\n", name); 
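The .ndo_features_check hook registered above lets the driver veto offloads per packet: keep the advertised features for traffic the NIC can parse, and strip checksum plus GSO assistance otherwise so the stack falls back to software. A minimal sketch, with the hypothetical hw_supports_encap() standing in for cxgb_encap_offload_supported():

static netdev_features_t encap_features_check(struct sk_buff *skb,
                                              struct net_device *dev,
                                              netdev_features_t features)
{
        /* Plain traffic, or tunnels the hardware can parse, keep the
         * full feature set.
         */
        if (!skb->encapsulation || hw_supports_encap(skb))
                return features;

        /* Unsupported encapsulation: fall back to software checksum
         * and segmentation.
         */
        return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}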
- free_netdev(adap->port[0]); - adap->port[0] = NULL; - return err; - } - return 0; -} - static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) { struct adapter *adap = pci_get_drvdata(pdev); @@ -4818,7 +4984,7 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) /* Check if cxgb4 is the MASTER and fw is initialized */ if (!(pcie_fw & PCIE_FW_INIT_F) || !(pcie_fw & PCIE_FW_MASTER_VLD_F) || - PCIE_FW_MASTER_G(pcie_fw) != 4) { + PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) { dev_warn(&pdev->dev, "cxgb4 driver needs to be MASTER to support SRIOV\n"); return -EOPNOTSUPP; @@ -4830,46 +4996,132 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) if (current_vfs && pci_vfs_assigned(pdev)) { dev_err(&pdev->dev, "Cannot modify SR-IOV while VFs are assigned\n"); - num_vfs = current_vfs; - return num_vfs; + return current_vfs; } - - /* Disable SRIOV when zero is passed. - * One needs to disable SRIOV before modifying it, else - * stack throws the below warning: - * " 'n' VFs already enabled. Disable before enabling 'm' VFs." + /* Note that the upper-level code ensures that we're never called with + * a non-zero "num_vfs" when we already have VFs instantiated. But + * it never hurts to code defensively. */ + if (num_vfs != 0 && current_vfs != 0) + return -EBUSY; + + /* Nothing to do for no change. */ + if (num_vfs == current_vfs) + return num_vfs; + + /* Disable SRIOV when zero is passed. */ if (!num_vfs) { pci_disable_sriov(pdev); - if (adap->port[0]) { - unregister_netdev(adap->port[0]); - adap->port[0] = NULL; - } + /* free VF Management Interface */ + unregister_netdev(adap->port[0]); + free_netdev(adap->port[0]); + adap->port[0] = NULL; + /* free VF resources */ + adap->num_vfs = 0; kfree(adap->vfinfo); adap->vfinfo = NULL; - adap->num_vfs = 0; - return num_vfs; + return 0; } - if (num_vfs != current_vfs) { - err = pci_enable_sriov(pdev, num_vfs); + if (!current_vfs) { + struct fw_pfvf_cmd port_cmd, port_rpl; + struct net_device *netdev; + unsigned int pmask, port; + struct pci_dev *pbridge; + struct port_info *pi; + char name[IFNAMSIZ]; + u32 devcap2; + u16 flags; + int pos; + + /* If we want to instantiate Virtual Functions, then our + * parent bridge's PCI-E needs to support Alternative Routing + * ID (ARI) because our VFs will show up at function offset 8 + * and above. + */ + pbridge = pdev->bus->self; + pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP); + pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags); + pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2); + + if ((flags & PCI_EXP_FLAGS_VERS) < 2 || + !(devcap2 & PCI_EXP_DEVCAP2_ARI)) { + /* Our parent bridge does not support ARI so issue a + * warning and skip instantiating the VFs. They + * won't be reachable. + */ + dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n", + pbridge->bus->number, PCI_SLOT(pbridge->devfn), + PCI_FUNC(pbridge->devfn)); + return -ENOTSUPP; + } + memset(&port_cmd, 0, sizeof(port_cmd)); + port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_PFVF_CMD_PFN_V(adap->pf) | + FW_PFVF_CMD_VFN_V(0)); + port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd)); + err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd), + &port_rpl); if (err) return err; + pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq)); + port = ffs(pmask) - 1; + /* Allocate VF Management Interface. 
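VFs show up at function offset 8 and above, so the parent bridge must support Alternative Routing-ID Interpretation; the hunk above reads PCI_EXP_DEVCAP2, which only exists from version 2 of the PCIe capability onwards. The same check, factored into a sketch:

/* Sketch: does the upstream bridge advertise ARI? */
static bool bridge_supports_ari(struct pci_dev *pbridge)
{
        u32 devcap2;
        u16 flags;
        int pos;

        pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
        if (!pos)
                return false;

        pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
        if ((flags & PCI_EXP_FLAGS_VERS) < 2)
                return false;   /* no DEVCAP2 before capability v2 */

        pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
        return !!(devcap2 & PCI_EXP_DEVCAP2_ARI);
}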
*/ + snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx, + adap->pf); + netdev = alloc_netdev(sizeof(struct port_info), + name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup); + if (!netdev) + return -ENOMEM; - adap->num_vfs = num_vfs; - err = config_mgmt_dev(pdev); - if (err) + pi = netdev_priv(netdev); + pi->adapter = adap; + pi->lport = port; + pi->tx_chan = port; + SET_NETDEV_DEV(netdev, &pdev->dev); + + adap->port[0] = netdev; + pi->port_id = 0; + + err = register_netdev(adap->port[0]); + if (err) { + pr_info("Unable to register VF mgmt netdev %s\n", name); + free_netdev(adap->port[0]); + adap->port[0] = NULL; return err; + } + /* Allocate and set up VF Information. */ + adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev), + sizeof(struct vf_info), GFP_KERNEL); + if (!adap->vfinfo) { + unregister_netdev(adap->port[0]); + free_netdev(adap->port[0]); + adap->port[0] = NULL; + return -ENOMEM; + } + cxgb4_mgmt_fill_vf_station_mac_addr(adap); + } + /* Instantiate the requested number of VFs. */ + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + pr_info("Unable to instantiate %d VFs\n", num_vfs); + if (!current_vfs) { + unregister_netdev(adap->port[0]); + free_netdev(adap->port[0]); + adap->port[0] = NULL; + kfree(adap->vfinfo); + adap->vfinfo = NULL; + } + return err; } - adap->vfinfo = kcalloc(adap->num_vfs, - sizeof(struct vf_info), GFP_KERNEL); - if (adap->vfinfo) - fill_vf_station_mac_addr(adap); + adap->num_vfs = num_vfs; return num_vfs; } -#endif +#endif /* CONFIG_PCI_IOV */ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -4882,9 +5134,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) u32 whoami, pl_rev; enum chip_type chip; static int adap_idx = 1; -#ifdef CONFIG_PCI_IOV - u32 v, port_vec; -#endif printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); @@ -4908,6 +5157,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_disable_device; } + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + err = -ENOMEM; + goto out_unmap_bar0; + } + + adapter->regs = regs; err = t4_wait_dev_ready(regs); if (err < 0) goto out_unmap_bar0; @@ -4918,13 +5174,29 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) chip = get_chip_type(pdev, pl_rev); func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ? 
SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); + + adapter->pdev = pdev; + adapter->pdev_dev = &pdev->dev; + adapter->name = pci_name(pdev); + adapter->mbox = func; + adapter->pf = func; + adapter->msg_enable = DFLT_MSG_ENABLE; + adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + + (sizeof(struct mbox_cmd) * + T4_OS_LOG_MBOX_CMDS), + GFP_KERNEL); + if (!adapter->mbox_log) { + err = -ENOMEM; + goto out_free_adapter; + } + spin_lock_init(&adapter->mbox_lock); + INIT_LIST_HEAD(&adapter->mlist.list); + pci_set_drvdata(pdev, adapter); + if (func != ent->driver_data) { -#ifndef CONFIG_PCI_IOV - iounmap(regs); -#endif pci_disable_device(pdev); pci_save_state(pdev); /* to restore SR-IOV later */ - goto sriov; + return 0; } if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { @@ -4933,53 +5205,30 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) { dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " "coherent allocations\n"); - goto out_unmap_bar0; + goto out_free_adapter; } } else { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "no usable DMA configuration\n"); - goto out_unmap_bar0; + goto out_free_adapter; } } pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); pci_save_state(pdev); - - adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); - if (!adapter) { - err = -ENOMEM; - goto out_unmap_bar0; - } adap_idx++; - adapter->workq = create_singlethread_workqueue("cxgb4"); if (!adapter->workq) { err = -ENOMEM; goto out_free_adapter; } - adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + - (sizeof(struct mbox_cmd) * - T4_OS_LOG_MBOX_CMDS), - GFP_KERNEL); - if (!adapter->mbox_log) { - err = -ENOMEM; - goto out_free_adapter; - } adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS; /* PCI device has been enabled */ adapter->flags |= DEV_ENABLED; - - adapter->regs = regs; - adapter->pdev = pdev; - adapter->pdev_dev = &pdev->dev; - adapter->name = pci_name(pdev); - adapter->mbox = func; - adapter->pf = func; - adapter->msg_enable = DFLT_MSG_ENABLE; memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); /* If possible, we use PCIe Relaxed Ordering Attribute to deliver @@ -5002,9 +5251,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&adapter->stats_lock); spin_lock_init(&adapter->tid_release_lock); spin_lock_init(&adapter->win0_lock); - spin_lock_init(&adapter->mbox_lock); - - INIT_LIST_HEAD(&adapter->mlist.list); INIT_WORK(&adapter->tid_release_task, process_tid_release_list); INIT_WORK(&adapter->db_full_task, process_db_full); @@ -5080,6 +5326,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC; + + if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5) + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + if (highdma) netdev->hw_features |= NETIF_F_HIGHDMA; netdev->features |= netdev->hw_features; @@ -5273,58 +5523,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) setup_fw_sge_queues(adapter); return 0; -sriov: -#ifdef CONFIG_PCI_IOV - adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); - if (!adapter) { - err = -ENOMEM; - goto free_pci_region; - } - - adapter->pdev = pdev; - adapter->pdev_dev = &pdev->dev; - adapter->name = pci_name(pdev); - adapter->mbox = func; - adapter->pf = func; - adapter->regs = regs; - adapter->adap_idx = adap_idx; - adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + - (sizeof(struct mbox_cmd) * - 
T4_OS_LOG_MBOX_CMDS), - GFP_KERNEL); - if (!adapter->mbox_log) { - err = -ENOMEM; - goto free_adapter; - } - spin_lock_init(&adapter->mbox_lock); - INIT_LIST_HEAD(&adapter->mlist.list); - - v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); - err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1, - &v, &port_vec); - if (err < 0) { - dev_err(adapter->pdev_dev, "Could not fetch port params\n"); - goto free_mbox_log; - } - - adapter->params.nports = hweight32(port_vec); - pci_set_drvdata(pdev, adapter); - return 0; - -free_mbox_log: - kfree(adapter->mbox_log); - free_adapter: - kfree(adapter); - free_pci_region: - iounmap(regs); - pci_disable_sriov(pdev); - pci_release_regions(pdev); - return err; -#else - return 0; -#endif - out_free_dev: free_some_resources(adapter); if (adapter->flags & USING_MSIX) @@ -5416,14 +5614,7 @@ static void remove_one(struct pci_dev *pdev) } #ifdef CONFIG_PCI_IOV else { - if (adapter->port[0]) - unregister_netdev(adapter->port[0]); - iounmap(adapter->regs); - kfree(adapter->vfinfo); - kfree(adapter->mbox_log); - kfree(adapter); - pci_disable_sriov(pdev); - pci_release_regions(pdev); + cxgb4_iov_configure(adapter->pdev, 0); } #endif } @@ -5467,18 +5658,6 @@ static void shutdown_one(struct pci_dev *pdev) if (adapter->flags & FW_OK) t4_fw_bye(adapter, adapter->mbox); } -#ifdef CONFIG_PCI_IOV - else { - if (adapter->port[0]) - unregister_netdev(adapter->port[0]); - iounmap(adapter->regs); - kfree(adapter->vfinfo); - kfree(adapter->mbox_log); - kfree(adapter); - pci_disable_sriov(pdev); - pci_release_regions(pdev); - } -#endif } static struct pci_driver cxgb4_driver = { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index a452d5a1b0f3..36563364bae7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -43,7 +43,7 @@ #define STATS_CHECK_PERIOD (HZ / 2) -struct ch_tc_pedit_fields pedits[] = { +static struct ch_tc_pedit_fields pedits[] = { PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0), PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4), PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0), @@ -408,9 +408,7 @@ static void cxgb4_process_flow_actions(struct net_device *in, } else if (is_tcf_gact_shot(a)) { fs->action = FILTER_DROP; } else if (is_tcf_mirred_egress_redirect(a)) { - int ifindex = tcf_mirred_ifindex(a); - struct net_device *out = __dev_get_by_index(dev_net(in), - ifindex); + struct net_device *out = tcf_mirred_dev(a); struct port_info *pi = netdev_priv(out); fs->action = FILTER_SWITCH; @@ -585,14 +583,14 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, /* Do nothing */ } else if (is_tcf_mirred_egress_redirect(a)) { struct adapter *adap = netdev2adap(dev); - struct net_device *n_dev; - unsigned int i, ifindex; + struct net_device *n_dev, *target_dev; + unsigned int i; bool found = false; - ifindex = tcf_mirred_ifindex(a); + target_dev = tcf_mirred_dev(a); for_each_port(adap, i) { n_dev = adap->port[i]; - if (ifindex == n_dev->ifindex) { + if (target_dev == n_dev) { found = true; break; } @@ -768,9 +766,7 @@ static void ch_flower_stats_handler(struct work_struct *work) rhashtable_walk_enter(&adap->flower_tbl, &iter); do { - flower_entry = ERR_PTR(rhashtable_walk_start(&iter)); - if (IS_ERR(flower_entry)) - goto walk_stop; + rhashtable_walk_start(&iter); while ((flower_entry = rhashtable_walk_next(&iter)) && !IS_ERR(flower_entry)) { @@ -789,8 +785,9 @@ static void 
ch_flower_stats_handler(struct work_struct *work) spin_unlock(&flower_entry->lock); } } -walk_stop: + rhashtable_walk_stop(&iter); + } while (flower_entry == ERR_PTR(-EAGAIN)); rhashtable_walk_exit(&iter); mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index cd0cd13a964d..ab174bcfbfb0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -114,14 +114,14 @@ static int fill_action_fields(struct adapter *adap, /* Re-direct to specified port in hardware. */ if (is_tcf_mirred_egress_redirect(a)) { - struct net_device *n_dev; - unsigned int i, index; + struct net_device *n_dev, *target_dev; bool found = false; + unsigned int i; - index = tcf_mirred_ifindex(a); + target_dev = tcf_mirred_dev(a); for_each_port(adap, i) { n_dev = adap->port[i]; - if (index == n_dev->ifindex) { + if (target_dev == n_dev) { fs->action = FILTER_SWITCH; fs->eport = i; found = true; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 6c7b0ac0b48b..6e310a0da7c9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -761,12 +761,19 @@ static inline unsigned int flits_to_desc(unsigned int n) * Returns whether an Ethernet packet is small enough to fit as * immediate data. Return value corresponds to headroom required. */ -static inline int is_eth_imm(const struct sk_buff *skb) +static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver) { - int hdrlen = skb_shinfo(skb)->gso_size ? - sizeof(struct cpl_tx_pkt_lso_core) : 0; + int hdrlen = 0; - hdrlen += sizeof(struct cpl_tx_pkt); + if (skb->encapsulation && skb_shinfo(skb)->gso_size && + chip_ver > CHELSIO_T5) { + hdrlen = sizeof(struct cpl_tx_tnl_lso); + hdrlen += sizeof(struct cpl_tx_pkt_core); + } else { + hdrlen = skb_shinfo(skb)->gso_size ? + sizeof(struct cpl_tx_pkt_lso_core) : 0; + hdrlen += sizeof(struct cpl_tx_pkt); + } if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) return hdrlen; return 0; @@ -779,10 +786,11 @@ static inline int is_eth_imm(const struct sk_buff *skb) * Returns the number of flits needed for a Tx WR for the given Ethernet * packet, including the needed WR and CPL headers. */ -static inline unsigned int calc_tx_flits(const struct sk_buff *skb) +static inline unsigned int calc_tx_flits(const struct sk_buff *skb, + unsigned int chip_ver) { unsigned int flits; - int hdrlen = is_eth_imm(skb); + int hdrlen = is_eth_imm(skb, chip_ver); /* If the skb is small enough, we can pump it out as a work request * with only immediate data. In that case we just have to have the @@ -801,13 +809,20 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb) * with an embedded TX Packet Write CPL message. 
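SGE work requests are measured in flits of eight bytes, so the header cost is simply the WR plus CPL sizes divided by sizeof(__be64); with T6 tunnel offload the larger cpl_tx_tnl_lso replaces cpl_tx_pkt_lso_core. A sketch of the arithmetic the branch below performs, using the driver's structure names:

/* Sketch: WR + CPL header length, in flits, for a GSO packet. */
static unsigned int lso_hdr_flits(bool tnl, unsigned int chip_ver)
{
        unsigned int hdrlen = sizeof(struct fw_eth_tx_pkt_wr);

        if (tnl && chip_ver > CHELSIO_T5)
                hdrlen += sizeof(struct cpl_tx_tnl_lso);
        else
                hdrlen += sizeof(struct cpl_tx_pkt_lso_core);
        hdrlen += sizeof(struct cpl_tx_pkt_core);

        return hdrlen / sizeof(__be64); /* one flit per 8 bytes */
}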
*/ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); - if (skb_shinfo(skb)->gso_size) - flits += (sizeof(struct fw_eth_tx_pkt_wr) + - sizeof(struct cpl_tx_pkt_lso_core) + - sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); - else + if (skb_shinfo(skb)->gso_size) { + if (skb->encapsulation && chip_ver > CHELSIO_T5) + hdrlen = sizeof(struct fw_eth_tx_pkt_wr) + + sizeof(struct cpl_tx_tnl_lso); + else + hdrlen = sizeof(struct fw_eth_tx_pkt_wr) + + sizeof(struct cpl_tx_pkt_lso_core); + + hdrlen += sizeof(struct cpl_tx_pkt_core); + flits += (hdrlen / sizeof(__be64)); + } else { flits += (sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + } return flits; } @@ -818,9 +833,10 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb) * Returns the number of Tx descriptors needed for the given Ethernet * packet, including the needed WR and CPL headers. */ -static inline unsigned int calc_tx_descs(const struct sk_buff *skb) +static inline unsigned int calc_tx_descs(const struct sk_buff *skb, + unsigned int chip_ver) { - return flits_to_desc(calc_tx_flits(skb)); + return flits_to_desc(calc_tx_flits(skb, chip_ver)); } /** @@ -1148,6 +1164,105 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap, } #endif /* CONFIG_CHELSIO_T4_FCOE */ +/* Returns tunnel type if hardware supports offloading of the same. + * It is called only for T5 and onwards. + */ +enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb) +{ + u8 l4_hdr = 0; + enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; + struct port_info *pi = netdev_priv(skb->dev); + struct adapter *adapter = pi->adapter; + + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB)) + return tnl_type; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + l4_hdr = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + return tnl_type; + } + + switch (l4_hdr) { + case IPPROTO_UDP: + if (adapter->vxlan_port == udp_hdr(skb)->dest) + tnl_type = TX_TNL_TYPE_VXLAN; + else if (adapter->geneve_port == udp_hdr(skb)->dest) + tnl_type = TX_TNL_TYPE_GENEVE; + break; + default: + return tnl_type; + } + + return tnl_type; +} + +static inline void t6_fill_tnl_lso(struct sk_buff *skb, + struct cpl_tx_tnl_lso *tnl_lso, + enum cpl_tx_tnl_lso_type tnl_type) +{ + u32 val; + int in_eth_xtra_len; + int l3hdr_len = skb_network_header_len(skb); + int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + const struct skb_shared_info *ssi = skb_shinfo(skb); + bool v6 = (ip_hdr(skb)->version == 6); + + val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) | + CPL_TX_TNL_LSO_FIRST_F | + CPL_TX_TNL_LSO_LAST_F | + (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) | + CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) | + CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) | + (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) | + CPL_TX_TNL_LSO_IPLENSETOUT_F | + (v6 ? 
0 : CPL_TX_TNL_LSO_IPIDINCOUT_F); + tnl_lso->op_to_IpIdSplitOut = htonl(val); + + tnl_lso->IpIdOffsetOut = 0; + + /* Get the tunnel header length */ + val = skb_inner_mac_header(skb) - skb_mac_header(skb); + in_eth_xtra_len = skb_inner_network_header(skb) - + skb_inner_mac_header(skb) - ETH_HLEN; + + switch (tnl_type) { + case TX_TNL_TYPE_VXLAN: + case TX_TNL_TYPE_GENEVE: + tnl_lso->UdpLenSetOut_to_TnlHdrLen = + htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F | + CPL_TX_TNL_LSO_UDPLENSETOUT_F); + break; + default: + tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0; + break; + } + + tnl_lso->UdpLenSetOut_to_TnlHdrLen |= + htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) | + CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type)); + + tnl_lso->r1 = 0; + + val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) | + CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) | + CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) | + CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4); + tnl_lso->Flow_to_TcpHdrLen = htonl(val); + + tnl_lso->IpIdOffset = htons(0); + + tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size)); + tnl_lso->TCPSeqOffset = htonl(0); + tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len)); +} + /** * t4_eth_xmit - add a packet to an Ethernet Tx queue * @skb: the packet @@ -1171,6 +1286,9 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) bool immediate = false; int len, max_pkt_len; bool ptp_enabled = is_ptp_enabled(skb, dev); + unsigned int chip_ver; + enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; + #ifdef CONFIG_CHELSIO_T4_FCOE int err; #endif /* CONFIG_CHELSIO_T4_FCOE */ @@ -1227,7 +1345,8 @@ out_free: dev_kfree_skb_any(skb); } #endif /* CONFIG_CHELSIO_T4_FCOE */ - flits = calc_tx_flits(skb); + chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); + flits = calc_tx_flits(skb, chip_ver); ndesc = flits_to_desc(flits); credits = txq_avail(&q->q) - ndesc; @@ -1241,9 +1360,12 @@ out_free: dev_kfree_skb_any(skb); return NETDEV_TX_BUSY; } - if (is_eth_imm(skb)) + if (is_eth_imm(skb, chip_ver)) immediate = true; + if (skb->encapsulation && chip_ver > CHELSIO_T5) + tnl_type = cxgb_encap_offload_supported(skb); + if (!immediate && unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) { q->mapping_err++; @@ -1269,33 +1391,58 @@ out_free: dev_kfree_skb_any(skb); bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; int l3hdr_len = skb_network_header_len(skb); int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1); + + if (tnl_type) + len += sizeof(*tnl_lso); + else + len += sizeof(*lso); - len += sizeof(*lso); wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | FW_WR_IMMDLEN_V(len)); - lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | - LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | - LSO_IPV6_V(v6) | - LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | - LSO_IPHDR_LEN_V(l3hdr_len / 4) | - LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); - lso->c.ipid_ofst = htons(0); - lso->c.mss = htons(ssi->gso_size); - lso->c.seqno_offset = htonl(0); - if (is_t4(adap->params.chip)) - lso->c.len = htonl(skb->len); - else - lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); - cpl = (void *)(lso + 1); + if (tnl_type) { + struct iphdr *iph = ip_hdr(skb); - if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) - cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); - else - cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); + t6_fill_tnl_lso(skb, tnl_lso, tnl_type); + cpl = (void *)(tnl_lso + 1); + /* Driver is expected to compute partial checksum that + * does not include 
the IP Total Length. + */ + if (iph->version == 4) { + iph->check = 0; + iph->tot_len = 0; + iph->check = (u16)(~ip_fast_csum((u8 *)iph, + iph->ihl)); + } + if (skb->ip_summed == CHECKSUM_PARTIAL) + cntrl = hwcsum(adap->params.chip, skb); + } else { + lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | + LSO_IPV6_V(v6) | + LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | + LSO_IPHDR_LEN_V(l3hdr_len / 4) | + LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); + lso->c.ipid_ofst = htons(0); + lso->c.mss = htons(ssi->gso_size); + lso->c.seqno_offset = htonl(0); + if (is_t4(adap->params.chip)) + lso->c.len = htonl(skb->len); + else + lso->c.len = + htonl(LSO_T5_XFER_SIZE_V(skb->len)); + cpl = (void *)(lso + 1); - cntrl |= TXPKT_CSUM_TYPE_V(v6 ? - TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | - TXPKT_IPHDR_LEN_V(l3hdr_len); + if (CHELSIO_CHIP_VERSION(adap->params.chip) + <= CHELSIO_T5) + cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); + else + cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); + + cntrl |= TXPKT_CSUM_TYPE_V(v6 ? + TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN_V(l3hdr_len); + } q->tso++; q->tx_cso += ssi->gso_segs; } else { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 375ef86a84da..047609ef0515 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -195,9 +195,11 @@ static void t4_report_fw_error(struct adapter *adap) u32 pcie_fw; pcie_fw = t4_read_reg(adap, PCIE_FW_A); - if (pcie_fw & PCIE_FW_ERR_F) + if (pcie_fw & PCIE_FW_ERR_F) { dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n", reason[PCIE_FW_EVAL_G(pcie_fw)]); + adap->flags &= ~FW_OK; + } } /* @@ -317,9 +319,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, * wait [for a while] till we're at the front [or bail out with an * EBUSY] ... */ - spin_lock(&adap->mbox_lock); + spin_lock_bh(&adap->mbox_lock); list_add_tail(&entry.list, &adap->mlist.list); - spin_unlock(&adap->mbox_lock); + spin_unlock_bh(&adap->mbox_lock); delay_idx = 0; ms = delay[0]; @@ -332,9 +334,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, */ pcie_fw = t4_read_reg(adap, PCIE_FW_A); if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) { - spin_lock(&adap->mbox_lock); + spin_lock_bh(&adap->mbox_lock); list_del(&entry.list); - spin_unlock(&adap->mbox_lock); + spin_unlock_bh(&adap->mbox_lock); ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY; t4_record_mbox(adap, cmd, size, access, ret); return ret; @@ -365,9 +367,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); if (v != MBOX_OWNER_DRV) { - spin_lock(&adap->mbox_lock); + spin_lock_bh(&adap->mbox_lock); list_del(&entry.list); - spin_unlock(&adap->mbox_lock); + spin_unlock_bh(&adap->mbox_lock); ret = (v == MBOX_OWNER_FW) ? 
-EBUSY : -ETIMEDOUT; t4_record_mbox(adap, cmd, size, access, ret); return ret; @@ -418,9 +420,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, execute = i + ms; t4_record_mbox(adap, cmd_rpl, MBOX_LEN, access, execute); - spin_lock(&adap->mbox_lock); + spin_lock_bh(&adap->mbox_lock); list_del(&entry.list); - spin_unlock(&adap->mbox_lock); + spin_unlock_bh(&adap->mbox_lock); return -FW_CMD_RETVAL_G((int)res); } } @@ -430,9 +432,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", *(const u8 *)cmd, mbox); t4_report_fw_error(adap); - spin_lock(&adap->mbox_lock); + spin_lock_bh(&adap->mbox_lock); list_del(&entry.list); - spin_unlock(&adap->mbox_lock); + spin_unlock_bh(&adap->mbox_lock); t4_fatal_err(adap); return ret; } @@ -524,11 +526,14 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, * MEM_EDC1 = 1 * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5) + * MEM_HMA = 4 */ edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A)); - if (mtype != MEM_MC1) + if (mtype == MEM_HMA) { + memoffset = 2 * (edc_size * 1024 * 1024); + } else if (mtype != MEM_MC1) { memoffset = (mtype * (edc_size * 1024 * 1024)); - else { + } else { mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A)); memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; @@ -4923,6 +4928,14 @@ void t4_intr_disable(struct adapter *adapter) t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0); } +unsigned int t4_chip_rss_size(struct adapter *adap) +{ + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) + return RSS_NENTRIES; + else + return T6_RSS_NENTRIES; +} + /** * t4_config_rss_range - configure a portion of the RSS mapping table * @adapter: the adapter @@ -5061,10 +5074,11 @@ static int rd_rss_row(struct adapter *adap, int row, u32 *val) */ int t4_read_rss(struct adapter *adapter, u16 *map) { + int i, ret, nentries; u32 val; - int i, ret; - for (i = 0; i < RSS_NENTRIES / 2; ++i) { + nentries = t4_chip_rss_size(adapter); + for (i = 0; i < nentries / 2; ++i) { ret = rd_rss_row(adapter, i, &val); if (ret) return ret; @@ -5076,7 +5090,7 @@ int t4_read_rss(struct adapter *adapter, u16 *map) static unsigned int t4_use_ldst(struct adapter *adap) { - return (adap->flags & FW_OK) || !adap->use_bd; + return (adap->flags & FW_OK) && !adap->use_bd; } /** @@ -6071,6 +6085,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type) "CR2_QSFP", "SFP28", "KR_SFP28", + "KR_XLAUI" }; if (port_type < ARRAY_SIZE(port_type_description)) @@ -6526,18 +6541,21 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state) * t4_sge_ctxt_flush - flush the SGE context cache * @adap: the adapter * @mbox: mailbox to use for the FW command + * @ctx_type: Egress or Ingress * * Issues a FW command through the given mailbox to flush the * SGE context cache. */ -int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox) +int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type) { int ret; u32 ldst_addrspace; struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); - ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC); + ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ? 
+ FW_LDST_ADDRSPC_SGE_EGRC : + FW_LDST_ADDRSPC_SGE_INGC); c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | FW_CMD_READ_F | ldst_addrspace); @@ -7451,6 +7469,112 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, } /** + * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam + * @adap: the adapter + * @viid: the VI id + * @addr: the MAC address + * @mask: the mask + * @idx: index of the entry in mps tcam + * @lookup_type: MAC address for inner (1) or outer (0) header + * @port_id: the port index + * @sleep_ok: call is allowed to sleep + * + * Removes the mac entry at the specified index using raw mac interface. + * + * Returns a negative error number on failure. + */ +int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int idx, + u8 lookup_type, u8 port_id, bool sleep_ok) +{ + struct fw_vi_mac_cmd c; + struct fw_vi_mac_raw *p = &c.u.raw; + u32 val; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_CMD_EXEC_V(0) | + FW_VI_MAC_CMD_VIID_V(viid)); + val = FW_CMD_LEN16_V(1) | + FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW); + c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) | + FW_CMD_LEN16_V(val)); + + p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) | + FW_VI_MAC_ID_BASED_FREE); + + /* Lookup Type. Outer header: 0, Inner header: 1 */ + p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) | + DATAPORTNUM_V(port_id)); + /* Lookup mask and port mask */ + p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) | + DATAPORTNUM_V(DATAPORTNUM_M)); + + /* Copy the address and the mask */ + memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN); + memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN); + + return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); +} + +/** + * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam + * @adap: the adapter + * @viid: the VI id + * @mac: the MAC address + * @mask: the mask + * @idx: index at which to add this entry + * @port_id: the port index + * @lookup_type: MAC address for inner (1) or outer (0) header + * @sleep_ok: call is allowed to sleep + * + * Adds the mac entry at the specified index using raw mac interface. + * + * Returns a negative error number or the allocated index for this mac. + */ +int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int idx, + u8 lookup_type, u8 port_id, bool sleep_ok) +{ + int ret = 0; + struct fw_vi_mac_cmd c; + struct fw_vi_mac_raw *p = &c.u.raw; + u32 val; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_VI_MAC_CMD_VIID_V(viid)); + val = FW_CMD_LEN16_V(1) | + FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW); + c.freemacs_to_len16 = cpu_to_be32(val); + + /* Specify that this is an inner mac address */ + p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx)); + + /* Lookup Type. 
Outer header: 0, Inner header: 1 */ + p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) | + DATAPORTNUM_V(port_id)); + /* Lookup mask and port mask */ + p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) | + DATAPORTNUM_V(DATAPORTNUM_M)); + + /* Copy the address and the mask */ + memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN); + memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN); + + ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); + if (ret == 0) { + ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd)); + if (ret != idx) + ret = -ENOMEM; + } + + return ret; +} + +/** + * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses + * @adap: the adapter + * @mbox: mailbox to use for the FW command @@ -8491,22 +8615,6 @@ found: return 0; } -static void set_pcie_completion_timeout(struct adapter *adapter, u8 range) -{ - u16 val; - u32 pcie_cap; - - pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); - if (pcie_cap) { - pci_read_config_word(adapter->pdev, - pcie_cap + PCI_EXP_DEVCTL2, &val); - val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT; - val |= range; - pci_write_config_word(adapter->pdev, - pcie_cap + PCI_EXP_DEVCTL2, val); - } -} - /** * t4_prep_adapter - prepare SW and HW for operation * @adapter: the adapter @@ -8592,8 +8700,9 @@ int t4_prep_adapter(struct adapter *adapter) adapter->params.portvec = 1; adapter->params.vpd.cclk = 50000; - /* Set pci completion timeout value to 4 seconds. */ - set_pcie_completion_timeout(adapter, 0xd); + /* Set PCIe completion timeout to 4 seconds. */ + pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd); return 0; } @@ -9736,3 +9845,91 @@ int t4_sched_params(struct adapter *adapter, int type, int level, int mode, return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd), NULL, 1); } + +/** + * t4_i2c_rd - read I2C data from adapter + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @port: Port number if per-port device; <0 if not + * @devid: per-port device ID or absolute device ID + * @offset: byte offset into device I2C space + * @len: byte length of I2C space data + * @buf: buffer in which to return I2C data + * + * Reads the I2C data from the indicated device and location. + */ +int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port, + unsigned int devid, unsigned int offset, + unsigned int len, u8 *buf) +{ + struct fw_ldst_cmd ldst_cmd, ldst_rpl; + unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data); + int ret = 0; + + if (len > I2C_PAGE_SIZE) + return -EINVAL; + + /* Don't allow reads that span multiple pages */ + if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE) + return -EINVAL; + + memset(&ldst_cmd, 0, sizeof(ldst_cmd)); + ldst_cmd.op_to_addrspace = + cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C)); + ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd)); + ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port); + ldst_cmd.u.i2c.did = devid; + + while (len > 0) { + unsigned int i2c_len = (len < i2c_max) ?
len : i2c_max; + + ldst_cmd.u.i2c.boffset = offset; + ldst_cmd.u.i2c.blen = i2c_len; + + ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd), + &ldst_rpl); + if (ret) + break; + + memcpy(buf, ldst_rpl.u.i2c.data, i2c_len); + offset += i2c_len; + buf += i2c_len; + len -= i2c_len; + } + + return ret; +} + +/** + * t4_set_vlan_acl - Set a VLAN id for the specified VF + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @vf: one of the VFs instantiated by the specified PF + * @vlan: the VLAN id to be set + */ +int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf, + u16 vlan) +{ + struct fw_acl_vlan_cmd vlan_cmd; + unsigned int enable; + + enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0); + memset(&vlan_cmd, 0, sizeof(vlan_cmd)); + vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_F | + FW_ACL_VLAN_CMD_PFN_V(adap->pf) | + FW_ACL_VLAN_CMD_VFN_V(vf)); + vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd)); + /* Drop all packets that do not match the vlan id */ + vlan_cmd.dropnovlan_fm = FW_ACL_VLAN_CMD_FM_F; + if (enable != 0) { + vlan_cmd.nvlan = 1; + vlan_cmd.vlanid[0] = cpu_to_be16(vlan); + } + + return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL); +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index a964ed184356..361d5032c288 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -38,21 +38,22 @@ #include <linux/types.h> enum { - NCHAN = 4, /* # of HW channels */ - MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */ - EEPROMSIZE = 17408, /* Serial EEPROM physical size */ - EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */ - EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */ - RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */ - TCB_SIZE = 128, /* TCB size */ - NMTUS = 16, /* size of MTU table */ - NCCTRL_WIN = 32, /* # of congestion control windows */ - NTX_SCHED = 8, /* # of HW Tx scheduling queues */ - PM_NSTATS = 5, /* # of PM stats */ - T6_PM_NSTATS = 7, /* # of PM stats in T6 */ - MBOX_LEN = 64, /* mailbox size in bytes */ - TRACE_LEN = 112, /* length of trace data and mask */ - FILTER_OPT_LEN = 36, /* filter tuple width for optional components */ + NCHAN = 4, /* # of HW channels */ + MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */ + EEPROMSIZE = 17408,/* Serial EEPROM physical size */ + EEPROMVSIZE = 32768,/* Serial EEPROM virtual address space size */ + EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */ + RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */ + T6_RSS_NENTRIES = 4096, /* # of entries in RSS mapping table for T6 */ + TCB_SIZE = 128, /* TCB size */ + NMTUS = 16, /* size of MTU table */ + NCCTRL_WIN = 32, /* # of congestion control windows */ + NTX_SCHED = 8, /* # of HW Tx scheduling queues */ + PM_NSTATS = 5, /* # of PM stats */ + T6_PM_NSTATS = 7, /* # of PM stats in T6 */ + MBOX_LEN = 64, /* mailbox size in bytes */ + TRACE_LEN = 112, /* length of trace data and mask */ + FILTER_OPT_LEN = 36, /* filter tuple width for optional components */ }; enum { @@ -70,7 +71,9 @@ enum { /* SGE context types */ enum ctxt_type { - CTXT_FLM = 2, + CTXT_EGRESS, + CTXT_INGRESS, + CTXT_FLM, CTXT_CNM, }; @@ -284,4 +287,14 @@ enum { #define SGE_TIMESTAMP_V(x) ((__u64)(x) << SGE_TIMESTAMP_S) #define SGE_TIMESTAMP_G(x) (((__u64)(x) >> SGE_TIMESTAMP_S) & SGE_TIMESTAMP_M)
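As an aside on t4_i2c_rd() above: reads must not cross an I2C_PAGE_SIZE (256-byte) page, so callers fetch small, page-local regions. A minimal usage sketch follows; the wrapper name is hypothetical, and it relies on the SFP constants added to t4_hw.h just below.

/* Hypothetical helper, for illustration only: read the one-byte SFP
 * diagnostic-monitoring-type field from the module EEPROM at I2C device
 * address 0xa0.  offset 0x5c with length 1 stays inside a single
 * 256-byte page, so the page-split check in t4_i2c_rd() passes.
 */
static int example_read_sfp_diag_type(struct adapter *adap, int port, u8 *buf)
{
	return t4_i2c_rd(adap, adap->mbox, port, I2C_DEV_ADDR_A0,
			 SFP_DIAG_TYPE_ADDR, SFP_DIAG_TYPE_LEN, buf);
}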
+#define I2C_DEV_ADDR_A0 0xa0 +#define I2C_DEV_ADDR_A2 0xa2 +#define I2C_PAGE_SIZE 0x100 +#define SFP_DIAG_TYPE_ADDR 0x5c +#define SFP_DIAG_TYPE_LEN 0x1 +#define SFF_8472_COMP_ADDR 0x5e +#define SFF_8472_COMP_LEN 0x1 +#define SFF_REV_ADDR 0x1 +#define SFF_REV_LEN 0x1 + #endif /* __T4_HW_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 7e12f241145b..d0db4427b77e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -107,6 +107,7 @@ enum { CPL_FW6_MSG = 0xE0, CPL_FW6_PLD = 0xE1, + CPL_TX_TNL_LSO = 0xEC, CPL_TX_PKT_LSO = 0xED, CPL_TX_PKT_XT = 0xEE, @@ -1479,6 +1480,169 @@ struct ulp_txpkt { #define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S) #define ULP_TXPKT_RO_F ULP_TXPKT_RO_V(1U) +enum cpl_tx_tnl_lso_type { + TX_TNL_TYPE_OPAQUE, + TX_TNL_TYPE_NVGRE, + TX_TNL_TYPE_VXLAN, + TX_TNL_TYPE_GENEVE, +}; + +struct cpl_tx_tnl_lso { + __be32 op_to_IpIdSplitOut; + __be16 IpIdOffsetOut; + __be16 UdpLenSetOut_to_TnlHdrLen; + __be64 r1; + __be32 Flow_to_TcpHdrLen; + __be16 IpIdOffset; + __be16 IpIdSplit_to_Mss; + __be32 TCPSeqOffset; + __be32 EthLenOffset_Size; + /* encapsulated CPL (TX_PKT_XT) follows here */ +}; + +#define CPL_TX_TNL_LSO_OPCODE_S 24 +#define CPL_TX_TNL_LSO_OPCODE_M 0xff +#define CPL_TX_TNL_LSO_OPCODE_V(x) ((x) << CPL_TX_TNL_LSO_OPCODE_S) +#define CPL_TX_TNL_LSO_OPCODE_G(x) \ + (((x) >> CPL_TX_TNL_LSO_OPCODE_S) & CPL_TX_TNL_LSO_OPCODE_M) + +#define CPL_TX_TNL_LSO_FIRST_S 23 +#define CPL_TX_TNL_LSO_FIRST_M 0x1 +#define CPL_TX_TNL_LSO_FIRST_V(x) ((x) << CPL_TX_TNL_LSO_FIRST_S) +#define CPL_TX_TNL_LSO_FIRST_G(x) \ + (((x) >> CPL_TX_TNL_LSO_FIRST_S) & CPL_TX_TNL_LSO_FIRST_M) +#define CPL_TX_TNL_LSO_FIRST_F CPL_TX_TNL_LSO_FIRST_V(1U) + +#define CPL_TX_TNL_LSO_LAST_S 22 +#define CPL_TX_TNL_LSO_LAST_M 0x1 +#define CPL_TX_TNL_LSO_LAST_V(x) ((x) << CPL_TX_TNL_LSO_LAST_S) +#define CPL_TX_TNL_LSO_LAST_G(x) \ + (((x) >> CPL_TX_TNL_LSO_LAST_S) & CPL_TX_TNL_LSO_LAST_M) +#define CPL_TX_TNL_LSO_LAST_F CPL_TX_TNL_LSO_LAST_V(1U) + +#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_S 21 +#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_M 0x1 +#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(x) \ + ((x) << CPL_TX_TNL_LSO_ETHHDRLENXOUT_S) +#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_ETHHDRLENXOUT_S) & \ + CPL_TX_TNL_LSO_ETHHDRLENXOUT_M) +#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_F CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(1U) + +#define CPL_TX_TNL_LSO_IPV6OUT_S 20 +#define CPL_TX_TNL_LSO_IPV6OUT_M 0x1 +#define CPL_TX_TNL_LSO_IPV6OUT_V(x) ((x) << CPL_TX_TNL_LSO_IPV6OUT_S) +#define CPL_TX_TNL_LSO_IPV6OUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_IPV6OUT_S) & CPL_TX_TNL_LSO_IPV6OUT_M) +#define CPL_TX_TNL_LSO_IPV6OUT_F CPL_TX_TNL_LSO_IPV6OUT_V(1U) + +#define CPL_TX_TNL_LSO_ETHHDRLEN_S 16 +#define CPL_TX_TNL_LSO_ETHHDRLEN_M 0xf +#define CPL_TX_TNL_LSO_ETHHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_ETHHDRLEN_S) +#define CPL_TX_TNL_LSO_ETHHDRLEN_G(x) \ + (((x) >> CPL_TX_TNL_LSO_ETHHDRLEN_S) & CPL_TX_TNL_LSO_ETHHDRLEN_M) + +#define CPL_TX_TNL_LSO_IPHDRLEN_S 4 +#define CPL_TX_TNL_LSO_IPHDRLEN_M 0xfff +#define CPL_TX_TNL_LSO_IPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLEN_S) +#define CPL_TX_TNL_LSO_IPHDRLEN_G(x) \ + (((x) >> CPL_TX_TNL_LSO_IPHDRLEN_S) & CPL_TX_TNL_LSO_IPHDRLEN_M) + +#define CPL_TX_TNL_LSO_TCPHDRLEN_S 0 +#define CPL_TX_TNL_LSO_TCPHDRLEN_M 0xf +#define CPL_TX_TNL_LSO_TCPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TCPHDRLEN_S) +#define CPL_TX_TNL_LSO_TCPHDRLEN_G(x) \ + (((x) >> CPL_TX_TNL_LSO_TCPHDRLEN_S) & CPL_TX_TNL_LSO_TCPHDRLEN_M) + 
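The CPL_TX_TNL_LSO field accessors above and below follow the usual cxgb4 _S/_M/_V/_G pattern: _S is the bit offset, _M the field mask, _V(x) shifts a value into position, and _G(x) extracts it again. A small sketch (hypothetical helper, not part of the patch) of how t6_fill_tnl_lso() packs the inner-header lengths with these macros:

/* Illustration only: pack the inner Ethernet/IP/TCP header lengths the
 * way t6_fill_tnl_lso() does, and note the _G() round trip.
 */
static inline u32 example_pack_inner_hdr_lens(u32 eth_xtra_len,
					      u32 l3hdr_len, u32 tcphdr_len)
{
	u32 val = CPL_TX_TNL_LSO_ETHHDRLEN_V(eth_xtra_len / 4) |
		  CPL_TX_TNL_LSO_IPHDRLEN_V(l3hdr_len / 4) |
		  CPL_TX_TNL_LSO_TCPHDRLEN_V(tcphdr_len / 4);

	/* CPL_TX_TNL_LSO_IPHDRLEN_G(val) now yields l3hdr_len / 4 */
	return val;
}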
+#define CPL_TX_TNL_LSO_MSS_S 0 +#define CPL_TX_TNL_LSO_MSS_M 0x3fff +#define CPL_TX_TNL_LSO_MSS_V(x) ((x) << CPL_TX_TNL_LSO_MSS_S) +#define CPL_TX_TNL_LSO_MSS_G(x) \ + (((x) >> CPL_TX_TNL_LSO_MSS_S) & CPL_TX_TNL_LSO_MSS_M) + +#define CPL_TX_TNL_LSO_SIZE_S 0 +#define CPL_TX_TNL_LSO_SIZE_M 0xfffffff +#define CPL_TX_TNL_LSO_SIZE_V(x) ((x) << CPL_TX_TNL_LSO_SIZE_S) +#define CPL_TX_TNL_LSO_SIZE_G(x) \ + (((x) >> CPL_TX_TNL_LSO_SIZE_S) & CPL_TX_TNL_LSO_SIZE_M) + +#define CPL_TX_TNL_LSO_ETHHDRLENOUT_S 16 +#define CPL_TX_TNL_LSO_ETHHDRLENOUT_M 0xf +#define CPL_TX_TNL_LSO_ETHHDRLENOUT_V(x) \ + ((x) << CPL_TX_TNL_LSO_ETHHDRLENOUT_S) +#define CPL_TX_TNL_LSO_ETHHDRLENOUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_ETHHDRLENOUT_S) & CPL_TX_TNL_LSO_ETHHDRLENOUT_M) + +#define CPL_TX_TNL_LSO_IPHDRLENOUT_S 4 +#define CPL_TX_TNL_LSO_IPHDRLENOUT_M 0xfff +#define CPL_TX_TNL_LSO_IPHDRLENOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLENOUT_S) +#define CPL_TX_TNL_LSO_IPHDRLENOUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_IPHDRLENOUT_S) & CPL_TX_TNL_LSO_IPHDRLENOUT_M) + +#define CPL_TX_TNL_LSO_IPHDRCHKOUT_S 3 +#define CPL_TX_TNL_LSO_IPHDRCHKOUT_M 0x1 +#define CPL_TX_TNL_LSO_IPHDRCHKOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRCHKOUT_S) +#define CPL_TX_TNL_LSO_IPHDRCHKOUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_IPHDRCHKOUT_S) & CPL_TX_TNL_LSO_IPHDRCHKOUT_M) +#define CPL_TX_TNL_LSO_IPHDRCHKOUT_F CPL_TX_TNL_LSO_IPHDRCHKOUT_V(1U) + +#define CPL_TX_TNL_LSO_IPLENSETOUT_S 2 +#define CPL_TX_TNL_LSO_IPLENSETOUT_M 0x1 +#define CPL_TX_TNL_LSO_IPLENSETOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPLENSETOUT_S) +#define CPL_TX_TNL_LSO_IPLENSETOUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_IPLENSETOUT_S) & CPL_TX_TNL_LSO_IPLENSETOUT_M) +#define CPL_TX_TNL_LSO_IPLENSETOUT_F CPL_TX_TNL_LSO_IPLENSETOUT_V(1U) + +#define CPL_TX_TNL_LSO_IPIDINCOUT_S 1 +#define CPL_TX_TNL_LSO_IPIDINCOUT_M 0x1 +#define CPL_TX_TNL_LSO_IPIDINCOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPIDINCOUT_S) +#define CPL_TX_TNL_LSO_IPIDINCOUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_IPIDINCOUT_S) & CPL_TX_TNL_LSO_IPIDINCOUT_M) +#define CPL_TX_TNL_LSO_IPIDINCOUT_F CPL_TX_TNL_LSO_IPIDINCOUT_V(1U) + +#define CPL_TX_TNL_LSO_UDPCHKCLROUT_S 14 +#define CPL_TX_TNL_LSO_UDPCHKCLROUT_M 0x1 +#define CPL_TX_TNL_LSO_UDPCHKCLROUT_V(x) \ + ((x) << CPL_TX_TNL_LSO_UDPCHKCLROUT_S) +#define CPL_TX_TNL_LSO_UDPCHKCLROUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_UDPCHKCLROUT_S) & \ + CPL_TX_TNL_LSO_UDPCHKCLROUT_M) +#define CPL_TX_TNL_LSO_UDPCHKCLROUT_F CPL_TX_TNL_LSO_UDPCHKCLROUT_V(1U) + +#define CPL_TX_TNL_LSO_UDPLENSETOUT_S 15 +#define CPL_TX_TNL_LSO_UDPLENSETOUT_M 0x1 +#define CPL_TX_TNL_LSO_UDPLENSETOUT_V(x) \ + ((x) << CPL_TX_TNL_LSO_UDPLENSETOUT_S) +#define CPL_TX_TNL_LSO_UDPLENSETOUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_UDPLENSETOUT_S) & \ + CPL_TX_TNL_LSO_UDPLENSETOUT_M) +#define CPL_TX_TNL_LSO_UDPLENSETOUT_F CPL_TX_TNL_LSO_UDPLENSETOUT_V(1U) + +#define CPL_TX_TNL_LSO_TNLTYPE_S 12 +#define CPL_TX_TNL_LSO_TNLTYPE_M 0x3 +#define CPL_TX_TNL_LSO_TNLTYPE_V(x) ((x) << CPL_TX_TNL_LSO_TNLTYPE_S) +#define CPL_TX_TNL_LSO_TNLTYPE_G(x) \ + (((x) >> CPL_TX_TNL_LSO_TNLTYPE_S) & CPL_TX_TNL_LSO_TNLTYPE_M) + +#define S_CPL_TX_TNL_LSO_ETHHDRLEN 16 +#define M_CPL_TX_TNL_LSO_ETHHDRLEN 0xf +#define V_CPL_TX_TNL_LSO_ETHHDRLEN(x) ((x) << S_CPL_TX_TNL_LSO_ETHHDRLEN) +#define G_CPL_TX_TNL_LSO_ETHHDRLEN(x) \ + (((x) >> S_CPL_TX_TNL_LSO_ETHHDRLEN) & M_CPL_TX_TNL_LSO_ETHHDRLEN) + +#define CPL_TX_TNL_LSO_TNLHDRLEN_S 0 +#define CPL_TX_TNL_LSO_TNLHDRLEN_M 0xfff +#define CPL_TX_TNL_LSO_TNLHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TNLHDRLEN_S) +#define CPL_TX_TNL_LSO_TNLHDRLEN_G(x) \ + (((x) >> 
CPL_TX_TNL_LSO_TNLHDRLEN_S) & CPL_TX_TNL_LSO_TNLHDRLEN_M) + +#define CPL_TX_TNL_LSO_IPV6_S 20 +#define CPL_TX_TNL_LSO_IPV6_M 0x1 +#define CPL_TX_TNL_LSO_IPV6_V(x) ((x) << CPL_TX_TNL_LSO_IPV6_S) +#define CPL_TX_TNL_LSO_IPV6_G(x) \ + (((x) >> CPL_TX_TNL_LSO_IPV6_S) & CPL_TX_TNL_LSO_IPV6_M) +#define CPL_TX_TNL_LSO_IPV6_F CPL_TX_TNL_LSO_IPV6_V(1U) + #define ULP_TX_SC_MORE_S 23 #define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S) #define ULP_TX_SC_MORE_F ULP_TX_SC_MORE_V(1U) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 60cf9e02de5d..51b18035d691 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -183,6 +183,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */ CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */ CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */ /* T6 adapters: */ @@ -206,6 +207,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */ CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */ CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */ CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index a7cfece72828..a6df73398d17 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -45,6 +45,9 @@ #define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE) #define PF_REG(idx, reg) (PF_BASE(idx) + (reg)) +#define NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES 4 +#define NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES 16 + #define MYPORT_BASE 0x1c000 #define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr)) @@ -961,6 +964,10 @@ #define MA_EXT_MEMORY1_BAR_A 0x7808 +#define HMA_MUX_S 5 +#define HMA_MUX_V(x) ((x) << HMA_MUX_S) +#define HMA_MUX_F HMA_MUX_V(1U) + #define EXT_MEM1_BASE_S 16 #define EXT_MEM1_BASE_M 0xfffU #define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M) @@ -2504,6 +2511,28 @@ #define MPS_RX_MAC_BG_PG_CNT0_A 0x11208 #define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218 +#define MPS_RX_VXLAN_TYPE_A 0x11234 + +#define VXLAN_EN_S 16 +#define VXLAN_EN_V(x) ((x) << VXLAN_EN_S) +#define VXLAN_EN_F VXLAN_EN_V(1U) + +#define VXLAN_S 0 +#define VXLAN_M 0xffffU +#define VXLAN_V(x) ((x) << VXLAN_S) +#define VXLAN_G(x) (((x) >> VXLAN_S) & VXLAN_M) + +#define MPS_RX_GENEVE_TYPE_A 0x11238 + +#define GENEVE_EN_S 16 +#define GENEVE_EN_V(x) ((x) << GENEVE_EN_S) +#define GENEVE_EN_F GENEVE_EN_V(1U) + +#define GENEVE_S 0 +#define GENEVE_M 0xffffU +#define GENEVE_V(x) ((x) << GENEVE_S) +#define GENEVE_G(x) (((x) >> GENEVE_S) & GENEVE_M) + #define MPS_CLS_TCAM_Y_L_A 0xf000 #define MPS_CLS_TCAM_DATA0_A 0xf000 #define MPS_CLS_TCAM_DATA1_A 0xf004 @@ -2530,8 +2559,14 @@ #define DATAPORTNUM_S 12 #define DATAPORTNUM_M 0xfU +#define DATAPORTNUM_V(x) ((x) << DATAPORTNUM_S) #define DATAPORTNUM_G(x) (((x) >> DATAPORTNUM_S) & DATAPORTNUM_M) +#define DATALKPTYPE_S 10 +#define DATALKPTYPE_M 0x3U +#define DATALKPTYPE_V(x) ((x) << DATALKPTYPE_S) +#define DATALKPTYPE_G(x) (((x) >> DATALKPTYPE_S) & DATALKPTYPE_M) + #define DATADIPHIT_S 8 #define DATADIPHIT_V(x) ((x) << DATADIPHIT_S) #define DATADIPHIT_F DATADIPHIT_V(1U) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h 
b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index be3658301832..0d83b4064a78 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -835,6 +835,7 @@ enum fw_ldst_addrspc { FW_LDST_ADDRSPC_MPS = 0x0020, FW_LDST_ADDRSPC_FUNC = 0x0028, FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029, + FW_LDST_ADDRSPC_I2C = 0x0038, }; enum fw_ldst_mps_fid { @@ -2066,6 +2067,7 @@ struct fw_vi_cmd { #define FW_VI_MAC_ADD_MAC 0x3FF #define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE #define FW_VI_MAC_MAC_BASED_FREE 0x3FD +#define FW_VI_MAC_ID_BASED_FREE 0x3FC #define FW_CLS_TCAM_NUM_ENTRIES 336 enum fw_vi_mac_smac { @@ -2082,6 +2084,13 @@ enum fw_vi_mac_result { FW_VI_MAC_R_F_ACL_CHECK }; +enum fw_vi_mac_entry_types { + FW_VI_MAC_TYPE_EXACTMAC, + FW_VI_MAC_TYPE_HASHVEC, + FW_VI_MAC_TYPE_RAW, + FW_VI_MAC_TYPE_EXACTMAC_VNI, +}; + struct fw_vi_mac_cmd { __be32 op_to_viid; __be32 freemacs_to_len16; @@ -2093,6 +2102,13 @@ struct fw_vi_mac_cmd { struct fw_vi_mac_hash { __be64 hashvec; } hash; + struct fw_vi_mac_raw { + __be32 raw_idx_pkd; + __be32 data0_pkd; + __be32 data1[2]; + __be64 data0m_pkd; + __be32 data1m[2]; + } raw; } u; }; @@ -2102,6 +2118,12 @@ struct fw_vi_mac_cmd { #define FW_VI_MAC_CMD_FREEMACS_S 31 #define FW_VI_MAC_CMD_FREEMACS_V(x) ((x) << FW_VI_MAC_CMD_FREEMACS_S) +#define FW_VI_MAC_CMD_ENTRY_TYPE_S 23 +#define FW_VI_MAC_CMD_ENTRY_TYPE_M 0x7 +#define FW_VI_MAC_CMD_ENTRY_TYPE_V(x) ((x) << FW_VI_MAC_CMD_ENTRY_TYPE_S) +#define FW_VI_MAC_CMD_ENTRY_TYPE_G(x) \ + (((x) >> FW_VI_MAC_CMD_ENTRY_TYPE_S) & FW_VI_MAC_CMD_ENTRY_TYPE_M) + #define FW_VI_MAC_CMD_HASHVECEN_S 23 #define FW_VI_MAC_CMD_HASHVECEN_V(x) ((x) << FW_VI_MAC_CMD_HASHVECEN_S) #define FW_VI_MAC_CMD_HASHVECEN_F FW_VI_MAC_CMD_HASHVECEN_V(1U) @@ -2128,6 +2150,12 @@ struct fw_vi_mac_cmd { #define FW_VI_MAC_CMD_IDX_G(x) \ (((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M) +#define FW_VI_MAC_CMD_RAW_IDX_S 16 +#define FW_VI_MAC_CMD_RAW_IDX_M 0xffff +#define FW_VI_MAC_CMD_RAW_IDX_V(x) ((x) << FW_VI_MAC_CMD_RAW_IDX_S) +#define FW_VI_MAC_CMD_RAW_IDX_G(x) \ + (((x) >> FW_VI_MAC_CMD_RAW_IDX_S) & FW_VI_MAC_CMD_RAW_IDX_M) + #define FW_RXMODE_MTU_NO_CHG 65535 struct fw_vi_rxmode_cmd { @@ -2332,14 +2360,22 @@ struct fw_acl_vlan_cmd { #define FW_ACL_VLAN_CMD_VFN_S 0 #define FW_ACL_VLAN_CMD_VFN_V(x) ((x) << FW_ACL_VLAN_CMD_VFN_S) -#define FW_ACL_VLAN_CMD_EN_S 31 -#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S) +#define FW_ACL_VLAN_CMD_EN_S 31 +#define FW_ACL_VLAN_CMD_EN_M 0x1 +#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S) +#define FW_ACL_VLAN_CMD_EN_G(x) \ + (((x) >> FW_ACL_VLAN_CMD_EN_S) & FW_ACL_VLAN_CMD_EN_M) +#define FW_ACL_VLAN_CMD_EN_F FW_ACL_VLAN_CMD_EN_V(1U) #define FW_ACL_VLAN_CMD_DROPNOVLAN_S 7 #define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x) ((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S) -#define FW_ACL_VLAN_CMD_FM_S 6 -#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S) +#define FW_ACL_VLAN_CMD_FM_S 6 +#define FW_ACL_VLAN_CMD_FM_M 0x1 +#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S) +#define FW_ACL_VLAN_CMD_FM_G(x) \ + (((x) >> FW_ACL_VLAN_CMD_FM_S) & FW_ACL_VLAN_CMD_FM_M) +#define FW_ACL_VLAN_CMD_FM_F FW_ACL_VLAN_CMD_FM_V(1U) /* old 16-bit port capabilities bitmap (fw_port_cap16_t) */ enum fw_port_cap { @@ -2835,6 +2871,7 @@ enum fw_port_type { FW_PORT_TYPE_CR2_QSFP, FW_PORT_TYPE_SFP28, FW_PORT_TYPE_KR_SFP28, + FW_PORT_TYPE_KR_XLAUI, FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M }; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 08c6ddb84a04..5883f09e3804 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -92,6 +92,7 @@ struct sge_rspq; */ struct port_info { struct adapter *adapter; /* our adapter */ + u32 vlan_id; /* vlan id for VST */ u16 viid; /* virtual interface ID */ s16 xact_addr_filt; /* index of our MAC address filter */ u16 rss_size; /* size of VI's RSS table slice */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index b48361cfdc78..b7e79e64d2ed 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -791,6 +791,8 @@ static int cxgb4vf_open(struct net_device *dev) if (err) goto err_unwind; + pi->vlan_id = t4vf_get_vf_vlan_acl(adapter); + netif_tx_start_all_queues(dev); set_bit(pi->port_id, &adapter->open_device_map); return 0; @@ -1229,7 +1231,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type, else return PORT_OTHER; } else if (port_type == FW_PORT_TYPE_KR4_100G || - port_type == FW_PORT_TYPE_KR_SFP28) { + port_type == FW_PORT_TYPE_KR_SFP28 || + port_type == FW_PORT_TYPE_KR_XLAUI) { return PORT_NONE; } @@ -1323,6 +1326,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, SET_LMM(25000baseKR_Full); break; + case FW_PORT_TYPE_KR_XLAUI: + SET_LMM(Backplane); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); + FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full); + break; + case FW_PORT_TYPE_CR2_QSFP: SET_LMM(FIBRE); SET_LMM(50000baseSR2_Full); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 14d7e673c656..dfce5df7538e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1202,6 +1202,10 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) BUG_ON(qidx >= pi->nqsets); txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; + if (pi->vlan_id && !skb_vlan_tag_present(skb)) + __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q), + pi->vlan_id); + /* * Take this opportunity to reclaim any TX Descriptors whose DMA * transfers have completed. 
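The cxgb4vf hunks above and the sge.c hunks below together implement VST (VLAN switch tagging): cxgb4vf_open() caches the PF-assigned VLAN in pi->vlan_id, the transmit path tags untagged frames with it, and the receive path keeps that tag hidden from the stack. A condensed restatement of the rule, as illustration only (hypothetical helpers, assuming <linux/if_vlan.h> and the port_info with the new vlan_id member):

/* Sketch of the VST transmit rule: tag untagged egress frames with the
 * PF-assigned VLAN.
 */
static inline void example_vst_tx(struct sk_buff *skb, struct port_info *pi)
{
	if (pi->vlan_id && !skb_vlan_tag_present(skb))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pi->vlan_id);
}

/* Sketch of the VST receive rule: when a VST VLAN is active, the
 * extracted tag is the PF-assigned one and must not be passed up (see
 * the do_gro()/t4vf_ethrx_handler() changes below).
 */
static inline bool example_vst_hide_rx_tag(const struct port_info *pi)
{
	return pi->vlan_id != 0;
}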
@@ -1570,6 +1574,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, { struct adapter *adapter = rxq->rspq.adapter; struct sge *s = &adapter->sge; + struct port_info *pi; int ret; struct sk_buff *skb; @@ -1586,8 +1591,9 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, skb->truesize += skb->data_len; skb->ip_summed = CHECKSUM_UNNECESSARY; skb_record_rx_queue(skb, rxq->rspq.idx); + pi = netdev_priv(skb->dev); - if (pkt->vlan_ex) { + if (pkt->vlan_ex && !pi->vlan_id) { __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q), be16_to_cpu(pkt->vlan)); rxq->stats.vlan_ex++; @@ -1620,6 +1626,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); struct adapter *adapter = rspq->adapter; struct sge *s = &adapter->sge; + struct port_info *pi; /* * If this is a good TCP packet and we have Generic Receive Offload @@ -1644,6 +1651,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, __skb_pull(skb, s->pktshift); skb->protocol = eth_type_trans(skb, rspq->netdev); skb_record_rx_queue(skb, rspq->idx); + pi = netdev_priv(skb->dev); rxq->stats.pkts++; if (csum_ok && !pkt->err_vec && @@ -1660,9 +1668,10 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, } else skb_checksum_none_assert(skb); - if (pkt->vlan_ex) { + if (pkt->vlan_ex && !pi->vlan_id) { rxq->stats.vlan_ex++; - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + be16_to_cpu(pkt->vlan)); } netif_receive_skb(skb); @@ -2619,8 +2628,8 @@ void t4vf_sge_stop(struct adapter *adapter) int t4vf_sge_init(struct adapter *adapter) { struct sge_params *sge_params = &adapter->params.sge; - u32 fl0 = sge_params->sge_fl_buffer_size[0]; - u32 fl1 = sge_params->sge_fl_buffer_size[1]; + u32 fl_small_pg = sge_params->sge_fl_buffer_size[0]; + u32 fl_large_pg = sge_params->sge_fl_buffer_size[1]; struct sge *s = &adapter->sge; /* @@ -2628,9 +2637,20 @@ int t4vf_sge_init(struct adapter *adapter) * the Physical Function Driver. Ideally we should be able to deal * with _any_ configuration. Practice is different ... */ - if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) { + + /* We only bother using the Large Page logic if the Large Page Buffer + * is larger than our Page Size Buffer. + */ + if (fl_large_pg <= fl_small_pg) + fl_large_pg = 0; + + /* The Page Size Buffer must be exactly equal to our Page Size and the + * Large Page Size Buffer should be 0 (per above) or a power of 2. + */ + if (fl_small_pg != PAGE_SIZE || + (fl_large_pg & (fl_large_pg - 1)) != 0) { dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n", - fl0, fl1); + fl_small_pg, fl_large_pg); return -EINVAL; } if ((sge_params->sge_control & RXPKTCPLMODE_F) != @@ -2642,8 +2662,8 @@ int t4vf_sge_init(struct adapter *adapter) /* * Now translate the adapter parameters into our internal forms. */ - if (fl1) - s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; + if (fl_large_pg) + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F) ? 
128 : 64); s->pktshift = PKTSHIFT_G(sge_params->sge_control); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 9cf9c56b0f73..712e8f0c71b4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -413,5 +413,6 @@ int t4vf_handle_fw_rpl(struct adapter *, const __be64 *); int t4vf_prep_adapter(struct adapter *); int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf, unsigned int *naddr, u8 *addr); +int t4vf_get_vf_vlan_acl(struct adapter *adapter); #endif /* __T4VF_COMMON_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 67aec59a14e6..798695bf8678 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -2147,3 +2147,31 @@ int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf, return ret; } + +/** + * t4vf_get_vf_vlan_acl - Get the VLAN ID to be set to + * the VI of this VF. + * @adapter: The adapter + * + * Find the VLAN ID to be set to the VF's VI. The requested VLAN ID + * is from the host OS via callback in the PF driver. + */ +int t4vf_get_vf_vlan_acl(struct adapter *adapter) +{ + struct fw_acl_vlan_cmd cmd; + int vlan = 0; + int ret = 0; + + cmd.op_to_vfn = htonl(FW_CMD_OP_V(FW_ACL_VLAN_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); + + /* Note: Do not enable the ACL */ + cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd)); + + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd); + + if (!ret) + vlan = be16_to_cpu(cmd.vlanid[0]); + + return vlan; +} diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 6a9527004cb1..9b218f0e5a4c 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -43,6 +43,8 @@ #define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) #define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) +#define ENIC_WQ_NAPI_BUDGET 256 + #define ENIC_AIC_LARGE_PKT_DIFF 3 struct enic_msix_entry { diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 462d0ce51240..efb9333c7cf8 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -18,6 +18,7 @@ #include <linux/netdevice.h> #include <linux/ethtool.h> +#include <linux/net_tstamp.h> #include "enic_res.h" #include "enic.h" @@ -578,6 +579,16 @@ static int enic_set_rxfh(struct net_device *netdev, const u32 *indir, return __enic_set_rsskey(enic); } +static int enic_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + + return 0; +} + static const struct ethtool_ops enic_ethtool_ops = { .get_drvinfo = enic_get_drvinfo, .get_msglevel = enic_get_msglevel, @@ -597,6 +608,7 @@ static const struct ethtool_ops enic_ethtool_ops = { .get_rxfh = enic_get_rxfh, .set_rxfh = enic_set_rxfh, .get_link_ksettings = enic_get_ksettings, + .get_ts_info = enic_get_ts_info, }; void enic_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index e130fb757e7b..f202ba72a811 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -856,6 +856,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff 
*skb, if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) netif_tx_stop_queue(txq); + skb_tx_timestamp(skb); if (!skb->xmit_more || netif_xmit_stopped(txq)) vnic_wq_doorbell(wq); @@ -1499,7 +1500,7 @@ static int enic_poll(struct napi_struct *napi, int budget) unsigned int cq_wq = enic_cq_wq(enic, 0); unsigned int intr = enic_legacy_io_intr(); unsigned int rq_work_to_do = budget; - unsigned int wq_work_to_do = -1; /* no limit */ + unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET; unsigned int work_done, rq_work_done = 0, wq_work_done; int err; @@ -1597,7 +1598,7 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget) struct vnic_wq *wq = &enic->wq[wq_index]; unsigned int cq; unsigned int intr; - unsigned int wq_work_to_do = -1; /* clean all desc possible */ + unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET; unsigned int wq_work_done; unsigned int wq_irq; diff --git a/drivers/net/ethernet/cortina/Kconfig b/drivers/net/ethernet/cortina/Kconfig new file mode 100644 index 000000000000..89bc4579724d --- /dev/null +++ b/drivers/net/ethernet/cortina/Kconfig @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0 +# Cortina ethernet devices + +config NET_VENDOR_CORTINA + bool "Cortina Gemini devices" + default y + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. + +if NET_VENDOR_CORTINA + +config GEMINI_ETHERNET + tristate "Gemini Gigabit Ethernet support" + depends on OF + depends on HAS_IOMEM + select PHYLIB + select CRC32 + ---help--- + This driver supports StorLink SL351x (Gemini) dual Gigabit Ethernet. + +endif # NET_VENDOR_CORTINA diff --git a/drivers/net/ethernet/cortina/Makefile b/drivers/net/ethernet/cortina/Makefile new file mode 100644 index 000000000000..4e86d398a89c --- /dev/null +++ b/drivers/net/ethernet/cortina/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +# Makefile for the Cortina Gemini network device drivers. + +obj-$(CONFIG_GEMINI_ETHERNET) += gemini.o diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c new file mode 100644 index 000000000000..5eb999af2c40 --- /dev/null +++ b/drivers/net/ethernet/cortina/gemini.c @@ -0,0 +1,2593 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Ethernet device driver for Cortina Systems Gemini SoC + * Also known as the StorLink SL3512 and SL3516 (SL351x) or Lepus + * Net Engine and Gigabit Ethernet MAC (GMAC) + * This hardware contains a TCP Offload Engine (TOE) but currently the + * driver does not make use of it. 
+ * + * Authors: + * Linus Walleij <linus.walleij@linaro.org> + * Tobias Waldvogel <tobias.waldvogel@gmail.com> (OpenWRT) + * Michał Mirosław <mirq-linux@rere.qmqm.pl> + * Paulius Zaleckas <paulius.zaleckas@gmail.com> + * Giuseppe De Robertis <Giuseppe.DeRobertis@ba.infn.it> + * Gary Chen & Ch Hsu Storlink Semiconductor + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/cache.h> +#include <linux/interrupt.h> +#include <linux/reset.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/of_platform.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/skbuff.h> +#include <linux/phy.h> +#include <linux/crc32.h> +#include <linux/ethtool.h> +#include <linux/tcp.h> +#include <linux/u64_stats_sync.h> + +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/ipv6.h> + +#include "gemini.h" + +#define DRV_NAME "gmac-gemini" +#define DRV_VERSION "1.0" + +#define HSIZE_8 0x00 +#define HSIZE_16 0x01 +#define HSIZE_32 0x02 + +#define HBURST_SINGLE 0x00 +#define HBURST_INCR 0x01 +#define HBURST_INCR4 0x02 +#define HBURST_INCR8 0x03 + +#define HPROT_DATA_CACHE BIT(0) +#define HPROT_PRIVILIGED BIT(1) +#define HPROT_BUFFERABLE BIT(2) +#define HPROT_CACHABLE BIT(3) + +#define DEFAULT_RX_COALESCE_NSECS 0 +#define DEFAULT_GMAC_RXQ_ORDER 9 +#define DEFAULT_GMAC_TXQ_ORDER 8 +#define DEFAULT_RX_BUF_ORDER 11 +#define DEFAULT_NAPI_WEIGHT 64 +#define TX_MAX_FRAGS 16 +#define TX_QUEUE_NUM 1 /* max: 6 */ +#define RX_MAX_ALLOC_ORDER 2 + +#define GMAC0_IRQ0_2 (GMAC0_TXDERR_INT_BIT | GMAC0_TXPERR_INT_BIT | \ + GMAC0_RXDERR_INT_BIT | GMAC0_RXPERR_INT_BIT) +#define GMAC0_IRQ0_TXQ0_INTS (GMAC0_SWTQ00_EOF_INT_BIT | \ + GMAC0_SWTQ00_FIN_INT_BIT) +#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT) + +#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \ + NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \ + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6) + +/** + * struct gmac_queue_page - page buffer per-page info + */ +struct gmac_queue_page { + struct page *page; + dma_addr_t mapping; +}; + +struct gmac_txq { + struct gmac_txdesc *ring; + struct sk_buff **skb; + unsigned int cptr; + unsigned int noirq_packets; +}; + +struct gemini_ethernet; + +struct gemini_ethernet_port { + u8 id; /* 0 or 1 */ + + struct gemini_ethernet *geth; + struct net_device *netdev; + struct device *dev; + void __iomem *dma_base; + void __iomem *gmac_base; + struct clk *pclk; + struct reset_control *reset; + int irq; + __le32 mac_addr[3]; + + void __iomem *rxq_rwptr; + struct gmac_rxdesc *rxq_ring; + unsigned int rxq_order; + + struct napi_struct napi; + struct hrtimer rx_coalesce_timer; + unsigned int rx_coalesce_nsecs; + unsigned int freeq_refill; + struct gmac_txq txq[TX_QUEUE_NUM]; + unsigned int txq_order; + unsigned int irq_every_tx_packets; + + dma_addr_t rxq_dma_base; + dma_addr_t txq_dma_base; + + unsigned int msg_enable; + spinlock_t config_lock; /* Locks config register */ + + struct u64_stats_sync tx_stats_syncp; + struct u64_stats_sync rx_stats_syncp; + struct u64_stats_sync ir_stats_syncp; + + struct rtnl_link_stats64 stats; + u64 hw_stats[RX_STATS_NUM]; + u64 rx_stats[RX_STATUS_NUM]; + u64 rx_csum_stats[RX_CHKSUM_NUM]; + u64 rx_napi_exits; + u64 tx_frag_stats[TX_MAX_FRAGS]; + u64 tx_frags_linearized; + u64 tx_hw_csummed; +}; + +struct gemini_ethernet { +
struct device *dev; + void __iomem *base; + struct gemini_ethernet_port *port0; + struct gemini_ethernet_port *port1; + + spinlock_t irq_lock; /* Locks IRQ-related registers */ + unsigned int freeq_order; + unsigned int freeq_frag_order; + struct gmac_rxdesc *freeq_ring; + dma_addr_t freeq_dma_base; + struct gmac_queue_page *freeq_pages; + unsigned int num_freeq_pages; + spinlock_t freeq_lock; /* Locks queue from reentrance */ +}; + +#define GMAC_STATS_NUM ( \ + RX_STATS_NUM + RX_STATUS_NUM + RX_CHKSUM_NUM + 1 + \ + TX_MAX_FRAGS + 2) + +static const char gmac_stats_strings[GMAC_STATS_NUM][ETH_GSTRING_LEN] = { + "GMAC_IN_DISCARDS", + "GMAC_IN_ERRORS", + "GMAC_IN_MCAST", + "GMAC_IN_BCAST", + "GMAC_IN_MAC1", + "GMAC_IN_MAC2", + "RX_STATUS_GOOD_FRAME", + "RX_STATUS_TOO_LONG_GOOD_CRC", + "RX_STATUS_RUNT_FRAME", + "RX_STATUS_SFD_NOT_FOUND", + "RX_STATUS_CRC_ERROR", + "RX_STATUS_TOO_LONG_BAD_CRC", + "RX_STATUS_ALIGNMENT_ERROR", + "RX_STATUS_TOO_LONG_BAD_ALIGN", + "RX_STATUS_RX_ERR", + "RX_STATUS_DA_FILTERED", + "RX_STATUS_BUFFER_FULL", + "RX_STATUS_11", + "RX_STATUS_12", + "RX_STATUS_13", + "RX_STATUS_14", + "RX_STATUS_15", + "RX_CHKSUM_IP_UDP_TCP_OK", + "RX_CHKSUM_IP_OK_ONLY", + "RX_CHKSUM_NONE", + "RX_CHKSUM_3", + "RX_CHKSUM_IP_ERR_UNKNOWN", + "RX_CHKSUM_IP_ERR", + "RX_CHKSUM_TCP_UDP_ERR", + "RX_CHKSUM_7", + "RX_NAPI_EXITS", + "TX_FRAGS[1]", + "TX_FRAGS[2]", + "TX_FRAGS[3]", + "TX_FRAGS[4]", + "TX_FRAGS[5]", + "TX_FRAGS[6]", + "TX_FRAGS[7]", + "TX_FRAGS[8]", + "TX_FRAGS[9]", + "TX_FRAGS[10]", + "TX_FRAGS[11]", + "TX_FRAGS[12]", + "TX_FRAGS[13]", + "TX_FRAGS[14]", + "TX_FRAGS[15]", + "TX_FRAGS[16+]", + "TX_FRAGS_LINEARIZED", + "TX_HW_CSUMMED", +}; + +static void gmac_dump_dma_state(struct net_device *netdev); + +static void gmac_update_config0_reg(struct net_device *netdev, + u32 val, u32 vmask) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + unsigned long flags; + u32 reg; + + spin_lock_irqsave(&port->config_lock, flags); + + reg = readl(port->gmac_base + GMAC_CONFIG0); + reg = (reg & ~vmask) | val; + writel(reg, port->gmac_base + GMAC_CONFIG0); + + spin_unlock_irqrestore(&port->config_lock, flags); +} + +static void gmac_enable_tx_rx(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + unsigned long flags; + u32 reg; + + spin_lock_irqsave(&port->config_lock, flags); + + reg = readl(port->gmac_base + GMAC_CONFIG0); + reg &= ~CONFIG0_TX_RX_DISABLE; + writel(reg, port->gmac_base + GMAC_CONFIG0); + + spin_unlock_irqrestore(&port->config_lock, flags); +} + +static void gmac_disable_tx_rx(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + unsigned long flags; + u32 val; + + spin_lock_irqsave(&port->config_lock, flags); + + val = readl(port->gmac_base + GMAC_CONFIG0); + val |= CONFIG0_TX_RX_DISABLE; + writel(val, port->gmac_base + GMAC_CONFIG0); + + spin_unlock_irqrestore(&port->config_lock, flags); + + mdelay(10); /* let GMAC consume packet */ +} + +static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + unsigned long flags; + u32 val; + + spin_lock_irqsave(&port->config_lock, flags); + + val = readl(port->gmac_base + GMAC_CONFIG0); + val &= ~CONFIG0_FLOW_CTL; + if (tx) + val |= CONFIG0_FLOW_TX; + if (rx) + val |= CONFIG0_FLOW_RX; + writel(val, port->gmac_base + GMAC_CONFIG0); + + spin_unlock_irqrestore(&port->config_lock, flags); +} + +static void gmac_speed_set(struct net_device *netdev) +{ + struct gemini_ethernet_port 
*port = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + union gmac_status status, old_status; + int pause_tx = 0; + int pause_rx = 0; + + status.bits32 = readl(port->gmac_base + GMAC_STATUS); + old_status.bits32 = status.bits32; + status.bits.link = phydev->link; + status.bits.duplex = phydev->duplex; + + switch (phydev->speed) { + case 1000: + status.bits.speed = GMAC_SPEED_1000; + if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + status.bits.mii_rmii = GMAC_PHY_RGMII_1000; + netdev_info(netdev, "connect to RGMII @ 1Gbit\n"); + break; + case 100: + status.bits.speed = GMAC_SPEED_100; + if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; + netdev_info(netdev, "connect to RGMII @ 100 Mbit\n"); + break; + case 10: + status.bits.speed = GMAC_SPEED_10; + if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; + netdev_info(netdev, "connect to RGMII @ 10 Mbit\n"); + break; + default: + netdev_warn(netdev, "Not supported PHY speed (%d)\n", + phydev->speed); + } + + if (phydev->duplex == DUPLEX_FULL) { + u16 lcladv = phy_read(phydev, MII_ADVERTISE); + u16 rmtadv = phy_read(phydev, MII_LPA); + u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + + if (cap & FLOW_CTRL_RX) + pause_rx = 1; + if (cap & FLOW_CTRL_TX) + pause_tx = 1; + } + + gmac_set_flow_control(netdev, pause_tx, pause_rx); + + if (old_status.bits32 == status.bits32) + return; + + if (netif_msg_link(port)) { + phy_print_status(phydev); + netdev_info(netdev, "link flow control: %s\n", + phydev->pause + ? (phydev->asym_pause ? "tx" : "both") + : (phydev->asym_pause ? "rx" : "none") + ); + } + + gmac_disable_tx_rx(netdev); + writel(status.bits32, port->gmac_base + GMAC_STATUS); + gmac_enable_tx_rx(netdev); +} + +static int gmac_setup_phy(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + union gmac_status status = { .bits32 = 0 }; + struct device *dev = port->dev; + struct phy_device *phy; + + phy = of_phy_get_and_connect(netdev, + dev->of_node, + gmac_speed_set); + if (!phy) + return -ENODEV; + netdev->phydev = phy; + + netdev_info(netdev, "connected to PHY \"%s\"\n", + phydev_name(phy)); + phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n", + (unsigned long)phy->phy_id, + phy_modes(phy->interface)); + + phy->supported &= PHY_GBIT_FEATURES; + phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause; + phy->advertising = phy->supported; + + /* set PHY interface type */ + switch (phy->interface) { + case PHY_INTERFACE_MODE_MII: + netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n"); + status.bits.mii_rmii = GMAC_PHY_MII; + netdev_info(netdev, "connect to MII\n"); + break; + case PHY_INTERFACE_MODE_GMII: + netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n"); + status.bits.mii_rmii = GMAC_PHY_GMII; + netdev_info(netdev, "connect to GMII\n"); + break; + case PHY_INTERFACE_MODE_RGMII: + dev_info(dev, "set GMAC0 and GMAC1 to MII/RGMII mode\n"); + status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; + netdev_info(netdev, "connect to RGMII\n"); + break; + default: + netdev_err(netdev, "Unsupported MII interface\n"); + phy_disconnect(phy); + netdev->phydev = NULL; + return -EINVAL; + } + writel(status.bits32, port->gmac_base + GMAC_STATUS); + + return 0; +} + +static int gmac_pick_rx_max_len(int max_l3_len) +{ + /* index = CONFIG_MAXLEN_XXX values */ + static const int max_len[8] = { + 1536, 1518, 1522, 1542, + 9212, 10236, 1518, 1518 + }; + int i, n = 5; + + max_l3_len += 
ETH_HLEN + VLAN_HLEN; + + if (max_l3_len > max_len[n]) + return -1; + + for (i = 0; i < 5; i++) { + if (max_len[i] >= max_l3_len && max_len[i] < max_len[n]) + n = i; + } + + return n; +} + +static int gmac_init(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + union gmac_config0 config0 = { .bits = { + .dis_tx = 1, + .dis_rx = 1, + .ipv4_rx_chksum = 1, + .ipv6_rx_chksum = 1, + .rx_err_detect = 1, + .rgmm_edge = 1, + .port0_chk_hwq = 1, + .port1_chk_hwq = 1, + .port0_chk_toeq = 1, + .port1_chk_toeq = 1, + .port0_chk_classq = 1, + .port1_chk_classq = 1, + } }; + union gmac_ahb_weight ahb_weight = { .bits = { + .rx_weight = 1, + .tx_weight = 1, + .hash_weight = 1, + .pre_req = 0x1f, + .tq_dv_threshold = 0, + } }; + union gmac_tx_wcr0 hw_weigh = { .bits = { + .hw_tq3 = 1, + .hw_tq2 = 1, + .hw_tq1 = 1, + .hw_tq0 = 1, + } }; + union gmac_tx_wcr1 sw_weigh = { .bits = { + .sw_tq5 = 1, + .sw_tq4 = 1, + .sw_tq3 = 1, + .sw_tq2 = 1, + .sw_tq1 = 1, + .sw_tq0 = 1, + } }; + union gmac_config1 config1 = { .bits = { + .set_threshold = 16, + .rel_threshold = 24, + } }; + union gmac_config2 config2 = { .bits = { + .set_threshold = 16, + .rel_threshold = 32, + } }; + union gmac_config3 config3 = { .bits = { + .set_threshold = 0, + .rel_threshold = 0, + } }; + union gmac_config0 tmp; + u32 val; + + config0.bits.max_len = gmac_pick_rx_max_len(netdev->mtu); + tmp.bits32 = readl(port->gmac_base + GMAC_CONFIG0); + config0.bits.reserved = tmp.bits.reserved; + writel(config0.bits32, port->gmac_base + GMAC_CONFIG0); + writel(config1.bits32, port->gmac_base + GMAC_CONFIG1); + writel(config2.bits32, port->gmac_base + GMAC_CONFIG2); + writel(config3.bits32, port->gmac_base + GMAC_CONFIG3); + + val = readl(port->dma_base + GMAC_AHB_WEIGHT_REG); + writel(ahb_weight.bits32, port->dma_base + GMAC_AHB_WEIGHT_REG); + + writel(hw_weigh.bits32, + port->dma_base + GMAC_TX_WEIGHTING_CTRL_0_REG); + writel(sw_weigh.bits32, + port->dma_base + GMAC_TX_WEIGHTING_CTRL_1_REG); + + port->rxq_order = DEFAULT_GMAC_RXQ_ORDER; + port->txq_order = DEFAULT_GMAC_TXQ_ORDER; + port->rx_coalesce_nsecs = DEFAULT_RX_COALESCE_NSECS; + + /* Mark one packet per quarter of the queue for interrupt + * in order to be able to wake up the queue if it was stopped + */ + port->irq_every_tx_packets = 1 << (port->txq_order - 2); + + return 0; +} + +static void gmac_uninit(struct net_device *netdev) +{ + if (netdev->phydev) + phy_disconnect(netdev->phydev); +} + +static int gmac_setup_txqs(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + unsigned int n_txq = netdev->num_tx_queues; + struct gemini_ethernet *geth = port->geth; + size_t entries = 1 << port->txq_order; + struct gmac_txq *txq = port->txq; + struct gmac_txdesc *desc_ring; + size_t len = n_txq * entries; + struct sk_buff **skb_tab; + void __iomem *rwptr_reg; + unsigned int r; + int i; + + rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG; + + skb_tab = kcalloc(len, sizeof(*skb_tab), GFP_KERNEL); + if (!skb_tab) + return -ENOMEM; + + desc_ring = dma_alloc_coherent(geth->dev, len * sizeof(*desc_ring), + &port->txq_dma_base, GFP_KERNEL); + + if (!desc_ring) { + kfree(skb_tab); + return -ENOMEM; + } + + if (port->txq_dma_base & ~DMA_Q_BASE_MASK) { + dev_warn(geth->dev, "TX queue base is not aligned\n"); + return -ENOMEM; + } + + writel(port->txq_dma_base | port->txq_order, + port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG); + + for (i = 0; i < n_txq; i++) { + txq->ring = desc_ring; + txq->skb = skb_tab; + txq->noirq_packets
= 0; + + r = readw(rwptr_reg); + rwptr_reg += 2; + writew(r, rwptr_reg); + rwptr_reg += 2; + txq->cptr = r; + + txq++; + desc_ring += entries; + skb_tab += entries; + } + + return 0; +} + +static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq, + unsigned int r) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + unsigned int m = (1 << port->txq_order) - 1; + struct gemini_ethernet *geth = port->geth; + unsigned int c = txq->cptr; + union gmac_txdesc_0 word0; + union gmac_txdesc_1 word1; + unsigned int hwchksum = 0; + unsigned long bytes = 0; + struct gmac_txdesc *txd; + unsigned short nfrags; + unsigned int errs = 0; + unsigned int pkts = 0; + unsigned int word3; + dma_addr_t mapping; + + if (c == r) + return; + + while (c != r) { + txd = txq->ring + c; + word0 = txd->word0; + word1 = txd->word1; + mapping = txd->word2.buf_adr; + word3 = txd->word3.bits32; + + dma_unmap_single(geth->dev, mapping, + word0.bits.buffer_size, DMA_TO_DEVICE); + + if (word3 & EOF_BIT) + dev_kfree_skb(txq->skb[c]); + + c++; + c &= m; + + if (!(word3 & SOF_BIT)) + continue; + + if (!word0.bits.status_tx_ok) { + errs++; + continue; + } + + pkts++; + bytes += txd->word1.bits.byte_count; + + if (word1.bits32 & TSS_CHECKUM_ENABLE) + hwchksum++; + + nfrags = word0.bits.desc_count - 1; + if (nfrags) { + if (nfrags >= TX_MAX_FRAGS) + nfrags = TX_MAX_FRAGS - 1; + + u64_stats_update_begin(&port->tx_stats_syncp); + port->tx_frag_stats[nfrags]++; + u64_stats_update_end(&port->tx_stats_syncp); + } + } + + u64_stats_update_begin(&port->ir_stats_syncp); + port->stats.tx_errors += errs; + port->stats.tx_packets += pkts; + port->stats.tx_bytes += bytes; + port->tx_hw_csummed += hwchksum; + u64_stats_update_end(&port->ir_stats_syncp); + + txq->cptr = c; +} + +static void gmac_cleanup_txqs(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + unsigned int n_txq = netdev->num_tx_queues; + struct gemini_ethernet *geth = port->geth; + void __iomem *rwptr_reg; + unsigned int r, i; + + rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG; + + for (i = 0; i < n_txq; i++) { + r = readw(rwptr_reg); + rwptr_reg += 2; + writew(r, rwptr_reg); + rwptr_reg += 2; + + gmac_clean_txq(netdev, port->txq + i, r); + } + writel(0, port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG); + + kfree(port->txq->skb); + dma_free_coherent(geth->dev, + n_txq * sizeof(*port->txq->ring) << port->txq_order, + port->txq->ring, port->txq_dma_base); +} + +static int gmac_setup_rxq(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + struct gemini_ethernet *geth = port->geth; + struct nontoe_qhdr __iomem *qhdr; + + qhdr = geth->base + TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id); + port->rxq_rwptr = &qhdr->word1; + + /* Remap a slew of memory to use for the RX queue */ + port->rxq_ring = dma_alloc_coherent(geth->dev, + sizeof(*port->rxq_ring) << port->rxq_order, + &port->rxq_dma_base, GFP_KERNEL); + if (!port->rxq_ring) + return -ENOMEM; + if (port->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK) { + dev_warn(geth->dev, "RX queue base is not aligned\n"); + return -ENOMEM; + } + + writel(port->rxq_dma_base | port->rxq_order, &qhdr->word0); + writel(0, port->rxq_rwptr); + return 0; +} + +static struct gmac_queue_page * +gmac_get_queue_page(struct gemini_ethernet *geth, + struct gemini_ethernet_port *port, + dma_addr_t addr) +{ + struct gmac_queue_page *gpage; + dma_addr_t mapping; + int i; + + /* Only look for even pages */ + mapping = addr & PAGE_MASK; + + if (!geth->freeq_pages) { +
dev_err(geth->dev, "tried to get a page with no page list\n"); + return NULL; + } + + /* Look up a ring buffer page from virtual mapping */ + for (i = 0; i < geth->num_freeq_pages; i++) { + gpage = &geth->freeq_pages[i]; + if (gpage->mapping == mapping) + return gpage; + } + + return NULL; +} + +static void gmac_cleanup_rxq(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + struct gemini_ethernet *geth = port->geth; + struct gmac_rxdesc *rxd = port->rxq_ring; + struct gmac_queue_page *gpage; + struct nontoe_qhdr __iomem *qhdr; + void __iomem *dma_reg; + void __iomem *ptr_reg; + dma_addr_t mapping; + union dma_rwptr rw; + unsigned int r, w; + + qhdr = geth->base + + TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id); + dma_reg = &qhdr->word0; + ptr_reg = &qhdr->word1; + + rw.bits32 = readl(ptr_reg); + r = rw.bits.rptr; + w = rw.bits.wptr; + writew(r, ptr_reg + 2); + + writel(0, dma_reg); + + /* Loop from read pointer to write pointer of the RX queue + * and free up all pages by the queue. + */ + while (r != w) { + mapping = rxd[r].word2.buf_adr; + r++; + r &= ((1 << port->rxq_order) - 1); + + if (!mapping) + continue; + + /* Freeq pointers are one page off */ + gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE); + if (!gpage) { + dev_err(geth->dev, "could not find page\n"); + continue; + } + /* Release the RX queue reference to the page */ + put_page(gpage->page); + } + + dma_free_coherent(geth->dev, sizeof(*port->rxq_ring) << port->rxq_order, + port->rxq_ring, port->rxq_dma_base); +} + +static struct page *geth_freeq_alloc_map_page(struct gemini_ethernet *geth, + int pn) +{ + struct gmac_rxdesc *freeq_entry; + struct gmac_queue_page *gpage; + unsigned int fpp_order; + unsigned int frag_len; + dma_addr_t mapping; + struct page *page; + int i; + + /* First allocate and DMA map a single page */ + page = alloc_page(GFP_ATOMIC); + if (!page) + return NULL; + + mapping = dma_map_single(geth->dev, page_address(page), + PAGE_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(geth->dev, mapping)) { + put_page(page); + return NULL; + } + + /* Then assign the page mapping (physical address) to the buffer address + * in the hardware queue. PAGE_SHIFT on ARM is 12 (1 page is 4096 bytes, + * 4k), and the default RX frag order is 11 (fragments are up to 2048 + * bytes, 2k) so fpp_order (fragments per page order) is default 1. Thus + * each page normally needs two entries in the queue. + */ + frag_len = 1 << geth->freeq_frag_order; /* Usually 2048 */ + fpp_order = PAGE_SHIFT - geth->freeq_frag_order; + freeq_entry = geth->freeq_ring + (pn << fpp_order); + dev_dbg(geth->dev, "allocate page %d fragment length %d fragments per page %d, freeq entry %p\n", + pn, frag_len, (1 << fpp_order), freeq_entry); + for (i = (1 << fpp_order); i > 0; i--) { + freeq_entry->word2.buf_adr = mapping; + freeq_entry++; + mapping += frag_len; + } + + /* If the freeq entry already has a page mapped, then unmap it.
+ */
+ gpage = &geth->freeq_pages[pn];
+ if (gpage->page) {
+ mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+ dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+ /* This should be the last reference to the page so it gets
+ * released
+ */
+ put_page(gpage->page);
+ }
+
+ /* Then put our new mapping into the page table */
+ dev_dbg(geth->dev, "page %d, DMA addr: %08x, page %p\n",
+ pn, (unsigned int)mapping, page);
+ gpage->mapping = mapping;
+ gpage->page = page;
+
+ return page;
+}
+
+/**
+ * geth_fill_freeq() - Fill the freeq with empty fragments to use
+ * @geth: the ethernet adapter
+ * @refill: true to just top the queue up from the write pointer, false to
+ * reset it by filling in all freeq entries; the refill interrupt normally
+ * fires when the queue is half empty
+ */
+static unsigned int geth_fill_freeq(struct gemini_ethernet *geth, bool refill)
+{
+ unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ unsigned int count = 0;
+ unsigned int pn, epn;
+ unsigned long flags;
+ union dma_rwptr rw;
+ unsigned int m_pn;
+
+ /* Mask for page */
+ m_pn = (1 << (geth->freeq_order - fpp_order)) - 1;
+
+ spin_lock_irqsave(&geth->freeq_lock, flags);
+
+ rw.bits32 = readl(geth->base + GLOBAL_SWFQ_RWPTR_REG);
+ pn = (refill ? rw.bits.wptr : rw.bits.rptr) >> fpp_order;
+ epn = (rw.bits.rptr >> fpp_order) - 1;
+ epn &= m_pn;
+
+ /* Loop over the freeq ring buffer entries */
+ while (pn != epn) {
+ struct gmac_queue_page *gpage;
+ struct page *page;
+
+ gpage = &geth->freeq_pages[pn];
+ page = gpage->page;
+
+ dev_dbg(geth->dev, "fill entry %d page ref count %d add %d refs\n",
+ pn, page_ref_count(page), 1 << fpp_order);
+
+ if (page_ref_count(page) > 1) {
+ unsigned int fl = (pn - epn) & m_pn;
+
+ if (fl > 64 >> fpp_order)
+ break;
+
+ page = geth_freeq_alloc_map_page(geth, pn);
+ if (!page)
+ break;
+ }
+
+ /* Add one reference per fragment in the page */
+ page_ref_add(page, 1 << fpp_order);
+ count += 1 << fpp_order;
+ pn++;
+ pn &= m_pn;
+ }
+
+ writew(pn << fpp_order, geth->base + GLOBAL_SWFQ_RWPTR_REG + 2);
+
+ spin_unlock_irqrestore(&geth->freeq_lock, flags);
+
+ return count;
+}
+
+static int geth_setup_freeq(struct gemini_ethernet *geth)
+{
+ unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ unsigned int frag_len = 1 << geth->freeq_frag_order;
+ unsigned int len = 1 << geth->freeq_order;
+ unsigned int pages = len >> fpp_order;
+ union queue_threshold qt;
+ union dma_skb_size skbsz;
+ unsigned int filled;
+ unsigned int pn;
+
+ geth->freeq_ring = dma_alloc_coherent(geth->dev,
+ sizeof(*geth->freeq_ring) << geth->freeq_order,
+ &geth->freeq_dma_base, GFP_KERNEL);
+ if (!geth->freeq_ring)
+ return -ENOMEM;
+ if (geth->freeq_dma_base & ~DMA_Q_BASE_MASK) {
+ dev_warn(geth->dev, "queue ring base is not aligned\n");
+ goto err_freeq;
+ }
+
+ /* Allocate a mapping to page look-up index */
+ geth->freeq_pages = kzalloc(pages * sizeof(*geth->freeq_pages),
+ GFP_KERNEL);
+ if (!geth->freeq_pages)
+ goto err_freeq;
+ geth->num_freeq_pages = pages;
+
+ dev_info(geth->dev, "allocate %d pages for queue\n", pages);
+ for (pn = 0; pn < pages; pn++)
+ if (!geth_freeq_alloc_map_page(geth, pn))
+ goto err_freeq_alloc;
+
+ filled = geth_fill_freeq(geth, false);
+ if (!filled)
+ goto err_freeq_alloc;
+
+ qt.bits32 = readl(geth->base + GLOBAL_QUEUE_THRESHOLD_REG);
+ qt.bits.swfq_empty = 32;
+ writel(qt.bits32, geth->base + GLOBAL_QUEUE_THRESHOLD_REG);
+
+ skbsz.bits.sw_skb_size = 1 << geth->freeq_frag_order;
+ writel(skbsz.bits32, geth->base +
GLOBAL_DMA_SKB_SIZE_REG); + writel(geth->freeq_dma_base | geth->freeq_order, + geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG); + + return 0; + +err_freeq_alloc: + while (pn > 0) { + struct gmac_queue_page *gpage; + dma_addr_t mapping; + + --pn; + mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr; + dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE); + gpage = &geth->freeq_pages[pn]; + put_page(gpage->page); + } + + kfree(geth->freeq_pages); +err_freeq: + dma_free_coherent(geth->dev, + sizeof(*geth->freeq_ring) << geth->freeq_order, + geth->freeq_ring, geth->freeq_dma_base); + geth->freeq_ring = NULL; + return -ENOMEM; +} + +/** + * geth_cleanup_freeq() - cleanup the DMA mappings and free the queue + * @geth: the Gemini global ethernet state + */ +static void geth_cleanup_freeq(struct gemini_ethernet *geth) +{ + unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order; + unsigned int frag_len = 1 << geth->freeq_frag_order; + unsigned int len = 1 << geth->freeq_order; + unsigned int pages = len >> fpp_order; + unsigned int pn; + + writew(readw(geth->base + GLOBAL_SWFQ_RWPTR_REG), + geth->base + GLOBAL_SWFQ_RWPTR_REG + 2); + writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG); + + for (pn = 0; pn < pages; pn++) { + struct gmac_queue_page *gpage; + dma_addr_t mapping; + + mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr; + dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE); + + gpage = &geth->freeq_pages[pn]; + while (page_ref_count(gpage->page) > 0) + put_page(gpage->page); + } + + kfree(geth->freeq_pages); + + dma_free_coherent(geth->dev, + sizeof(*geth->freeq_ring) << geth->freeq_order, + geth->freeq_ring, geth->freeq_dma_base); +} + +/** + * geth_resize_freeq() - resize the software queue depth + * @port: the port requesting the change + * + * This gets called at least once during probe() so the device queue gets + * "resized" from the hardware defaults. Since both ports/net devices share + * the same hardware queue, some synchronization between the ports is + * needed. 
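+ *
+ * The new depth is the sum of both ports' RX queue depths doubled,
+ * rounded up to a power of two and capped at order 15; if that matches
+ * the current freeq order the queue is left untouched.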
+ */ +static int geth_resize_freeq(struct gemini_ethernet_port *port) +{ + struct gemini_ethernet *geth = port->geth; + struct net_device *netdev = port->netdev; + struct gemini_ethernet_port *other_port; + struct net_device *other_netdev; + unsigned int new_size = 0; + unsigned int new_order; + unsigned long flags; + u32 en; + int ret; + + if (netdev->dev_id == 0) + other_netdev = geth->port1->netdev; + else + other_netdev = geth->port0->netdev; + + if (other_netdev && netif_running(other_netdev)) + return -EBUSY; + + new_size = 1 << (port->rxq_order + 1); + netdev_dbg(netdev, "port %d size: %d order %d\n", + netdev->dev_id, + new_size, + port->rxq_order); + if (other_netdev) { + other_port = netdev_priv(other_netdev); + new_size += 1 << (other_port->rxq_order + 1); + netdev_dbg(other_netdev, "port %d size: %d order %d\n", + other_netdev->dev_id, + (1 << (other_port->rxq_order + 1)), + other_port->rxq_order); + } + + new_order = min(15, ilog2(new_size - 1) + 1); + dev_dbg(geth->dev, "set shared queue to size %d order %d\n", + new_size, new_order); + if (geth->freeq_order == new_order) + return 0; + + spin_lock_irqsave(&geth->irq_lock, flags); + + /* Disable the software queue IRQs */ + en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); + en &= ~SWFQ_EMPTY_INT_BIT; + writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); + spin_unlock_irqrestore(&geth->irq_lock, flags); + + /* Drop the old queue */ + if (geth->freeq_ring) + geth_cleanup_freeq(geth); + + /* Allocate a new queue with the desired order */ + geth->freeq_order = new_order; + ret = geth_setup_freeq(geth); + + /* Restart the interrupts - NOTE if this is the first resize + * after probe(), this is where the interrupts get turned on + * in the first place. + */ + spin_lock_irqsave(&geth->irq_lock, flags); + en |= SWFQ_EMPTY_INT_BIT; + writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); + spin_unlock_irqrestore(&geth->irq_lock, flags); + + return ret; +} + +static void gmac_tx_irq_enable(struct net_device *netdev, + unsigned int txq, int en) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + struct gemini_ethernet *geth = port->geth; + u32 val, mask; + + netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id); + + mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq); + + if (en) + writel(mask, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG); + + val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); + val = en ? 
val | mask : val & ~mask; + writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); +} + +static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num) +{ + struct netdev_queue *ntxq = netdev_get_tx_queue(netdev, txq_num); + + gmac_tx_irq_enable(netdev, txq_num, 0); + netif_tx_wake_queue(ntxq); +} + +static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, + struct gmac_txq *txq, unsigned short *desc) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + struct skb_shared_info *skb_si = skb_shinfo(skb); + unsigned short m = (1 << port->txq_order) - 1; + short frag, last_frag = skb_si->nr_frags - 1; + struct gemini_ethernet *geth = port->geth; + unsigned int word1, word3, buflen; + unsigned short w = *desc; + struct gmac_txdesc *txd; + skb_frag_t *skb_frag; + dma_addr_t mapping; + unsigned short mtu; + void *buffer; + + mtu = ETH_HLEN; + mtu += netdev->mtu; + if (skb->protocol == htons(ETH_P_8021Q)) + mtu += VLAN_HLEN; + + word1 = skb->len; + word3 = SOF_BIT; + + if (word1 > mtu) { + word1 |= TSS_MTU_ENABLE_BIT; + word3 |= mtu; + } + + if (skb->ip_summed != CHECKSUM_NONE) { + int tcp = 0; + + if (skb->protocol == htons(ETH_P_IP)) { + word1 |= TSS_IP_CHKSUM_BIT; + tcp = ip_hdr(skb)->protocol == IPPROTO_TCP; + } else { /* IPv6 */ + word1 |= TSS_IPV6_ENABLE_BIT; + tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP; + } + + word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT; + } + + frag = -1; + while (frag <= last_frag) { + if (frag == -1) { + buffer = skb->data; + buflen = skb_headlen(skb); + } else { + skb_frag = skb_si->frags + frag; + buffer = page_address(skb_frag_page(skb_frag)) + + skb_frag->page_offset; + buflen = skb_frag->size; + } + + if (frag == last_frag) { + word3 |= EOF_BIT; + txq->skb[w] = skb; + } + + mapping = dma_map_single(geth->dev, buffer, buflen, + DMA_TO_DEVICE); + if (dma_mapping_error(geth->dev, mapping)) + goto map_error; + + txd = txq->ring + w; + txd->word0.bits32 = buflen; + txd->word1.bits32 = word1; + txd->word2.buf_adr = mapping; + txd->word3.bits32 = word3; + + word3 &= MTU_SIZE_BIT_MASK; + w++; + w &= m; + frag++; + } + + *desc = w; + return 0; + +map_error: + while (w != *desc) { + w--; + w &= m; + + dma_unmap_page(geth->dev, txq->ring[w].word2.buf_adr, + txq->ring[w].word0.bits.buffer_size, + DMA_TO_DEVICE); + } + return -ENOMEM; +} + +static int gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + unsigned short m = (1 << port->txq_order) - 1; + struct netdev_queue *ntxq; + unsigned short r, w, d; + void __iomem *ptr_reg; + struct gmac_txq *txq; + int txq_num, nfrags; + union dma_rwptr rw; + + SKB_FRAG_ASSERT(skb); + + if (skb->len >= 0x10000) + goto out_drop_free; + + txq_num = skb_get_queue_mapping(skb); + ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE_PTR_REG(txq_num); + txq = &port->txq[txq_num]; + ntxq = netdev_get_tx_queue(netdev, txq_num); + nfrags = skb_shinfo(skb)->nr_frags; + + rw.bits32 = readl(ptr_reg); + r = rw.bits.rptr; + w = rw.bits.wptr; + + d = txq->cptr - w - 1; + d &= m; + + if (d < nfrags + 2) { + gmac_clean_txq(netdev, txq, r); + d = txq->cptr - w - 1; + d &= m; + + if (d < nfrags + 2) { + netif_tx_stop_queue(ntxq); + + d = txq->cptr + nfrags + 16; + d &= m; + txq->ring[d].word3.bits.eofie = 1; + gmac_tx_irq_enable(netdev, txq_num, 1); + + u64_stats_update_begin(&port->tx_stats_syncp); + netdev->stats.tx_fifo_errors++; + u64_stats_update_end(&port->tx_stats_syncp); + return NETDEV_TX_BUSY; + } + } + + if 
(gmac_map_tx_bufs(netdev, skb, txq, &w)) { + if (skb_linearize(skb)) + goto out_drop; + + u64_stats_update_begin(&port->tx_stats_syncp); + port->tx_frags_linearized++; + u64_stats_update_end(&port->tx_stats_syncp); + + if (gmac_map_tx_bufs(netdev, skb, txq, &w)) + goto out_drop_free; + } + + writew(w, ptr_reg + 2); + + gmac_clean_txq(netdev, txq, r); + return NETDEV_TX_OK; + +out_drop_free: + dev_kfree_skb(skb); +out_drop: + u64_stats_update_begin(&port->tx_stats_syncp); + port->stats.tx_dropped++; + u64_stats_update_end(&port->tx_stats_syncp); + return NETDEV_TX_OK; +} + +static void gmac_tx_timeout(struct net_device *netdev) +{ + netdev_err(netdev, "Tx timeout\n"); + gmac_dump_dma_state(netdev); +} + +static void gmac_enable_irq(struct net_device *netdev, int enable) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + struct gemini_ethernet *geth = port->geth; + unsigned long flags; + u32 val, mask; + + netdev_info(netdev, "%s device %d %s\n", __func__, + netdev->dev_id, enable ? "enable" : "disable"); + spin_lock_irqsave(&geth->irq_lock, flags); + + mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2); + val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); + val = enable ? (val | mask) : (val & ~mask); + writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); + + mask = DEFAULT_Q0_INT_BIT << netdev->dev_id; + val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); + val = enable ? (val | mask) : (val & ~mask); + writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); + + mask = GMAC0_IRQ4_8 << (netdev->dev_id * 8); + val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); + val = enable ? (val | mask) : (val & ~mask); + writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); + + spin_unlock_irqrestore(&geth->irq_lock, flags); +} + +static void gmac_enable_rx_irq(struct net_device *netdev, int enable) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + struct gemini_ethernet *geth = port->geth; + unsigned long flags; + u32 val, mask; + + netdev_dbg(netdev, "%s device %d %s\n", __func__, netdev->dev_id, + enable ? "enable" : "disable"); + spin_lock_irqsave(&geth->irq_lock, flags); + mask = DEFAULT_Q0_INT_BIT << netdev->dev_id; + + val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); + val = enable ? 
(val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+static struct sk_buff *gmac_skb_if_good_frame(struct gemini_ethernet_port *port,
+ union gmac_rxdesc_0 word0,
+ unsigned int frame_len)
+{
+ unsigned int rx_csum = word0.bits.chksum_status;
+ unsigned int rx_status = word0.bits.status;
+ struct sk_buff *skb = NULL;
+
+ port->rx_stats[rx_status]++;
+ port->rx_csum_stats[rx_csum]++;
+
+ if (word0.bits.derr || word0.bits.perr ||
+ rx_status || frame_len < ETH_ZLEN ||
+ rx_csum >= RX_CHKSUM_IP_ERR_UNKNOWN) {
+ port->stats.rx_errors++;
+
+ if (frame_len < ETH_ZLEN || RX_ERROR_LENGTH(rx_status))
+ port->stats.rx_length_errors++;
+ if (RX_ERROR_OVER(rx_status))
+ port->stats.rx_over_errors++;
+ if (RX_ERROR_CRC(rx_status))
+ port->stats.rx_crc_errors++;
+ if (RX_ERROR_FRAME(rx_status))
+ port->stats.rx_frame_errors++;
+ return NULL;
+ }
+
+ skb = napi_get_frags(&port->napi);
+ if (!skb)
+ goto update_exit;
+
+ if (rx_csum == RX_CHKSUM_IP_UDP_TCP_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+update_exit:
+ port->stats.rx_bytes += frame_len;
+ port->stats.rx_packets++;
+ return skb;
+}
+
+static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned short m = (1 << port->rxq_order) - 1;
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *ptr_reg = port->rxq_rwptr;
+ unsigned int frame_len, frag_len;
+ struct gmac_rxdesc *rx = NULL;
+ struct gmac_queue_page *gpage;
+ static struct sk_buff *skb;
+ union gmac_rxdesc_0 word0;
+ union gmac_rxdesc_1 word1;
+ union gmac_rxdesc_3 word3;
+ struct page *page = NULL;
+ unsigned int page_offs;
+ unsigned short r, w;
+ union dma_rwptr rw;
+ dma_addr_t mapping;
+ int frag_nr = 0;
+
+ rw.bits32 = readl(ptr_reg);
+ /* Reset interrupt as all packets until here are taken into account */
+ writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
+ geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ r = rw.bits.rptr;
+ w = rw.bits.wptr;
+
+ while (budget && w != r) {
+ rx = port->rxq_ring + r;
+ word0 = rx->word0;
+ word1 = rx->word1;
+ mapping = rx->word2.buf_adr;
+ word3 = rx->word3;
+
+ r++;
+ r &= m;
+
+ frag_len = word0.bits.buffer_size;
+ frame_len = word1.bits.byte_count;
+ page_offs = mapping & ~PAGE_MASK;
+
+ if (!mapping) {
+ netdev_err(netdev,
+ "rxq[%u]: HW BUG: zero DMA desc\n", r);
+ goto err_drop;
+ }
+
+ /* Freeq pointers are one page off */
+ gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
+ if (!gpage) {
+ dev_err(geth->dev, "could not find mapping\n");
+ continue;
+ }
+ page = gpage->page;
+
+ if (word3.bits32 & SOF_BIT) {
+ if (skb) {
+ napi_free_frags(&port->napi);
+ port->stats.rx_dropped++;
+ }
+
+ skb = gmac_skb_if_good_frame(port, word0, frame_len);
+ if (!skb)
+ goto err_drop;
+
+ page_offs += NET_IP_ALIGN;
+ frag_len -= NET_IP_ALIGN;
+ frag_nr = 0;
+
+ } else if (!skb) {
+ put_page(page);
+ continue;
+ }
+
+ if (word3.bits32 & EOF_BIT)
+ frag_len = frame_len - skb->len;
+
+ /* append page frag to skb */
+ if (frag_nr == MAX_SKB_FRAGS)
+ goto err_drop;
+
+ if (frag_len == 0)
+ netdev_err(netdev, "Received fragment with len = 0\n");
+
+ skb_fill_page_desc(skb, frag_nr, page, page_offs, frag_len);
+ skb->len += frag_len;
+ skb->data_len += frag_len;
+ skb->truesize += frag_len;
+ frag_nr++;
+
+ if (word3.bits32 & EOF_BIT) {
+ napi_gro_frags(&port->napi);
+ skb = NULL;
+ --budget;
+ }
+ continue;
+
+err_drop:
+ if (skb) {
+ napi_free_frags(&port->napi);
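+ /* napi_free_frags() disposed of the skb itself */
+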
skb = NULL; + } + + if (mapping) + put_page(page); + + port->stats.rx_dropped++; + } + + writew(r, ptr_reg); + return budget; +} + +static int gmac_napi_poll(struct napi_struct *napi, int budget) +{ + struct gemini_ethernet_port *port = netdev_priv(napi->dev); + struct gemini_ethernet *geth = port->geth; + unsigned int freeq_threshold; + unsigned int received; + + freeq_threshold = 1 << (geth->freeq_order - 1); + u64_stats_update_begin(&port->rx_stats_syncp); + + received = gmac_rx(napi->dev, budget); + if (received < budget) { + napi_gro_flush(napi, false); + napi_complete_done(napi, received); + gmac_enable_rx_irq(napi->dev, 1); + ++port->rx_napi_exits; + } + + port->freeq_refill += (budget - received); + if (port->freeq_refill > freeq_threshold) { + port->freeq_refill -= freeq_threshold; + geth_fill_freeq(geth, true); + } + + u64_stats_update_end(&port->rx_stats_syncp); + return received; +} + +static void gmac_dump_dma_state(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + struct gemini_ethernet *geth = port->geth; + void __iomem *ptr_reg; + u32 reg[5]; + + /* Interrupt status */ + reg[0] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_0_REG); + reg[1] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_1_REG); + reg[2] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_2_REG); + reg[3] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_3_REG); + reg[4] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); + netdev_err(netdev, "IRQ status: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + reg[0], reg[1], reg[2], reg[3], reg[4]); + + /* Interrupt enable */ + reg[0] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); + reg[1] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); + reg[2] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG); + reg[3] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG); + reg[4] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); + netdev_err(netdev, "IRQ enable: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + reg[0], reg[1], reg[2], reg[3], reg[4]); + + /* RX DMA status */ + reg[0] = readl(port->dma_base + GMAC_DMA_RX_FIRST_DESC_REG); + reg[1] = readl(port->dma_base + GMAC_DMA_RX_CURR_DESC_REG); + reg[2] = GET_RPTR(port->rxq_rwptr); + reg[3] = GET_WPTR(port->rxq_rwptr); + netdev_err(netdev, "RX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n", + reg[0], reg[1], reg[2], reg[3]); + + reg[0] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD0_REG); + reg[1] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD1_REG); + reg[2] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD2_REG); + reg[3] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD3_REG); + netdev_err(netdev, "RX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n", + reg[0], reg[1], reg[2], reg[3]); + + /* TX DMA status */ + ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG; + + reg[0] = readl(port->dma_base + GMAC_DMA_TX_FIRST_DESC_REG); + reg[1] = readl(port->dma_base + GMAC_DMA_TX_CURR_DESC_REG); + reg[2] = GET_RPTR(ptr_reg); + reg[3] = GET_WPTR(ptr_reg); + netdev_err(netdev, "TX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n", + reg[0], reg[1], reg[2], reg[3]); + + reg[0] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD0_REG); + reg[1] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD1_REG); + reg[2] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD2_REG); + reg[3] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD3_REG); + netdev_err(netdev, "TX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n", + reg[0], reg[1], reg[2], reg[3]); + + /* FREE queues status */ + ptr_reg = geth->base + GLOBAL_SWFQ_RWPTR_REG; + + reg[0] = GET_RPTR(ptr_reg); + 
reg[1] = GET_WPTR(ptr_reg); + + ptr_reg = geth->base + GLOBAL_HWFQ_RWPTR_REG; + + reg[2] = GET_RPTR(ptr_reg); + reg[3] = GET_WPTR(ptr_reg); + netdev_err(netdev, "FQ SW ptr: %u %u, HW ptr: %u %u\n", + reg[0], reg[1], reg[2], reg[3]); +} + +static void gmac_update_hw_stats(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + unsigned int rx_discards, rx_mcast, rx_bcast; + struct gemini_ethernet *geth = port->geth; + unsigned long flags; + + spin_lock_irqsave(&geth->irq_lock, flags); + u64_stats_update_begin(&port->ir_stats_syncp); + + rx_discards = readl(port->gmac_base + GMAC_IN_DISCARDS); + port->hw_stats[0] += rx_discards; + port->hw_stats[1] += readl(port->gmac_base + GMAC_IN_ERRORS); + rx_mcast = readl(port->gmac_base + GMAC_IN_MCAST); + port->hw_stats[2] += rx_mcast; + rx_bcast = readl(port->gmac_base + GMAC_IN_BCAST); + port->hw_stats[3] += rx_bcast; + port->hw_stats[4] += readl(port->gmac_base + GMAC_IN_MAC1); + port->hw_stats[5] += readl(port->gmac_base + GMAC_IN_MAC2); + + port->stats.rx_missed_errors += rx_discards; + port->stats.multicast += rx_mcast; + port->stats.multicast += rx_bcast; + + writel(GMAC0_MIB_INT_BIT << (netdev->dev_id * 8), + geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); + + u64_stats_update_end(&port->ir_stats_syncp); + spin_unlock_irqrestore(&geth->irq_lock, flags); +} + +/** + * gmac_get_intr_flags() - get interrupt status flags for a port from + * @netdev: the net device for the port to get flags from + * @i: the interrupt status register 0..4 + */ +static u32 gmac_get_intr_flags(struct net_device *netdev, int i) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + struct gemini_ethernet *geth = port->geth; + void __iomem *irqif_reg, *irqen_reg; + unsigned int offs, val; + + /* Calculate the offset using the stride of the status registers */ + offs = i * (GLOBAL_INTERRUPT_STATUS_1_REG - + GLOBAL_INTERRUPT_STATUS_0_REG); + + irqif_reg = geth->base + GLOBAL_INTERRUPT_STATUS_0_REG + offs; + irqen_reg = geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG + offs; + + val = readl(irqif_reg) & readl(irqen_reg); + return val; +} + +static enum hrtimer_restart gmac_coalesce_delay_expired(struct hrtimer *timer) +{ + struct gemini_ethernet_port *port = + container_of(timer, struct gemini_ethernet_port, + rx_coalesce_timer); + + napi_schedule(&port->napi); + return HRTIMER_NORESTART; +} + +static irqreturn_t gmac_irq(int irq, void *data) +{ + struct gemini_ethernet_port *port; + struct net_device *netdev = data; + struct gemini_ethernet *geth; + u32 val, orr = 0; + + port = netdev_priv(netdev); + geth = port->geth; + + val = gmac_get_intr_flags(netdev, 0); + orr |= val; + + if (val & (GMAC0_IRQ0_2 << (netdev->dev_id * 2))) { + /* Oh, crap */ + netdev_err(netdev, "hw failure/sw bug\n"); + gmac_dump_dma_state(netdev); + + /* don't know how to recover, just reduce losses */ + gmac_enable_irq(netdev, 0); + return IRQ_HANDLED; + } + + if (val & (GMAC0_IRQ0_TXQ0_INTS << (netdev->dev_id * 6))) + gmac_tx_irq(netdev, 0); + + val = gmac_get_intr_flags(netdev, 1); + orr |= val; + + if (val & (DEFAULT_Q0_INT_BIT << netdev->dev_id)) { + gmac_enable_rx_irq(netdev, 0); + + if (!port->rx_coalesce_nsecs) { + napi_schedule(&port->napi); + } else { + ktime_t ktime; + + ktime = ktime_set(0, port->rx_coalesce_nsecs); + hrtimer_start(&port->rx_coalesce_timer, ktime, + HRTIMER_MODE_REL); + } + } + + val = gmac_get_intr_flags(netdev, 4); + orr |= val; + + if (val & (GMAC0_MIB_INT_BIT << (netdev->dev_id * 8))) + gmac_update_hw_stats(netdev); + + if (val & 
(GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) { + writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8), + geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); + + spin_lock(&geth->irq_lock); + u64_stats_update_begin(&port->ir_stats_syncp); + ++port->stats.rx_fifo_errors; + u64_stats_update_end(&port->ir_stats_syncp); + spin_unlock(&geth->irq_lock); + } + + return orr ? IRQ_HANDLED : IRQ_NONE; +} + +static void gmac_start_dma(struct gemini_ethernet_port *port) +{ + void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG; + union gmac_dma_ctrl dma_ctrl; + + dma_ctrl.bits32 = readl(dma_ctrl_reg); + dma_ctrl.bits.rd_enable = 1; + dma_ctrl.bits.td_enable = 1; + dma_ctrl.bits.loopback = 0; + dma_ctrl.bits.drop_small_ack = 0; + dma_ctrl.bits.rd_insert_bytes = NET_IP_ALIGN; + dma_ctrl.bits.rd_prot = HPROT_DATA_CACHE | HPROT_PRIVILIGED; + dma_ctrl.bits.rd_burst_size = HBURST_INCR8; + dma_ctrl.bits.rd_bus = HSIZE_8; + dma_ctrl.bits.td_prot = HPROT_DATA_CACHE; + dma_ctrl.bits.td_burst_size = HBURST_INCR8; + dma_ctrl.bits.td_bus = HSIZE_8; + + writel(dma_ctrl.bits32, dma_ctrl_reg); +} + +static void gmac_stop_dma(struct gemini_ethernet_port *port) +{ + void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG; + union gmac_dma_ctrl dma_ctrl; + + dma_ctrl.bits32 = readl(dma_ctrl_reg); + dma_ctrl.bits.rd_enable = 0; + dma_ctrl.bits.td_enable = 0; + writel(dma_ctrl.bits32, dma_ctrl_reg); +} + +static int gmac_open(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + int err; + + if (!netdev->phydev) { + err = gmac_setup_phy(netdev); + if (err) { + netif_err(port, ifup, netdev, + "PHY init failed: %d\n", err); + return err; + } + } + + err = request_irq(netdev->irq, gmac_irq, + IRQF_SHARED, netdev->name, netdev); + if (err) { + netdev_err(netdev, "no IRQ\n"); + return err; + } + + netif_carrier_off(netdev); + phy_start(netdev->phydev); + + err = geth_resize_freeq(port); + if (err) { + netdev_err(netdev, "could not resize freeq\n"); + goto err_stop_phy; + } + + err = gmac_setup_rxq(netdev); + if (err) { + netdev_err(netdev, "could not setup RXQ\n"); + goto err_stop_phy; + } + + err = gmac_setup_txqs(netdev); + if (err) { + netdev_err(netdev, "could not setup TXQs\n"); + gmac_cleanup_rxq(netdev); + goto err_stop_phy; + } + + napi_enable(&port->napi); + + gmac_start_dma(port); + gmac_enable_irq(netdev, 1); + gmac_enable_tx_rx(netdev); + netif_tx_start_all_queues(netdev); + + hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired; + + netdev_info(netdev, "opened\n"); + + return 0; + +err_stop_phy: + phy_stop(netdev->phydev); + free_irq(netdev->irq, netdev); + return err; +} + +static int gmac_stop(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + + hrtimer_cancel(&port->rx_coalesce_timer); + netif_tx_stop_all_queues(netdev); + gmac_disable_tx_rx(netdev); + gmac_stop_dma(port); + napi_disable(&port->napi); + + gmac_enable_irq(netdev, 0); + gmac_cleanup_rxq(netdev); + gmac_cleanup_txqs(netdev); + + phy_stop(netdev->phydev); + free_irq(netdev->irq, netdev); + + gmac_update_hw_stats(netdev); + return 0; +} + +static void gmac_set_rx_mode(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + union gmac_rx_fltr filter = { .bits = { + .broadcast = 1, + .multicast = 1, + .unicast = 1, + } }; + struct netdev_hw_addr *ha; + unsigned int bit_nr; + u32 mc_filter[2]; + + mc_filter[1] = 0; + mc_filter[0] = 0; + 
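+ /* The 64-bit multicast hash filter is split across two 32-bit
+ * registers: the low six bits of the bitwise-inverted CRC-32 of
+ * the address select one of the 64 bits. Promiscuous and
+ * all-multicast modes simply open the filter completely.
+ */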
+ if (netdev->flags & IFF_PROMISC) { + filter.bits.error = 1; + filter.bits.promiscuous = 1; + mc_filter[1] = ~0; + mc_filter[0] = ~0; + } else if (netdev->flags & IFF_ALLMULTI) { + mc_filter[1] = ~0; + mc_filter[0] = ~0; + } else { + netdev_for_each_mc_addr(ha, netdev) { + bit_nr = ~crc32_le(~0, ha->addr, ETH_ALEN) & 0x3f; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 0x1f); + } + } + + writel(mc_filter[0], port->gmac_base + GMAC_MCAST_FIL0); + writel(mc_filter[1], port->gmac_base + GMAC_MCAST_FIL1); + writel(filter.bits32, port->gmac_base + GMAC_RX_FLTR); +} + +static void gmac_write_mac_address(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + __le32 addr[3]; + + memset(addr, 0, sizeof(addr)); + memcpy(addr, netdev->dev_addr, ETH_ALEN); + + writel(le32_to_cpu(addr[0]), port->gmac_base + GMAC_STA_ADD0); + writel(le32_to_cpu(addr[1]), port->gmac_base + GMAC_STA_ADD1); + writel(le32_to_cpu(addr[2]), port->gmac_base + GMAC_STA_ADD2); +} + +static int gmac_set_mac_address(struct net_device *netdev, void *addr) +{ + struct sockaddr *sa = addr; + + memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN); + gmac_write_mac_address(netdev); + + return 0; +} + +static void gmac_clear_hw_stats(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + + readl(port->gmac_base + GMAC_IN_DISCARDS); + readl(port->gmac_base + GMAC_IN_ERRORS); + readl(port->gmac_base + GMAC_IN_MCAST); + readl(port->gmac_base + GMAC_IN_BCAST); + readl(port->gmac_base + GMAC_IN_MAC1); + readl(port->gmac_base + GMAC_IN_MAC2); +} + +static void gmac_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + unsigned int start; + + gmac_update_hw_stats(netdev); + + /* Racing with RX NAPI */ + do { + start = u64_stats_fetch_begin(&port->rx_stats_syncp); + + stats->rx_packets = port->stats.rx_packets; + stats->rx_bytes = port->stats.rx_bytes; + stats->rx_errors = port->stats.rx_errors; + stats->rx_dropped = port->stats.rx_dropped; + + stats->rx_length_errors = port->stats.rx_length_errors; + stats->rx_over_errors = port->stats.rx_over_errors; + stats->rx_crc_errors = port->stats.rx_crc_errors; + stats->rx_frame_errors = port->stats.rx_frame_errors; + + } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start)); + + /* Racing with MIB and TX completion interrupts */ + do { + start = u64_stats_fetch_begin(&port->ir_stats_syncp); + + stats->tx_errors = port->stats.tx_errors; + stats->tx_packets = port->stats.tx_packets; + stats->tx_bytes = port->stats.tx_bytes; + + stats->multicast = port->stats.multicast; + stats->rx_missed_errors = port->stats.rx_missed_errors; + stats->rx_fifo_errors = port->stats.rx_fifo_errors; + + } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start)); + + /* Racing with hard_start_xmit */ + do { + start = u64_stats_fetch_begin(&port->tx_stats_syncp); + + stats->tx_dropped = port->stats.tx_dropped; + + } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start)); + + stats->rx_dropped += stats->rx_missed_errors; +} + +static int gmac_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_len = gmac_pick_rx_max_len(new_mtu); + + if (max_len < 0) + return -EINVAL; + + gmac_disable_tx_rx(netdev); + + netdev->mtu = new_mtu; + gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT, + CONFIG0_MAXLEN_MASK); + + netdev_update_features(netdev); + + gmac_enable_tx_rx(netdev); + + return 0; +} + +static netdev_features_t gmac_fix_features(struct 
net_device *netdev, + netdev_features_t features) +{ + if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK) + features &= ~GMAC_OFFLOAD_FEATURES; + + return features; +} + +static int gmac_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + int enable = features & NETIF_F_RXCSUM; + unsigned long flags; + u32 reg; + + spin_lock_irqsave(&port->config_lock, flags); + + reg = readl(port->gmac_base + GMAC_CONFIG0); + reg = enable ? reg | CONFIG0_RX_CHKSUM : reg & ~CONFIG0_RX_CHKSUM; + writel(reg, port->gmac_base + GMAC_CONFIG0); + + spin_unlock_irqrestore(&port->config_lock, flags); + return 0; +} + +static int gmac_get_sset_count(struct net_device *netdev, int sset) +{ + return sset == ETH_SS_STATS ? GMAC_STATS_NUM : 0; +} + +static void gmac_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + if (stringset != ETH_SS_STATS) + return; + + memcpy(data, gmac_stats_strings, sizeof(gmac_stats_strings)); +} + +static void gmac_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *estats, u64 *values) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + unsigned int start; + u64 *p; + int i; + + gmac_update_hw_stats(netdev); + + /* Racing with MIB interrupt */ + do { + p = values; + start = u64_stats_fetch_begin(&port->ir_stats_syncp); + + for (i = 0; i < RX_STATS_NUM; i++) + *p++ = port->hw_stats[i]; + + } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start)); + values = p; + + /* Racing with RX NAPI */ + do { + p = values; + start = u64_stats_fetch_begin(&port->rx_stats_syncp); + + for (i = 0; i < RX_STATUS_NUM; i++) + *p++ = port->rx_stats[i]; + for (i = 0; i < RX_CHKSUM_NUM; i++) + *p++ = port->rx_csum_stats[i]; + *p++ = port->rx_napi_exits; + + } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start)); + values = p; + + /* Racing with TX start_xmit */ + do { + p = values; + start = u64_stats_fetch_begin(&port->tx_stats_syncp); + + for (i = 0; i < TX_MAX_FRAGS; i++) { + *values++ = port->tx_frag_stats[i]; + port->tx_frag_stats[i] = 0; + } + *values++ = port->tx_frags_linearized; + *values++ = port->tx_hw_csummed; + + } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start)); +} + +static int gmac_get_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + if (!netdev->phydev) + return -ENXIO; + phy_ethtool_ksettings_get(netdev->phydev, cmd); + + return 0; +} + +static int gmac_set_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + if (!netdev->phydev) + return -ENXIO; + return phy_ethtool_ksettings_set(netdev->phydev, cmd); +} + +static int gmac_nway_reset(struct net_device *netdev) +{ + if (!netdev->phydev) + return -ENXIO; + return phy_start_aneg(netdev->phydev); +} + +static void gmac_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pparam) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + union gmac_config0 config0; + + config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0); + + pparam->rx_pause = config0.bits.rx_fc_en; + pparam->tx_pause = config0.bits.tx_fc_en; + pparam->autoneg = true; +} + +static void gmac_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *rp) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + union gmac_config0 config0; + + config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0); + + rp->rx_max_pending = 1 << 15; + rp->rx_mini_max_pending = 0; + rp->rx_jumbo_max_pending = 0; + rp->tx_max_pending = 1 << 
15; + + rp->rx_pending = 1 << port->rxq_order; + rp->rx_mini_pending = 0; + rp->rx_jumbo_pending = 0; + rp->tx_pending = 1 << port->txq_order; +} + +static int gmac_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *rp) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + int err = 0; + + if (netif_running(netdev)) + return -EBUSY; + + if (rp->rx_pending) { + port->rxq_order = min(15, ilog2(rp->rx_pending - 1) + 1); + err = geth_resize_freeq(port); + } + if (rp->tx_pending) { + port->txq_order = min(15, ilog2(rp->tx_pending - 1) + 1); + port->irq_every_tx_packets = 1 << (port->txq_order - 2); + } + + return err; +} + +static int gmac_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecmd) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + + ecmd->rx_max_coalesced_frames = 1; + ecmd->tx_max_coalesced_frames = port->irq_every_tx_packets; + ecmd->rx_coalesce_usecs = port->rx_coalesce_nsecs / 1000; + + return 0; +} + +static int gmac_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecmd) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + + if (ecmd->tx_max_coalesced_frames < 1) + return -EINVAL; + if (ecmd->tx_max_coalesced_frames >= 1 << port->txq_order) + return -EINVAL; + + port->irq_every_tx_packets = ecmd->tx_max_coalesced_frames; + port->rx_coalesce_nsecs = ecmd->rx_coalesce_usecs * 1000; + + return 0; +} + +static u32 gmac_get_msglevel(struct net_device *netdev) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + + return port->msg_enable; +} + +static void gmac_set_msglevel(struct net_device *netdev, u32 level) +{ + struct gemini_ethernet_port *port = netdev_priv(netdev); + + port->msg_enable = level; +} + +static void gmac_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, netdev->dev_id ? 
"1" : "0"); +} + +static const struct net_device_ops gmac_351x_ops = { + .ndo_init = gmac_init, + .ndo_uninit = gmac_uninit, + .ndo_open = gmac_open, + .ndo_stop = gmac_stop, + .ndo_start_xmit = gmac_start_xmit, + .ndo_tx_timeout = gmac_tx_timeout, + .ndo_set_rx_mode = gmac_set_rx_mode, + .ndo_set_mac_address = gmac_set_mac_address, + .ndo_get_stats64 = gmac_get_stats64, + .ndo_change_mtu = gmac_change_mtu, + .ndo_fix_features = gmac_fix_features, + .ndo_set_features = gmac_set_features, +}; + +static const struct ethtool_ops gmac_351x_ethtool_ops = { + .get_sset_count = gmac_get_sset_count, + .get_strings = gmac_get_strings, + .get_ethtool_stats = gmac_get_ethtool_stats, + .get_link = ethtool_op_get_link, + .get_link_ksettings = gmac_get_ksettings, + .set_link_ksettings = gmac_set_ksettings, + .nway_reset = gmac_nway_reset, + .get_pauseparam = gmac_get_pauseparam, + .get_ringparam = gmac_get_ringparam, + .set_ringparam = gmac_set_ringparam, + .get_coalesce = gmac_get_coalesce, + .set_coalesce = gmac_set_coalesce, + .get_msglevel = gmac_get_msglevel, + .set_msglevel = gmac_set_msglevel, + .get_drvinfo = gmac_get_drvinfo, +}; + +static irqreturn_t gemini_port_irq_thread(int irq, void *data) +{ + unsigned long irqmask = SWFQ_EMPTY_INT_BIT; + struct gemini_ethernet_port *port = data; + struct gemini_ethernet *geth; + unsigned long flags; + + geth = port->geth; + /* The queue is half empty so refill it */ + geth_fill_freeq(geth, true); + + spin_lock_irqsave(&geth->irq_lock, flags); + /* ACK queue interrupt */ + writel(irqmask, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); + /* Enable queue interrupt again */ + irqmask |= readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); + writel(irqmask, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); + spin_unlock_irqrestore(&geth->irq_lock, flags); + + return IRQ_HANDLED; +} + +static irqreturn_t gemini_port_irq(int irq, void *data) +{ + struct gemini_ethernet_port *port = data; + struct gemini_ethernet *geth; + irqreturn_t ret = IRQ_NONE; + u32 val, en; + + geth = port->geth; + spin_lock(&geth->irq_lock); + + val = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); + en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); + + if (val & en & SWFQ_EMPTY_INT_BIT) { + /* Disable the queue empty interrupt while we work on + * processing the queue. Also disable overrun interrupts + * as there is not much we can do about it here. 
+ */ + en &= ~(SWFQ_EMPTY_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT + | GMAC1_RX_OVERRUN_INT_BIT); + writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); + ret = IRQ_WAKE_THREAD; + } + + spin_unlock(&geth->irq_lock); + + return ret; +} + +static void gemini_port_remove(struct gemini_ethernet_port *port) +{ + if (port->netdev) + unregister_netdev(port->netdev); + clk_disable_unprepare(port->pclk); + geth_cleanup_freeq(port->geth); +} + +static void gemini_ethernet_init(struct gemini_ethernet *geth) +{ + writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); + writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); + writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG); + writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG); + writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); + + /* Interrupt config: + * + * GMAC0 intr bits ------> int0 ----> eth0 + * GMAC1 intr bits ------> int1 ----> eth1 + * TOE intr -------------> int1 ----> eth1 + * Classification Intr --> int0 ----> eth0 + * Default Q0 -----------> int0 ----> eth0 + * Default Q1 -----------> int1 ----> eth1 + * FreeQ intr -----------> int1 ----> eth1 + */ + writel(0xCCFC0FC0, geth->base + GLOBAL_INTERRUPT_SELECT_0_REG); + writel(0x00F00002, geth->base + GLOBAL_INTERRUPT_SELECT_1_REG); + writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_2_REG); + writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_3_REG); + writel(0xFF000003, geth->base + GLOBAL_INTERRUPT_SELECT_4_REG); + + /* edge-triggered interrupts packed to level-triggered one... */ + writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG); + writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_1_REG); + writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_2_REG); + writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_3_REG); + writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); + + /* Set up queue */ + writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG); + writel(0, geth->base + GLOBAL_HW_FREEQ_BASE_SIZE_REG); + writel(0, geth->base + GLOBAL_SWFQ_RWPTR_REG); + writel(0, geth->base + GLOBAL_HWFQ_RWPTR_REG); + + geth->freeq_frag_order = DEFAULT_RX_BUF_ORDER; + /* This makes the queue resize on probe() so that we + * set up and enable the queue IRQ. FIXME: fragile. 
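+ *
+ * geth_resize_freeq() returns early when the computed order equals
+ * freeq_order, so seeding it with an order no real configuration
+ * uses (1) guarantees the first port that opens rebuilds the queue.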
+ */ + geth->freeq_order = 1; +} + +static void gemini_port_save_mac_addr(struct gemini_ethernet_port *port) +{ + port->mac_addr[0] = + cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD0)); + port->mac_addr[1] = + cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD1)); + port->mac_addr[2] = + cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD2)); +} + +static int gemini_ethernet_port_probe(struct platform_device *pdev) +{ + char *port_names[2] = { "ethernet0", "ethernet1" }; + struct gemini_ethernet_port *port; + struct device *dev = &pdev->dev; + struct gemini_ethernet *geth; + struct net_device *netdev; + struct resource *gmacres; + struct resource *dmares; + struct device *parent; + unsigned int id; + int irq; + int ret; + + parent = dev->parent; + geth = dev_get_drvdata(parent); + + if (!strcmp(dev_name(dev), "60008000.ethernet-port")) + id = 0; + else if (!strcmp(dev_name(dev), "6000c000.ethernet-port")) + id = 1; + else + return -ENODEV; + + dev_info(dev, "probe %s ID %d\n", dev_name(dev), id); + + netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM); + if (!netdev) { + dev_err(dev, "Can't allocate ethernet device #%d\n", id); + return -ENOMEM; + } + + port = netdev_priv(netdev); + SET_NETDEV_DEV(netdev, dev); + port->netdev = netdev; + port->id = id; + port->geth = geth; + port->dev = dev; + + /* DMA memory */ + dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!dmares) { + dev_err(dev, "no DMA resource\n"); + return -ENODEV; + } + port->dma_base = devm_ioremap_resource(dev, dmares); + if (IS_ERR(port->dma_base)) + return PTR_ERR(port->dma_base); + + /* GMAC config memory */ + gmacres = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!gmacres) { + dev_err(dev, "no GMAC resource\n"); + return -ENODEV; + } + port->gmac_base = devm_ioremap_resource(dev, gmacres); + if (IS_ERR(port->gmac_base)) + return PTR_ERR(port->gmac_base); + + /* Interrupt */ + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(dev, "no IRQ\n"); + return irq ? 
irq : -ENODEV; + } + port->irq = irq; + + /* Clock the port */ + port->pclk = devm_clk_get(dev, "PCLK"); + if (IS_ERR(port->pclk)) { + dev_err(dev, "no PCLK\n"); + return PTR_ERR(port->pclk); + } + ret = clk_prepare_enable(port->pclk); + if (ret) + return ret; + + /* Maybe there is a nice ethernet address we should use */ + gemini_port_save_mac_addr(port); + + /* Reset the port */ + port->reset = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(port->reset)) { + dev_err(dev, "no reset\n"); + return PTR_ERR(port->reset); + } + reset_control_reset(port->reset); + usleep_range(100, 500); + + /* Assign pointer in the main state container */ + if (!id) + geth->port0 = port; + else + geth->port1 = port; + platform_set_drvdata(pdev, port); + + /* Set up and register the netdev */ + netdev->dev_id = port->id; + netdev->irq = irq; + netdev->netdev_ops = &gmac_351x_ops; + netdev->ethtool_ops = &gmac_351x_ethtool_ops; + + spin_lock_init(&port->config_lock); + gmac_clear_hw_stats(netdev); + + netdev->hw_features = GMAC_OFFLOAD_FEATURES; + netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO; + + port->freeq_refill = 0; + netif_napi_add(netdev, &port->napi, gmac_napi_poll, + DEFAULT_NAPI_WEIGHT); + + if (is_valid_ether_addr((void *)port->mac_addr)) { + memcpy(netdev->dev_addr, port->mac_addr, ETH_ALEN); + } else { + dev_dbg(dev, "ethernet address 0x%08x%08x%08x invalid\n", + port->mac_addr[0], port->mac_addr[1], + port->mac_addr[2]); + dev_info(dev, "using a random ethernet address\n"); + random_ether_addr(netdev->dev_addr); + } + gmac_write_mac_address(netdev); + + ret = devm_request_threaded_irq(port->dev, + port->irq, + gemini_port_irq, + gemini_port_irq_thread, + IRQF_SHARED, + port_names[port->id], + port); + if (ret) + return ret; + + ret = register_netdev(netdev); + if (!ret) { + netdev_info(netdev, + "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n", + port->irq, &dmares->start, + &gmacres->start); + ret = gmac_setup_phy(netdev); + if (ret) + netdev_info(netdev, + "PHY init failed, deferring to ifup time\n"); + return 0; + } + + port->netdev = NULL; + free_netdev(netdev); + return ret; +} + +static int gemini_ethernet_port_remove(struct platform_device *pdev) +{ + struct gemini_ethernet_port *port = platform_get_drvdata(pdev); + + gemini_port_remove(port); + return 0; +} + +static const struct of_device_id gemini_ethernet_port_of_match[] = { + { + .compatible = "cortina,gemini-ethernet-port", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, gemini_ethernet_port_of_match); + +static struct platform_driver gemini_ethernet_port_driver = { + .driver = { + .name = "gemini-ethernet-port", + .of_match_table = of_match_ptr(gemini_ethernet_port_of_match), + }, + .probe = gemini_ethernet_port_probe, + .remove = gemini_ethernet_port_remove, +}; + +static int gemini_ethernet_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct gemini_ethernet *geth; + unsigned int retry = 5; + struct resource *res; + u32 val; + + /* Global registers */ + geth = devm_kzalloc(dev, sizeof(*geth), GFP_KERNEL); + if (!geth) + return -ENOMEM; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + geth->base = devm_ioremap_resource(dev, res); + if (IS_ERR(geth->base)) + return PTR_ERR(geth->base); + geth->dev = dev; + + /* Wait for ports to stabilize */ + do { + udelay(2); + val = readl(geth->base + GLOBAL_TOE_VERSION_REG); + barrier(); + } while (!val && --retry); + if (!retry) { + dev_err(dev, "failed to reset ethernet\n"); + return -EIO; + } + dev_info(dev, "Ethernet 
device ID: 0x%03x, revision 0x%01x\n", + (val >> 4) & 0xFFFU, val & 0xFU); + + spin_lock_init(&geth->irq_lock); + spin_lock_init(&geth->freeq_lock); + gemini_ethernet_init(geth); + + /* The children will use this */ + platform_set_drvdata(pdev, geth); + + /* Spawn child devices for the two ports */ + return devm_of_platform_populate(dev); +} + +static int gemini_ethernet_remove(struct platform_device *pdev) +{ + struct gemini_ethernet *geth = platform_get_drvdata(pdev); + + gemini_ethernet_init(geth); + geth_cleanup_freeq(geth); + + return 0; +} + +static const struct of_device_id gemini_ethernet_of_match[] = { + { + .compatible = "cortina,gemini-ethernet", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, gemini_ethernet_of_match); + +static struct platform_driver gemini_ethernet_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = of_match_ptr(gemini_ethernet_of_match), + }, + .probe = gemini_ethernet_probe, + .remove = gemini_ethernet_remove, +}; + +static int __init gemini_ethernet_module_init(void) +{ + int ret; + + ret = platform_driver_register(&gemini_ethernet_port_driver); + if (ret) + return ret; + + ret = platform_driver_register(&gemini_ethernet_driver); + if (ret) { + platform_driver_unregister(&gemini_ethernet_port_driver); + return ret; + } + + return 0; +} +module_init(gemini_ethernet_module_init); + +static void __exit gemini_ethernet_module_exit(void) +{ + platform_driver_unregister(&gemini_ethernet_driver); + platform_driver_unregister(&gemini_ethernet_port_driver); +} +module_exit(gemini_ethernet_module_exit); + +MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); +MODULE_DESCRIPTION("StorLink SL351x (Gemini) ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h new file mode 100644 index 000000000000..0b12f89bf89a --- /dev/null +++ b/drivers/net/ethernet/cortina/gemini.h @@ -0,0 +1,958 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Register definitions for Gemini GMAC Ethernet device driver + * + * Copyright (C) 2006 Storlink, Corp. 
+ * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Copyright (C) 2010 Michał Mirosław <mirq-linux@rere.qmqm.pl>
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ */
+#ifndef _GEMINI_ETHERNET_H
+#define _GEMINI_ETHERNET_H
+
+#include <linux/bitops.h>
+
+/* Base Registers */
+#define TOE_NONTOE_QUE_HDR_BASE 0x2000
+#define TOE_TOE_QUE_HDR_BASE 0x3000
+
+/* Queue ID */
+#define TOE_SW_FREE_QID 0x00
+#define TOE_HW_FREE_QID 0x01
+#define TOE_GMAC0_SW_TXQ0_QID 0x02
+#define TOE_GMAC0_SW_TXQ1_QID 0x03
+#define TOE_GMAC0_SW_TXQ2_QID 0x04
+#define TOE_GMAC0_SW_TXQ3_QID 0x05
+#define TOE_GMAC0_SW_TXQ4_QID 0x06
+#define TOE_GMAC0_SW_TXQ5_QID 0x07
+#define TOE_GMAC0_HW_TXQ0_QID 0x08
+#define TOE_GMAC0_HW_TXQ1_QID 0x09
+#define TOE_GMAC0_HW_TXQ2_QID 0x0A
+#define TOE_GMAC0_HW_TXQ3_QID 0x0B
+#define TOE_GMAC1_SW_TXQ0_QID 0x12
+#define TOE_GMAC1_SW_TXQ1_QID 0x13
+#define TOE_GMAC1_SW_TXQ2_QID 0x14
+#define TOE_GMAC1_SW_TXQ3_QID 0x15
+#define TOE_GMAC1_SW_TXQ4_QID 0x16
+#define TOE_GMAC1_SW_TXQ5_QID 0x17
+#define TOE_GMAC1_HW_TXQ0_QID 0x18
+#define TOE_GMAC1_HW_TXQ1_QID 0x19
+#define TOE_GMAC1_HW_TXQ2_QID 0x1A
+#define TOE_GMAC1_HW_TXQ3_QID 0x1B
+#define TOE_GMAC0_DEFAULT_QID 0x20
+#define TOE_GMAC1_DEFAULT_QID 0x21
+#define TOE_CLASSIFICATION_QID(x) (0x22 + x) /* 0x22 ~ 0x2F */
+#define TOE_TOE_QID(x) (0x40 + x) /* 0x40 ~ 0x7F */
+
+/* TOE DMA Queue Size should be 2^n, n = 6...12
+ * TOE DMA Queues are the following queue types:
+ * SW Free Queue, HW Free Queue,
+ * GMAC 0/1 SW TX Q0-5, and GMAC 0/1 HW TX Q0-5
+ * The base address and descriptor number are configured at
+ * DMA Queues Descriptor Ring Base Address/Size Register (offset 0x0004)
+ */
+#define GET_WPTR(addr) readw((addr) + 2)
+#define GET_RPTR(addr) readw((addr))
+#define SET_WPTR(addr, data) writew((data), (addr) + 2)
+#define SET_RPTR(addr, data) writew((data), (addr))
+#define __RWPTR_NEXT(x, mask) (((unsigned int)(x) + 1) & (mask))
+#define __RWPTR_PREV(x, mask) (((unsigned int)(x) - 1) & (mask))
+#define __RWPTR_DISTANCE(r, w, mask) (((unsigned int)(w) - (r)) & (mask))
+#define __RWPTR_MASK(order) ((1 << (order)) - 1)
+#define RWPTR_NEXT(x, order) __RWPTR_NEXT((x), __RWPTR_MASK((order)))
+#define RWPTR_PREV(x, order) __RWPTR_PREV((x), __RWPTR_MASK((order)))
+#define RWPTR_DISTANCE(r, w, order) __RWPTR_DISTANCE((r), (w), \
+ __RWPTR_MASK((order)))
+
+/* Global registers */
+#define GLOBAL_TOE_VERSION_REG 0x0000
+#define GLOBAL_SW_FREEQ_BASE_SIZE_REG 0x0004
+#define GLOBAL_HW_FREEQ_BASE_SIZE_REG 0x0008
+#define GLOBAL_DMA_SKB_SIZE_REG 0x0010
+#define GLOBAL_SWFQ_RWPTR_REG 0x0014
+#define GLOBAL_HWFQ_RWPTR_REG 0x0018
+#define GLOBAL_INTERRUPT_STATUS_0_REG 0x0020
+#define GLOBAL_INTERRUPT_ENABLE_0_REG 0x0024
+#define GLOBAL_INTERRUPT_SELECT_0_REG 0x0028
+#define GLOBAL_INTERRUPT_STATUS_1_REG 0x0030
+#define GLOBAL_INTERRUPT_ENABLE_1_REG 0x0034
+#define GLOBAL_INTERRUPT_SELECT_1_REG 0x0038
+#define GLOBAL_INTERRUPT_STATUS_2_REG 0x0040
+#define GLOBAL_INTERRUPT_ENABLE_2_REG 0x0044
+#define GLOBAL_INTERRUPT_SELECT_2_REG 0x0048
+#define GLOBAL_INTERRUPT_STATUS_3_REG 0x0050
+#define GLOBAL_INTERRUPT_ENABLE_3_REG 0x0054
+#define GLOBAL_INTERRUPT_SELECT_3_REG 0x0058
+#define GLOBAL_INTERRUPT_STATUS_4_REG 0x0060
+#define GLOBAL_INTERRUPT_ENABLE_4_REG 0x0064
+#define GLOBAL_INTERRUPT_SELECT_4_REG 0x0068
+#define GLOBAL_HASH_TABLE_BASE_REG 0x006C
+#define GLOBAL_QUEUE_THRESHOLD_REG 0x0070
+
+/* GMAC 0/1 DMA/TOE register */
+#define GMAC_DMA_CTRL_REG 0x0000
+#define GMAC_TX_WEIGHTING_CTRL_0_REG 0x0004
+#define GMAC_TX_WEIGHTING_CTRL_1_REG 0x0008 +#define GMAC_SW_TX_QUEUE0_PTR_REG 0x000C +#define GMAC_SW_TX_QUEUE1_PTR_REG 0x0010 +#define GMAC_SW_TX_QUEUE2_PTR_REG 0x0014 +#define GMAC_SW_TX_QUEUE3_PTR_REG 0x0018 +#define GMAC_SW_TX_QUEUE4_PTR_REG 0x001C +#define GMAC_SW_TX_QUEUE5_PTR_REG 0x0020 +#define GMAC_SW_TX_QUEUE_PTR_REG(i) (GMAC_SW_TX_QUEUE0_PTR_REG + 4 * (i)) +#define GMAC_HW_TX_QUEUE0_PTR_REG 0x0024 +#define GMAC_HW_TX_QUEUE1_PTR_REG 0x0028 +#define GMAC_HW_TX_QUEUE2_PTR_REG 0x002C +#define GMAC_HW_TX_QUEUE3_PTR_REG 0x0030 +#define GMAC_HW_TX_QUEUE_PTR_REG(i) (GMAC_HW_TX_QUEUE0_PTR_REG + 4 * (i)) +#define GMAC_DMA_TX_FIRST_DESC_REG 0x0038 +#define GMAC_DMA_TX_CURR_DESC_REG 0x003C +#define GMAC_DMA_TX_DESC_WORD0_REG 0x0040 +#define GMAC_DMA_TX_DESC_WORD1_REG 0x0044 +#define GMAC_DMA_TX_DESC_WORD2_REG 0x0048 +#define GMAC_DMA_TX_DESC_WORD3_REG 0x004C +#define GMAC_SW_TX_QUEUE_BASE_REG 0x0050 +#define GMAC_HW_TX_QUEUE_BASE_REG 0x0054 +#define GMAC_DMA_RX_FIRST_DESC_REG 0x0058 +#define GMAC_DMA_RX_CURR_DESC_REG 0x005C +#define GMAC_DMA_RX_DESC_WORD0_REG 0x0060 +#define GMAC_DMA_RX_DESC_WORD1_REG 0x0064 +#define GMAC_DMA_RX_DESC_WORD2_REG 0x0068 +#define GMAC_DMA_RX_DESC_WORD3_REG 0x006C +#define GMAC_HASH_ENGINE_REG0 0x0070 +#define GMAC_HASH_ENGINE_REG1 0x0074 +/* matching rule 0 Control register 0 */ +#define GMAC_MR0CR0 0x0078 +#define GMAC_MR0CR1 0x007C +#define GMAC_MR0CR2 0x0080 +#define GMAC_MR1CR0 0x0084 +#define GMAC_MR1CR1 0x0088 +#define GMAC_MR1CR2 0x008C +#define GMAC_MR2CR0 0x0090 +#define GMAC_MR2CR1 0x0094 +#define GMAC_MR2CR2 0x0098 +#define GMAC_MR3CR0 0x009C +#define GMAC_MR3CR1 0x00A0 +#define GMAC_MR3CR2 0x00A4 +/* Support Protocol Register 0 */ +#define GMAC_SPR0 0x00A8 +#define GMAC_SPR1 0x00AC +#define GMAC_SPR2 0x00B0 +#define GMAC_SPR3 0x00B4 +#define GMAC_SPR4 0x00B8 +#define GMAC_SPR5 0x00BC +#define GMAC_SPR6 0x00C0 +#define GMAC_SPR7 0x00C4 +/* GMAC Hash/Rx/Tx AHB Weighting register */ +#define GMAC_AHB_WEIGHT_REG 0x00C8 + +/* TOE GMAC 0/1 register */ +#define GMAC_STA_ADD0 0x0000 +#define GMAC_STA_ADD1 0x0004 +#define GMAC_STA_ADD2 0x0008 +#define GMAC_RX_FLTR 0x000c +#define GMAC_MCAST_FIL0 0x0010 +#define GMAC_MCAST_FIL1 0x0014 +#define GMAC_CONFIG0 0x0018 +#define GMAC_CONFIG1 0x001c +#define GMAC_CONFIG2 0x0020 +#define GMAC_CONFIG3 0x0024 +#define GMAC_RESERVED 0x0028 +#define GMAC_STATUS 0x002c +#define GMAC_IN_DISCARDS 0x0030 +#define GMAC_IN_ERRORS 0x0034 +#define GMAC_IN_MCAST 0x0038 +#define GMAC_IN_BCAST 0x003c +#define GMAC_IN_MAC1 0x0040 /* for STA 1 MAC Address */ +#define GMAC_IN_MAC2 0x0044 /* for STA 2 MAC Address */ + +#define RX_STATS_NUM 6 + +/* DMA Queues description Ring Base Address/Size Register (offset 0x0004) */ +union dma_q_base_size { + unsigned int bits32; + unsigned int base_size; +}; + +#define DMA_Q_BASE_MASK (~0x0f) + +/* DMA SKB Buffer register (offset 0x0008) */ +union dma_skb_size { + unsigned int bits32; + struct bit_0008 { + unsigned int sw_skb_size : 16; /* SW Free poll SKB Size */ + unsigned int hw_skb_size : 16; /* HW Free poll SKB Size */ + } bits; +}; + +/* DMA SW Free Queue Read/Write Pointer Register (offset 0x000c) */ +union dma_rwptr { + unsigned int bits32; + struct bit_000c { + unsigned int rptr : 16; /* Read Ptr, RO */ + unsigned int wptr : 16; /* Write Ptr, RW */ + } bits; +}; + +/* Interrupt Status Register 0 (offset 0x0020) + * Interrupt Mask Register 0 (offset 0x0024) + * Interrupt Select Register 0 (offset 0x0028) + */ +#define GMAC1_TXDERR_INT_BIT BIT(31) +#define GMAC1_TXPERR_INT_BIT BIT(30) 
+#define GMAC0_TXDERR_INT_BIT BIT(29) +#define GMAC0_TXPERR_INT_BIT BIT(28) +#define GMAC1_RXDERR_INT_BIT BIT(27) +#define GMAC1_RXPERR_INT_BIT BIT(26) +#define GMAC0_RXDERR_INT_BIT BIT(25) +#define GMAC0_RXPERR_INT_BIT BIT(24) +#define GMAC1_SWTQ15_FIN_INT_BIT BIT(23) +#define GMAC1_SWTQ14_FIN_INT_BIT BIT(22) +#define GMAC1_SWTQ13_FIN_INT_BIT BIT(21) +#define GMAC1_SWTQ12_FIN_INT_BIT BIT(20) +#define GMAC1_SWTQ11_FIN_INT_BIT BIT(19) +#define GMAC1_SWTQ10_FIN_INT_BIT BIT(18) +#define GMAC0_SWTQ05_FIN_INT_BIT BIT(17) +#define GMAC0_SWTQ04_FIN_INT_BIT BIT(16) +#define GMAC0_SWTQ03_FIN_INT_BIT BIT(15) +#define GMAC0_SWTQ02_FIN_INT_BIT BIT(14) +#define GMAC0_SWTQ01_FIN_INT_BIT BIT(13) +#define GMAC0_SWTQ00_FIN_INT_BIT BIT(12) +#define GMAC1_SWTQ15_EOF_INT_BIT BIT(11) +#define GMAC1_SWTQ14_EOF_INT_BIT BIT(10) +#define GMAC1_SWTQ13_EOF_INT_BIT BIT(9) +#define GMAC1_SWTQ12_EOF_INT_BIT BIT(8) +#define GMAC1_SWTQ11_EOF_INT_BIT BIT(7) +#define GMAC1_SWTQ10_EOF_INT_BIT BIT(6) +#define GMAC0_SWTQ05_EOF_INT_BIT BIT(5) +#define GMAC0_SWTQ04_EOF_INT_BIT BIT(4) +#define GMAC0_SWTQ03_EOF_INT_BIT BIT(3) +#define GMAC0_SWTQ02_EOF_INT_BIT BIT(2) +#define GMAC0_SWTQ01_EOF_INT_BIT BIT(1) +#define GMAC0_SWTQ00_EOF_INT_BIT BIT(0) + +/* Interrupt Status Register 1 (offset 0x0030) + * Interrupt Mask Register 1 (offset 0x0034) + * Interrupt Select Register 1 (offset 0x0038) + */ +#define TOE_IQ3_FULL_INT_BIT BIT(31) +#define TOE_IQ2_FULL_INT_BIT BIT(30) +#define TOE_IQ1_FULL_INT_BIT BIT(29) +#define TOE_IQ0_FULL_INT_BIT BIT(28) +#define TOE_IQ3_INT_BIT BIT(27) +#define TOE_IQ2_INT_BIT BIT(26) +#define TOE_IQ1_INT_BIT BIT(25) +#define TOE_IQ0_INT_BIT BIT(24) +#define GMAC1_HWTQ13_EOF_INT_BIT BIT(23) +#define GMAC1_HWTQ12_EOF_INT_BIT BIT(22) +#define GMAC1_HWTQ11_EOF_INT_BIT BIT(21) +#define GMAC1_HWTQ10_EOF_INT_BIT BIT(20) +#define GMAC0_HWTQ03_EOF_INT_BIT BIT(19) +#define GMAC0_HWTQ02_EOF_INT_BIT BIT(18) +#define GMAC0_HWTQ01_EOF_INT_BIT BIT(17) +#define GMAC0_HWTQ00_EOF_INT_BIT BIT(16) +#define CLASS_RX_INT_BIT(x) BIT((x + 2)) +#define DEFAULT_Q1_INT_BIT BIT(1) +#define DEFAULT_Q0_INT_BIT BIT(0) + +#define TOE_IQ_INT_BITS (TOE_IQ0_INT_BIT | TOE_IQ1_INT_BIT | \ + TOE_IQ2_INT_BIT | TOE_IQ3_INT_BIT) +#define TOE_IQ_FULL_BITS (TOE_IQ0_FULL_INT_BIT | TOE_IQ1_FULL_INT_BIT | \ + TOE_IQ2_FULL_INT_BIT | TOE_IQ3_FULL_INT_BIT) +#define TOE_IQ_ALL_BITS (TOE_IQ_INT_BITS | TOE_IQ_FULL_BITS) +#define TOE_CLASS_RX_INT_BITS 0xfffc + +/* Interrupt Status Register 2 (offset 0x0040) + * Interrupt Mask Register 2 (offset 0x0044) + * Interrupt Select Register 2 (offset 0x0048) + */ +#define TOE_QL_FULL_INT_BIT(x) BIT(x) + +/* Interrupt Status Register 3 (offset 0x0050) + * Interrupt Mask Register 3 (offset 0x0054) + * Interrupt Select Register 3 (offset 0x0058) + */ +#define TOE_QH_FULL_INT_BIT(x) BIT(x - 32) + +/* Interrupt Status Register 4 (offset 0x0060) + * Interrupt Mask Register 4 (offset 0x0064) + * Interrupt Select Register 4 (offset 0x0068) + */ +#define GMAC1_RESERVED_INT_BIT BIT(31) +#define GMAC1_MIB_INT_BIT BIT(30) +#define GMAC1_RX_PAUSE_ON_INT_BIT BIT(29) +#define GMAC1_TX_PAUSE_ON_INT_BIT BIT(28) +#define GMAC1_RX_PAUSE_OFF_INT_BIT BIT(27) +#define GMAC1_TX_PAUSE_OFF_INT_BIT BIT(26) +#define GMAC1_RX_OVERRUN_INT_BIT BIT(25) +#define GMAC1_STATUS_CHANGE_INT_BIT BIT(24) +#define GMAC0_RESERVED_INT_BIT BIT(23) +#define GMAC0_MIB_INT_BIT BIT(22) +#define GMAC0_RX_PAUSE_ON_INT_BIT BIT(21) +#define GMAC0_TX_PAUSE_ON_INT_BIT BIT(20) +#define GMAC0_RX_PAUSE_OFF_INT_BIT BIT(19) +#define GMAC0_TX_PAUSE_OFF_INT_BIT BIT(18) 
+#define GMAC0_RX_OVERRUN_INT_BIT BIT(17) +#define GMAC0_STATUS_CHANGE_INT_BIT BIT(16) +#define CLASS_RX_FULL_INT_BIT(x) BIT(x + 2) +#define HWFQ_EMPTY_INT_BIT BIT(1) +#define SWFQ_EMPTY_INT_BIT BIT(0) + +#define GMAC0_INT_BITS (GMAC0_RESERVED_INT_BIT | GMAC0_MIB_INT_BIT | \ + GMAC0_RX_PAUSE_ON_INT_BIT | \ + GMAC0_TX_PAUSE_ON_INT_BIT | \ + GMAC0_RX_PAUSE_OFF_INT_BIT | \ + GMAC0_TX_PAUSE_OFF_INT_BIT | \ + GMAC0_RX_OVERRUN_INT_BIT | \ + GMAC0_STATUS_CHANGE_INT_BIT) +#define GMAC1_INT_BITS (GMAC1_RESERVED_INT_BIT | GMAC1_MIB_INT_BIT | \ + GMAC1_RX_PAUSE_ON_INT_BIT | \ + GMAC1_TX_PAUSE_ON_INT_BIT | \ + GMAC1_RX_PAUSE_OFF_INT_BIT | \ + GMAC1_TX_PAUSE_OFF_INT_BIT | \ + GMAC1_RX_OVERRUN_INT_BIT | \ + GMAC1_STATUS_CHANGE_INT_BIT) + +#define CLASS_RX_FULL_INT_BITS 0xfffc + +/* GLOBAL_QUEUE_THRESHOLD_REG (offset 0x0070) */ +union queue_threshold { + unsigned int bits32; + struct bit_0070_2 { + /* 7:0 Software Free Queue Empty Threshold */ + unsigned int swfq_empty:8; + /* 15:8 Hardware Free Queue Empty Threshold */ + unsigned int hwfq_empty:8; + /* 23:16 */ + unsigned int intrq:8; + /* 31:24 */ + unsigned int toe_class:8; + } bits; +}; + +/* GMAC DMA Control Register + * GMAC0 offset 0x8000 + * GMAC1 offset 0xC000 + */ +union gmac_dma_ctrl { + unsigned int bits32; + struct bit_8000 { + /* bit 1:0 Peripheral Bus Width */ + unsigned int td_bus:2; + /* bit 3:2 TxDMA max burst size for every AHB request */ + unsigned int td_burst_size:2; + /* bit 7:4 TxDMA protection control */ + unsigned int td_prot:4; + /* bit 9:8 Peripheral Bus Width */ + unsigned int rd_bus:2; + /* bit 11:10 DMA max burst size for every AHB request */ + unsigned int rd_burst_size:2; + /* bit 15:12 DMA Protection Control */ + unsigned int rd_prot:4; + /* bit 17:16 */ + unsigned int rd_insert_bytes:2; + /* bit 27:18 */ + unsigned int reserved:10; + /* bit 28 1: Drop, 0: Accept */ + unsigned int drop_small_ack:1; + /* bit 29 Loopback TxDMA to RxDMA */ + unsigned int loopback:1; + /* bit 30 Tx DMA Enable */ + unsigned int td_enable:1; + /* bit 31 Rx DMA Enable */ + unsigned int rd_enable:1; + } bits; +}; + +/* GMAC Tx Weighting Control Register 0 + * GMAC0 offset 0x8004 + * GMAC1 offset 0xC004 + */ +union gmac_tx_wcr0 { + unsigned int bits32; + struct bit_8004 { + /* bit 5:0 HW TX Queue 3 */ + unsigned int hw_tq0:6; + /* bit 11:6 HW TX Queue 2 */ + unsigned int hw_tq1:6; + /* bit 17:12 HW TX Queue 1 */ + unsigned int hw_tq2:6; + /* bit 23:18 HW TX Queue 0 */ + unsigned int hw_tq3:6; + /* bit 31:24 */ + unsigned int reserved:8; + } bits; +}; + +/* GMAC Tx Weighting Control Register 1 + * GMAC0 offset 0x8008 + * GMAC1 offset 0xC008 + */ +union gmac_tx_wcr1 { + unsigned int bits32; + struct bit_8008 { + /* bit 4:0 SW TX Queue 0 */ + unsigned int sw_tq0:5; + /* bit 9:5 SW TX Queue 1 */ + unsigned int sw_tq1:5; + /* bit 14:10 SW TX Queue 2 */ + unsigned int sw_tq2:5; + /* bit 19:15 SW TX Queue 3 */ + unsigned int sw_tq3:5; + /* bit 24:20 SW TX Queue 4 */ + unsigned int sw_tq4:5; + /* bit 29:25 SW TX Queue 5 */ + unsigned int sw_tq5:5; + /* bit 31:30 */ + unsigned int reserved:2; + } bits; +}; + +/* GMAC DMA Tx Description Word 0 Register + * GMAC0 offset 0x8040 + * GMAC1 offset 0xC040 + */ +union gmac_txdesc_0 { + unsigned int bits32; + struct bit_8040 { + /* bit 15:0 Transfer size */ + unsigned int buffer_size:16; + /* bit 21:16 number of descriptors used for the current frame */ + unsigned int desc_count:6; + /* bit 22 Tx Status, 1: Successful 0: Failed */ + unsigned int status_tx_ok:1; + /* bit 28:23 Tx Status, Reserved bits */ + unsigned 
int status_rvd:6; + /* bit 29 protocol error during processing this descriptor */ + unsigned int perr:1; + /* bit 30 data error during processing this descriptor */ + unsigned int derr:1; + /* bit 31 */ + unsigned int reserved:1; + } bits; +}; + +/* GMAC DMA Tx Description Word 1 Register + * GMAC0 offset 0x8044 + * GMAC1 offset 0xC044 + */ +union gmac_txdesc_1 { + unsigned int bits32; + struct txdesc_word1 { + /* bit 15: 0 Tx Frame Byte Count */ + unsigned int byte_count:16; + /* bit 16 TSS segmentation use MTU setting */ + unsigned int mtu_enable:1; + /* bit 17 IPV4 Header Checksum Enable */ + unsigned int ip_chksum:1; + /* bit 18 IPV6 Tx Enable */ + unsigned int ipv6_enable:1; + /* bit 19 TCP Checksum Enable */ + unsigned int tcp_chksum:1; + /* bit 20 UDP Checksum Enable */ + unsigned int udp_chksum:1; + /* bit 21 Bypass HW offload engine */ + unsigned int bypass_tss:1; + /* bit 22 Don't update IP length field */ + unsigned int ip_fixed_len:1; + /* bit 31:23 Tx Flag, Reserved */ + unsigned int reserved:9; + } bits; +}; + +#define TSS_IP_FIXED_LEN_BIT BIT(22) +#define TSS_BYPASS_BIT BIT(21) +#define TSS_UDP_CHKSUM_BIT BIT(20) +#define TSS_TCP_CHKSUM_BIT BIT(19) +#define TSS_IPV6_ENABLE_BIT BIT(18) +#define TSS_IP_CHKSUM_BIT BIT(17) +#define TSS_MTU_ENABLE_BIT BIT(16) + +#define TSS_CHECKUM_ENABLE \ + (TSS_IP_CHKSUM_BIT | TSS_IPV6_ENABLE_BIT | \ + TSS_TCP_CHKSUM_BIT | TSS_UDP_CHKSUM_BIT) + +/* GMAC DMA Tx Description Word 2 Register + * GMAC0 offset 0x8048 + * GMAC1 offset 0xC048 + */ +union gmac_txdesc_2 { + unsigned int bits32; + unsigned int buf_adr; +}; + +/* GMAC DMA Tx Description Word 3 Register + * GMAC0 offset 0x804C + * GMAC1 offset 0xC04C + */ +union gmac_txdesc_3 { + unsigned int bits32; + struct txdesc_word3 { + /* bit 12: 0 Tx Frame Byte Count */ + unsigned int mtu_size:13; + /* bit 28:13 */ + unsigned int reserved:16; + /* bit 29 End of frame interrupt enable */ + unsigned int eofie:1; + /* bit 31:30 11: only one, 10: first, 01: last, 00: linking */ + unsigned int sof_eof:2; + } bits; +}; + +#define SOF_EOF_BIT_MASK 0x3fffffff +#define SOF_BIT 0x80000000 +#define EOF_BIT 0x40000000 +#define EOFIE_BIT BIT(29) +#define MTU_SIZE_BIT_MASK 0x1fff + +/* GMAC Tx Descriptor */ +struct gmac_txdesc { + union gmac_txdesc_0 word0; + union gmac_txdesc_1 word1; + union gmac_txdesc_2 word2; + union gmac_txdesc_3 word3; +}; + +/* GMAC DMA Rx Description Word 0 Register + * GMAC0 offset 0x8060 + * GMAC1 offset 0xC060 + */ +union gmac_rxdesc_0 { + unsigned int bits32; + struct bit_8060 { + /* bit 15:0 number of descriptors used for the current frame */ + unsigned int buffer_size:16; + /* bit 21:16 number of descriptors used for the current frame */ + unsigned int desc_count:6; + /* bit 24:22 Status of rx frame */ + unsigned int status:4; + /* bit 28:26 Check Sum Status */ + unsigned int chksum_status:3; + /* bit 29 protocol error during processing this descriptor */ + unsigned int perr:1; + /* bit 30 data error during processing this descriptor */ + unsigned int derr:1; + /* bit 31 TOE/CIS Queue Full dropped packet to default queue */ + unsigned int drop:1; + } bits; +}; + +#define GMAC_RXDESC_0_T_derr BIT(30) +#define GMAC_RXDESC_0_T_perr BIT(29) +#define GMAC_RXDESC_0_T_chksum_status(x) BIT(x + 26) +#define GMAC_RXDESC_0_T_status(x) BIT(x + 22) +#define GMAC_RXDESC_0_T_desc_count(x) BIT(x + 16) + +#define RX_CHKSUM_IP_UDP_TCP_OK 0 +#define RX_CHKSUM_IP_OK_ONLY 1 +#define RX_CHKSUM_NONE 2 +#define RX_CHKSUM_IP_ERR_UNKNOWN 4 +#define RX_CHKSUM_IP_ERR 5 +#define RX_CHKSUM_TCP_UDP_ERR 6 
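Editor's note: gmac_rxdesc_0.bits.chksum_status carries one of the status codes above (RX_CHKSUM_NUM, just below, bounds them). A sketch of how such codes would typically map onto the stack's checksum model — the function name is an assumption and the mapping follows the code names rather than verified driver behaviour:

#include <linux/skbuff.h>

static void demo_rx_csum(struct sk_buff *skb, unsigned int chksum_status)
{
	switch (chksum_status) {
	case RX_CHKSUM_IP_UDP_TCP_OK:
		/* hardware validated both the IP and L4 checksums */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		/* IP-only, none, or an error code: let the stack verify */
		skb->ip_summed = CHECKSUM_NONE;
		break;
	}
}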
+#define RX_CHKSUM_NUM 8 + +#define RX_STATUS_GOOD_FRAME 0 +#define RX_STATUS_TOO_LONG_GOOD_CRC 1 +#define RX_STATUS_RUNT_FRAME 2 +#define RX_STATUS_SFD_NOT_FOUND 3 +#define RX_STATUS_CRC_ERROR 4 +#define RX_STATUS_TOO_LONG_BAD_CRC 5 +#define RX_STATUS_ALIGNMENT_ERROR 6 +#define RX_STATUS_TOO_LONG_BAD_ALIGN 7 +#define RX_STATUS_RX_ERR 8 +#define RX_STATUS_DA_FILTERED 9 +#define RX_STATUS_BUFFER_FULL 10 +#define RX_STATUS_NUM 16 + +#define RX_ERROR_LENGTH(s) \ + ((s) == RX_STATUS_TOO_LONG_GOOD_CRC || \ + (s) == RX_STATUS_TOO_LONG_BAD_CRC || \ + (s) == RX_STATUS_TOO_LONG_BAD_ALIGN) +#define RX_ERROR_OVER(s) \ + ((s) == RX_STATUS_BUFFER_FULL) +#define RX_ERROR_CRC(s) \ + ((s) == RX_STATUS_CRC_ERROR || \ + (s) == RX_STATUS_TOO_LONG_BAD_CRC) +#define RX_ERROR_FRAME(s) \ + ((s) == RX_STATUS_ALIGNMENT_ERROR || \ + (s) == RX_STATUS_TOO_LONG_BAD_ALIGN) +#define RX_ERROR_FIFO(s) \ + (0) + +/* GMAC DMA Rx Description Word 1 Register + * GMAC0 offset 0x8064 + * GMAC1 offset 0xC064 + */ +union gmac_rxdesc_1 { + unsigned int bits32; + struct rxdesc_word1 { + /* bit 15: 0 Rx Frame Byte Count */ + unsigned int byte_count:16; + /* bit 31:16 Software ID */ + unsigned int sw_id:16; + } bits; +}; + +/* GMAC DMA Rx Description Word 2 Register + * GMAC0 offset 0x8068 + * GMAC1 offset 0xC068 + */ +union gmac_rxdesc_2 { + unsigned int bits32; + unsigned int buf_adr; +}; + +#define RX_INSERT_NONE 0 +#define RX_INSERT_1_BYTE 1 +#define RX_INSERT_2_BYTE 2 +#define RX_INSERT_3_BYTE 3 + +/* GMAC DMA Rx Description Word 3 Register + * GMAC0 offset 0x806C + * GMAC1 offset 0xC06C + */ +union gmac_rxdesc_3 { + unsigned int bits32; + struct rxdesc_word3 { + /* bit 7: 0 L3 data offset */ + unsigned int l3_offset:8; + /* bit 15: 8 L4 data offset */ + unsigned int l4_offset:8; + /* bit 23: 16 L7 data offset */ + unsigned int l7_offset:8; + /* bit 24 Duplicated ACK detected */ + unsigned int dup_ack:1; + /* bit 25 abnormal case found */ + unsigned int abnormal:1; + /* bit 26 IPV4 option or IPV6 extension header */ + unsigned int option:1; + /* bit 27 Out of Sequence packet */ + unsigned int out_of_seq:1; + /* bit 28 Control Flag is present */ + unsigned int ctrl_flag:1; + /* bit 29 End of frame interrupt enable */ + unsigned int eofie:1; + /* bit 31:30 11: only one, 10: first, 01: last, 00: linking */ + unsigned int sof_eof:2; + } bits; +}; + +/* GMAC Rx Descriptor, this is simply fitted over the queue registers */ +struct gmac_rxdesc { + union gmac_rxdesc_0 word0; + union gmac_rxdesc_1 word1; + union gmac_rxdesc_2 word2; + union gmac_rxdesc_3 word3; +}; + +/* GMAC Matching Rule Control Register 0 + * GMAC0 offset 0x8078 + * GMAC1 offset 0xC078 + */ +#define MR_L2_BIT BIT(31) +#define MR_L3_BIT BIT(30) +#define MR_L4_BIT BIT(29) +#define MR_L7_BIT BIT(28) +#define MR_PORT_BIT BIT(27) +#define MR_PRIORITY_BIT BIT(26) +#define MR_DA_BIT BIT(23) +#define MR_SA_BIT BIT(22) +#define MR_ETHER_TYPE_BIT BIT(21) +#define MR_VLAN_BIT BIT(20) +#define MR_PPPOE_BIT BIT(19) +#define MR_IP_VER_BIT BIT(15) +#define MR_IP_HDR_LEN_BIT BIT(14) +#define MR_FLOW_LABLE_BIT BIT(13) +#define MR_TOS_TRAFFIC_BIT BIT(12) +#define MR_SPR_BIT(x) BIT(x) +#define MR_SPR_BITS 0xff + +/* GMAC_AHB_WEIGHT registers + * GMAC0 offset 0x80C8 + * GMAC1 offset 0xC0C8 + */ +union gmac_ahb_weight { + unsigned int bits32; + struct bit_80C8 { + /* 4:0 */ + unsigned int hash_weight:5; + /* 9:5 */ + unsigned int rx_weight:5; + /* 14:10 */ + unsigned int tx_weight:5; + /* 19:15 Rx Data Pre Request FIFO Threshold */ + unsigned int pre_req:5; + /* 24:20 DMA TqCtrl to Start 
tqDV FIFO Threshold */ + unsigned int tq_dv_threshold:5; + /* 31:25 */ + unsigned int reserved:7; + } bits; +}; + +/* GMAC RX FLTR + * GMAC0 Offset 0xA00C + * GMAC1 Offset 0xE00C + */ +union gmac_rx_fltr { + unsigned int bits32; + struct bit1_000c { + /* Enable receive of unicast frames that are sent to STA + * address + */ + unsigned int unicast:1; + /* Enable receive of multicast frames that pass multicast + * filter + */ + unsigned int multicast:1; + /* Enable receive of broadcast frames */ + unsigned int broadcast:1; + /* Enable receive of all frames */ + unsigned int promiscuous:1; + /* Enable receive of all error frames */ + unsigned int error:1; + unsigned int reserved:27; + } bits; +}; + +/* GMAC Configuration 0 + * GMAC0 Offset 0xA018 + * GMAC1 Offset 0xE018 + */ +union gmac_config0 { + unsigned int bits32; + struct bit1_0018 { + /* 0: disable transmit */ + unsigned int dis_tx:1; + /* 1: disable receive */ + unsigned int dis_rx:1; + /* 2: transmit data loopback enable */ + unsigned int loop_back:1; + /* 3: flow control also trigged by Rx queues */ + unsigned int flow_ctrl:1; + /* 4-7: adjust IFG from 96+/-56 */ + unsigned int adj_ifg:4; + /* 8-10 maximum receive frame length allowed */ + unsigned int max_len:3; + /* 11: disable back-off function */ + unsigned int dis_bkoff:1; + /* 12: disable 16 collisions abort function */ + unsigned int dis_col:1; + /* 13: speed up timers in simulation */ + unsigned int sim_test:1; + /* 14: RX flow control enable */ + unsigned int rx_fc_en:1; + /* 15: TX flow control enable */ + unsigned int tx_fc_en:1; + /* 16: RGMII in-band status enable */ + unsigned int rgmii_en:1; + /* 17: IPv4 RX Checksum enable */ + unsigned int ipv4_rx_chksum:1; + /* 18: IPv6 RX Checksum enable */ + unsigned int ipv6_rx_chksum:1; + /* 19: Remove Rx VLAN tag */ + unsigned int rx_tag_remove:1; + /* 20 */ + unsigned int rgmm_edge:1; + /* 21 */ + unsigned int rxc_inv:1; + /* 22 */ + unsigned int ipv6_exthdr_order:1; + /* 23 */ + unsigned int rx_err_detect:1; + /* 24 */ + unsigned int port0_chk_hwq:1; + /* 25 */ + unsigned int port1_chk_hwq:1; + /* 26 */ + unsigned int port0_chk_toeq:1; + /* 27 */ + unsigned int port1_chk_toeq:1; + /* 28 */ + unsigned int port0_chk_classq:1; + /* 29 */ + unsigned int port1_chk_classq:1; + /* 30, 31 */ + unsigned int reserved:2; + } bits; +}; + +#define CONFIG0_TX_RX_DISABLE (BIT(1) | BIT(0)) +#define CONFIG0_RX_CHKSUM (BIT(18) | BIT(17)) +#define CONFIG0_FLOW_RX BIT(14) +#define CONFIG0_FLOW_TX BIT(15) +#define CONFIG0_FLOW_TX_RX (BIT(14) | BIT(15)) +#define CONFIG0_FLOW_CTL (BIT(14) | BIT(15)) + +#define CONFIG0_MAXLEN_SHIFT 8 +#define CONFIG0_MAXLEN_MASK (7 << CONFIG0_MAXLEN_SHIFT) +#define CONFIG0_MAXLEN_1536 0 +#define CONFIG0_MAXLEN_1518 1 +#define CONFIG0_MAXLEN_1522 2 +#define CONFIG0_MAXLEN_1542 3 +#define CONFIG0_MAXLEN_9k 4 /* 9212 */ +#define CONFIG0_MAXLEN_10k 5 /* 10236 */ +#define CONFIG0_MAXLEN_1518__6 6 +#define CONFIG0_MAXLEN_1518__7 7 + +/* GMAC Configuration 1 + * GMAC0 Offset 0xA01C + * GMAC1 Offset 0xE01C + */ +union gmac_config1 { + unsigned int bits32; + struct bit1_001c { + /* Flow control set threshold */ + unsigned int set_threshold:8; + /* Flow control release threshold */ + unsigned int rel_threshold:8; + unsigned int reserved:16; + } bits; +}; + +#define GMAC_FLOWCTRL_SET_MAX 32 +#define GMAC_FLOWCTRL_SET_MIN 0 +#define GMAC_FLOWCTRL_RELEASE_MAX 32 +#define GMAC_FLOWCTRL_RELEASE_MIN 0 + +/* GMAC Configuration 2 + * GMAC0 Offset 0xA020 + * GMAC1 Offset 0xE020 + */ +union gmac_config2 { + unsigned int bits32; + 
struct bit1_0020 { + /* Flow control set threshold */ + unsigned int set_threshold:16; + /* Flow control release threshold */ + unsigned int rel_threshold:16; + } bits; +}; + +/* GMAC Configuration 3 + * GMAC0 Offset 0xA024 + * GMAC1 Offset 0xE024 + */ +union gmac_config3 { + unsigned int bits32; + struct bit1_0024 { + /* Flow control set threshold */ + unsigned int set_threshold:16; + /* Flow control release threshold */ + unsigned int rel_threshold:16; + } bits; +}; + +/* GMAC STATUS + * GMAC0 Offset 0xA02C + * GMAC1 Offset 0xE02C + */ +union gmac_status { + unsigned int bits32; + struct bit1_002c { + /* Link status */ + unsigned int link:1; + /* Link speed(00->2.5M 01->25M 10->125M) */ + unsigned int speed:2; + /* Duplex mode */ + unsigned int duplex:1; + unsigned int reserved_1:1; + /* PHY interface type */ + unsigned int mii_rmii:2; + unsigned int reserved_2:25; + } bits; +}; + +#define GMAC_SPEED_10 0 +#define GMAC_SPEED_100 1 +#define GMAC_SPEED_1000 2 + +#define GMAC_PHY_MII 0 +#define GMAC_PHY_GMII 1 +#define GMAC_PHY_RGMII_100_10 2 +#define GMAC_PHY_RGMII_1000 3 + +/* Queue Header + * (1) TOE Queue Header + * (2) Non-TOE Queue Header + * (3) Interrupt Queue Header + * + * memory Layout + * TOE Queue Header + * 0x60003000 +---------------------------+ 0x0000 + * | TOE Queue 0 Header | + * | 8 * 4 Bytes | + * +---------------------------+ 0x0020 + * | TOE Queue 1 Header | + * | 8 * 4 Bytes | + * +---------------------------+ 0x0040 + * | ...... | + * | | + * +---------------------------+ + * + * Non TOE Queue Header + * 0x60002000 +---------------------------+ 0x0000 + * | Default Queue 0 Header | + * | 2 * 4 Bytes | + * +---------------------------+ 0x0008 + * | Default Queue 1 Header | + * | 2 * 4 Bytes | + * +---------------------------+ 0x0010 + * | Classification Queue 0 | + * | 2 * 4 Bytes | + * +---------------------------+ + * | Classification Queue 1 | + * | 2 * 4 Bytes | + * +---------------------------+ (n * 8 + 0x10) + * | ... 
| + * | 2 * 4 Bytes | + * +---------------------------+ (13 * 8 + 0x10) + * | Classification Queue 13 | + * | 2 * 4 Bytes | + * +---------------------------+ 0x80 + * | Interrupt Queue 0 | + * | 2 * 4 Bytes | + * +---------------------------+ + * | Interrupt Queue 1 | + * | 2 * 4 Bytes | + * +---------------------------+ + * | Interrupt Queue 2 | + * | 2 * 4 Bytes | + * +---------------------------+ + * | Interrupt Queue 3 | + * | 2 * 4 Bytes | + * +---------------------------+ + * + */ +#define TOE_QUEUE_HDR_ADDR(n) (TOE_TOE_QUE_HDR_BASE + n * 32) +#define TOE_Q_HDR_AREA_END (TOE_QUEUE_HDR_ADDR(TOE_TOE_QUEUE_MAX + 1)) +#define TOE_DEFAULT_Q_HDR_BASE(x) (TOE_NONTOE_QUE_HDR_BASE + 0x08 * (x)) +#define TOE_CLASS_Q_HDR_BASE (TOE_NONTOE_QUE_HDR_BASE + 0x10) +#define TOE_INTR_Q_HDR_BASE (TOE_NONTOE_QUE_HDR_BASE + 0x80) +#define INTERRUPT_QUEUE_HDR_ADDR(n) (TOE_INTR_Q_HDR_BASE + n * 8) +#define NONTOE_Q_HDR_AREA_END (INTERRUPT_QUEUE_HDR_ADDR(TOE_INTR_QUEUE_MAX + 1)) + +/* NONTOE Queue Header Word 0 */ +union nontoe_qhdr0 { + unsigned int bits32; + unsigned int base_size; +}; + +#define NONTOE_QHDR0_BASE_MASK (~0x0f) + +/* NONTOE Queue Header Word 1 */ +union nontoe_qhdr1 { + unsigned int bits32; + struct bit_nonqhdr1 { + /* bit 15:0 */ + unsigned int rptr:16; + /* bit 31:16 */ + unsigned int wptr:16; + } bits; +}; + +/* Non-TOE Queue Header */ +struct nontoe_qhdr { + union nontoe_qhdr0 word0; + union nontoe_qhdr1 word1; +}; + +#endif /* _GEMINI_ETHERNET_H */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 02dd5246dfae..1a49297224ed 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -103,7 +103,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = { static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem) { int i; - int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map); + int num_entries = ARRAY_SIZE(cmd_priv_map); u32 cmd_privileges = adapter->cmd_privileges; for (i = 0; i < num_entries; i++) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index e180657a02ef..c36c81959198 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4634,8 +4634,7 @@ int be_update_queues(struct be_adapter *adapter) be_schedule_worker(adapter); - /* - * The IF was destroyed and re-created. We need to clear + /* The IF was destroyed and re-created. We need to clear * all promiscuous flags valid for the destroyed IF. * Without this promisc mode is not restored during * be_open() because the driver thinks that it is diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 5385074b3b7d..e7381f8ef89d 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -20,7 +20,8 @@ #include <linux/timecounter.h> #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ + defined(CONFIG_ARM64) /* * Just figures, Motorola would have to change the offsets for * registers in the same peripheral device on different models @@ -195,7 +196,7 @@ * Evidently, ARM SoCs have the FEC block generated in a * little endian mode so adjust endianness accordingly. 
*/ -#if defined(CONFIG_ARM) +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) #define fec32_to_cpu le32_to_cpu #define fec16_to_cpu le16_to_cpu #define cpu_to_fec32 cpu_to_le32 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index a74300a4459c..7a7f3a42b2aa 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -195,7 +195,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); * account when setting it. */ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ + defined(CONFIG_ARM64) #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) #else #define OPT_FRAME_SIZE 0 @@ -1868,6 +1869,8 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) ret = clk_prepare_enable(fep->clk_ref); if (ret) goto failed_clk_ref; + + phy_reset_after_clk_enable(ndev->phydev); } else { clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_enet_out); @@ -2107,7 +2110,8 @@ static int fec_enet_get_regs_len(struct net_device *ndev) /* List of registers that can be safety be read to dump them with ethtool */ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ + defined(CONFIG_ARM64) static u32 fec_enet_register_offset[] = { FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, @@ -2840,6 +2844,7 @@ fec_enet_open(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); int ret; + bool reset_again; ret = pm_runtime_get_sync(&fep->pdev->dev); if (ret < 0) @@ -2850,6 +2855,17 @@ fec_enet_open(struct net_device *ndev) if (ret) goto clk_enable; + /* During the first fec_enet_open call the PHY isn't probed at this + * point. Therefore the phy_reset_after_clk_enable() call within + * fec_enet_clk_enable() fails. As we need this reset in order to be + * sure the PHY is working correctly we check if we need to reset again + * later when the PHY is probed + */ + if (ndev->phydev && ndev->phydev->drv) + reset_again = false; + else + reset_again = true; + /* I should reset the ring buffers here, but I don't yet know * a simple way to do that. */ @@ -2866,6 +2882,12 @@ fec_enet_open(struct net_device *ndev) if (ret) goto err_enet_mii_probe; + /* Call phy_reset_after_clk_enable() again if it failed during + * phy_reset_after_clk_enable() before because the PHY wasn't probed. 
+ */ + if (reset_again) + phy_reset_after_clk_enable(ndev->phydev); + if (fep->quirks & FEC_QUIRK_ERR006687) imx6q_cpuidle_fec_irqs_used(); @@ -3119,7 +3141,7 @@ static int fec_enet_init(struct net_device *ndev) unsigned dsize_log2 = __fls(dsize); WARN_ON(dsize != (1 << dsize_log2)); -#if defined(CONFIG_ARM) +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) fep->rx_align = 0xf; fep->tx_align = 0xf; #else diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 7f837006bb6a..3bdeb295514b 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2932,7 +2932,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id) static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, struct sk_buff *skb, bool first) { - unsigned int size = lstatus & BD_LENGTH_MASK; + int size = lstatus & BD_LENGTH_MASK; struct page *page = rxb->page; bool last = !!(lstatus & BD_LFLAG(RXBD_LAST)); @@ -2947,11 +2947,16 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, if (last) size -= skb->len; - /* in case the last fragment consisted only of the FCS */ + /* Add the last fragment if it contains something other than + * the FCS, otherwise drop it and trim off any part of the FCS + * that was already received. + */ if (size > 0) skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, rxb->page_offset + RXBUF_ALIGNMENT, size, GFAR_RXB_TRUESIZE); + else if (size < 0) + pskb_trim(skb, skb->len + size); } /* try reuse page */ diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 30000b6aa7b8..8bcf470ff5f3 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -94,15 +94,6 @@ config HNS3_HCLGE compatibility layer. The engine would be used in Hisilicon hip08 family of SoCs and further upcoming SoCs. -config HNS3_ENET - tristate "Hisilicon HNS3 Ethernet Device Support" - depends on 64BIT && PCI - depends on HNS3 && HNS3_HCLGE - ---help--- - This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08 - family of SoCs. This module depends upon HNAE3 driver to access the HNAE3 - devices and their associated operations. - config HNS3_DCB bool "Hisilicon HNS3 Data Center Bridge Support" default n @@ -112,4 +103,23 @@ config HNS3_DCB If unsure, say N. +config HNS3_HCLGEVF + tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support" + depends on PCI_MSI + depends on HNS3 + depends on HNS3_HCLGE + ---help--- + This selects the HNS3 VF drivers network acceleration engine & its hardware + compatibility layer. The engine would be used in Hisilicon hip08 family of + SoCs and further upcoming SoCs. + +config HNS3_ENET + tristate "Hisilicon HNS3 Ethernet Device Support" + depends on 64BIT && PCI + depends on HNS3 + ---help--- + This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08 + family of SoCs. This module depends upon HNAE3 driver to access the HNAE3 + devices and their associated operations. 
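Editor's note, stepping back to the gianfar hunk earlier in this chunk: making size a signed int is what the new else-branch relies on. On the last BD, size = frame_length - skb->len, which goes negative exactly when part of the 4-byte FCS was already queued with an earlier, buffer-sized fragment. With hypothetical numbers (assuming the BD length field excludes the FCS): 1536-byte buffers and a 1535-byte frame put 1539 bytes on the wire, so the first fragment carries 1536 bytes (one of them FCS), the last BD reports 1535, and size = 1535 - 1536 = -1; pskb_trim(skb, skb->len + size) then clips the skb back to 1535 bytes. The trim step in isolation (helper name assumed):

#include <linux/skbuff.h>

static void demo_trim_stray_fcs(struct sk_buff *skb, int size)
{
	/* size < 0 means skb->len already includes -size FCS bytes */
	if (size < 0)
		pskb_trim(skb, skb->len + size);
}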
+ endif # NET_VENDOR_HISILICON diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 8b5cdf490850..cac86e9ae0dd 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -1168,7 +1168,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb) int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb, enum hnae_led_state status) { - if (!mac_cb || !mac_cb->cpld_ctrl) + if (!mac_cb) return 0; return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 408b63faf9a8..acf29633ec79 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -18,6 +18,7 @@ enum _dsm_op_index { HNS_OP_LED_SET_FUNC = 0x3, HNS_OP_GET_PORT_TYPE_FUNC = 0x4, HNS_OP_GET_SFP_STAT_FUNC = 0x5, + HNS_OP_LOCATE_LED_SET_FUNC = 0x6, }; enum _dsm_rst_type { @@ -43,12 +44,17 @@ static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val) static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg) { - u32 ret; - - if (dsaf_dev->sub_ctrl) - ret = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg); - else + u32 ret = 0; + int err; + + if (dsaf_dev->sub_ctrl) { + err = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg, &ret); + if (err) + dev_err(dsaf_dev->dev, "dsaf_read_syscon error %d!\n", + err); + } else { ret = dsaf_read_reg(dsaf_dev->sc_base, reg); + } return ret; } @@ -81,6 +87,33 @@ static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type, ACPI_FREE(obj); } +static void hns_dsaf_acpi_locate_ledctrl_by_port(struct hns_mac_cb *mac_cb, + u8 op_type, u32 locate, + u32 port) +{ + union acpi_object obj_args[2], argv4; + union acpi_object *obj; + + obj_args[0].integer.type = ACPI_TYPE_INTEGER; + obj_args[0].integer.value = locate; + obj_args[1].integer.type = ACPI_TYPE_INTEGER; + obj_args[1].integer.value = port; + + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 2; + argv4.package.elements = obj_args; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev), + &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4); + if (!obj) { + dev_err(mac_cb->dev, "ledctrl fail, locate:%d port:%d!\n", + locate, port); + return; + } + + ACPI_FREE(obj); +} + static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, u16 speed, int data) { @@ -160,15 +193,23 @@ static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb) static int cpld_set_led_id(struct hns_mac_cb *mac_cb, enum hnae_led_state status) { + u32 val = 0; + int ret; + + if (!mac_cb->cpld_ctrl) + return 0; + switch (status) { case HNAE_LED_ACTIVE: - mac_cb->cpld_led_value = - dsaf_read_syscon(mac_cb->cpld_ctrl, - mac_cb->cpld_ctrl_reg); - dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B, - CPLD_LED_ON_VALUE); + ret = dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, + &val); + if (ret) + return ret; + + dsaf_set_bit(val, DSAF_LED_ANCHOR_B, CPLD_LED_ON_VALUE); dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, - mac_cb->cpld_led_value); + val); + mac_cb->cpld_led_value = val; break; case HNAE_LED_INACTIVE: dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B, @@ -184,6 +225,30 @@ static int cpld_set_led_id(struct hns_mac_cb *mac_cb, return 0; } +static int cpld_set_led_id_acpi(struct hns_mac_cb *mac_cb, + enum hnae_led_state status) +{ + switch (status) { + case HNAE_LED_ACTIVE: + hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb, 
+ HNS_OP_LOCATE_LED_SET_FUNC, + CPLD_LED_ON_VALUE, + mac_cb->mac_id); + break; + case HNAE_LED_INACTIVE: + hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb, + HNS_OP_LOCATE_LED_SET_FUNC, + CPLD_LED_DEFAULT_VALUE, + mac_cb->mac_id); + break; + default: + dev_err(mac_cb->dev, "invalid led state: %d!", status); + return -EINVAL; + } + + return 0; +} + #define RESET_REQ_OR_DREQ 1 static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type, @@ -505,12 +570,19 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb) int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) { + u32 val = 0; + int ret; + if (!mac_cb->cpld_ctrl) return -ENODEV; - *sfp_prsnt = !dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg - + MAC_SFP_PORT_OFFSET); + ret = dsaf_read_syscon(mac_cb->cpld_ctrl, + mac_cb->cpld_ctrl_reg + MAC_SFP_PORT_OFFSET, + &val); + if (ret) + return ret; + *sfp_prsnt = !val; return 0; } @@ -560,7 +632,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en) #define RX_CSR(lane, reg) ((0x4080 + (reg) * 0x0002 + (lane) * 0x0200) * 2) u64 reg_offset = RX_CSR(lane_id[mac_cb->mac_id], 0); - int sfp_prsnt; + int sfp_prsnt = 0; int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt); if (!mac_cb->phy_dev) { @@ -572,7 +644,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en) } if (mac_cb->serdes_ctrl) { - u32 origin; + u32 origin = 0; if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) { #define HILINK_ACCESS_SEL_CFG 0x40008 @@ -589,7 +661,10 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en) HILINK_ACCESS_SEL_CFG, 3); } - origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset); + ret = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset, + &origin); + if (ret) + return ret; dsaf_set_field(origin, 1ull << 10, 10, en); dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); @@ -660,7 +735,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev) } else if (is_acpi_node(dsaf_dev->dev->fwnode)) { misc_op->cpld_set_led = hns_cpld_set_led_acpi; misc_op->cpld_reset_led = cpld_led_reset_acpi; - misc_op->cpld_set_led_id = cpld_set_led_id; + misc_op->cpld_set_led_id = cpld_set_led_id_acpi; misc_op->dsaf_reset = hns_dsaf_rst_acpi; misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 46a52d9bb196..886cbbf25761 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -1034,12 +1034,9 @@ static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value) regmap_write(base, reg, value); } -static inline u32 dsaf_read_syscon(struct regmap *base, u32 reg) +static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val) { - unsigned int val; - - regmap_read(base, reg, &val); - return val; + return regmap_read(base, reg, val); } #define dsaf_read_dev(a, reg) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile index a9349e1f3e51..002534f12b66 100644 --- a/drivers/net/ethernet/hisilicon/hns3/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -1,7 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0+ # # Makefile for the HISILICON network device drivers. 
# obj-$(CONFIG_HNS3) += hns3pf/ +obj-$(CONFIG_HNS3) += hns3vf/ obj-$(CONFIG_HNS3) += hnae3.o + +obj-$(CONFIG_HNS3_ENET) += hns3.o +hns3-objs = hns3_enet.o hns3_ethtool.o + +hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h new file mode 100644 index 000000000000..3e9203ea42a6 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. */ + +#ifndef __HCLGE_MBX_H +#define __HCLGE_MBX_H +#include <linux/init.h> +#include <linux/mutex.h> +#include <linux/types.h> + +#define HCLGE_MBX_VF_MSG_DATA_NUM 16 + +enum HCLGE_MBX_OPCODE { + HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */ + HCLGE_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */ + HCLGE_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */ + HCLGE_MBX_SET_VLAN, /* (VF -> PF) set VLAN */ + HCLGE_MBX_MAP_RING_TO_VECTOR, /* (VF -> PF) map ring-to-vector */ + HCLGE_MBX_UNMAP_RING_TO_VECTOR, /* (VF -> PF) unmap ring-to-vector */ + HCLGE_MBX_SET_PROMISC_MODE, /* (VF -> PF) set promiscuous mode */ + HCLGE_MBX_SET_MACVLAN, /* (VF -> PF) set unicast filter */ + HCLGE_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */ + HCLGE_MBX_GET_QINFO, /* (VF -> PF) get queue config */ + HCLGE_MBX_GET_TCINFO, /* (VF -> PF) get TC config */ + HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */ + HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */ + HCLGE_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */ + HCLGE_MBX_PF_VF_RESP, /* (PF -> VF) generate response to VF */ + HCLGE_MBX_GET_BDNUM, /* (VF -> PF) get BD num */ + HCLGE_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */ + HCLGE_MBX_GET_STREAMID, /* (VF -> PF) get stream id */ + HCLGE_MBX_SET_AESTART, /* (VF -> PF) start ae */ + HCLGE_MBX_SET_TSOSTATS, /* (VF -> PF) get tso stats */ + HCLGE_MBX_LINK_STAT_CHANGE, /* (PF -> VF) link status has changed */ + HCLGE_MBX_GET_BASE_CONFIG, /* (VF -> PF) get config */ + HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */ + HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */ + HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */ +}; + +/* below are per-VF mac-vlan subcodes */ +enum hclge_mbx_mac_vlan_subcode { + HCLGE_MBX_MAC_VLAN_UC_MODIFY = 0, /* modify UC mac addr */ + HCLGE_MBX_MAC_VLAN_UC_ADD, /* add a new UC mac addr */ + HCLGE_MBX_MAC_VLAN_UC_REMOVE, /* remove a UC mac addr */ + HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */ + HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */ + HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */ + HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */ +}; + +/* below are per-VF vlan cfg subcodes */ +enum hclge_mbx_vlan_cfg_subcode { + HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */ + HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */ + HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */ +}; + +#define HCLGE_MBX_MAX_MSG_SIZE 16 +#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8 + +struct hclgevf_mbx_resp_status { + struct mutex mbx_mutex; /* protects against contending sync cmd resp */ + u32 origin_mbx_msg; + bool received_resp; + int resp_status; + u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE]; +}; + +struct hclge_mbx_vf_to_pf_cmd { + u8 rsv; + u8 mbx_src_vfid; /* Auto filled by IMP */ + u8 rsv1[2]; + u8 msg_len; + u8 rsv2[3]; + u8 msg[HCLGE_MBX_MAX_MSG_SIZE]; +}; + +struct hclge_mbx_pf_to_vf_cmd { + u8 dest_vfid; + u8 rsv[3]; + u8 msg_len; + u8 rsv1[3]; + u16
msg[8]; +}; + +#define hclge_mbx_ring_ptr_move_crq(crq) \ + (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num) +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 5bcb2238acb2..02145f2de820 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -196,9 +196,18 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) const struct pci_device_id *id; struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; - int ret = 0; + int ret = 0, lock_acquired; + + /* we can get deadlocked if SRIOV is being enabled in context to probe + * and probe gets called again in same context. This can happen when + * pci_enable_sriov() is called to create VFs from PF probes context. + * Therefore, for simplicity uniformly defering further probing in all + * cases where we detect contention. + */ + lock_acquired = mutex_trylock(&hnae3_common_lock); + if (!lock_acquired) + return -EPROBE_DEFER; - mutex_lock(&hnae3_common_lock); list_add_tail(&ae_dev->node, &hnae3_ae_dev_list); /* Check if there are matched ae_algo */ @@ -211,6 +220,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) if (!ae_dev->ops) { dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n"); + ret = -EOPNOTSUPP; goto out_err; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 67c59e1039f2..fd06bc78c58e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -133,11 +133,16 @@ struct hnae3_vector_info { #define HNAE3_RING_TYPE_B 0 #define HNAE3_RING_TYPE_TX 0 #define HNAE3_RING_TYPE_RX 1 +#define HNAE3_RING_GL_IDX_S 0 +#define HNAE3_RING_GL_IDX_M GENMASK(1, 0) +#define HNAE3_RING_GL_RX 0 +#define HNAE3_RING_GL_TX 1 struct hnae3_ring_chain_node { struct hnae3_ring_chain_node *next; u32 tqp_index; u32 flag; + u32 int_gl_idx; }; #define HNAE3_IS_TX_RING(node) \ @@ -274,10 +279,14 @@ struct hnae3_ae_dev { * Get firmware version * get_mdix_mode() * Get media typr of phy + * enable_vlan_filter() + * Enable vlan filter * set_vlan_filter() * Set vlan filter config of Ports * set_vf_vlan_filter() * Set vlan filter config of vf + * enable_hw_strip_rxvtag() + * Enable/disable hardware strip vlan tag of packets received */ struct hnae3_ae_ops { int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); @@ -347,7 +356,8 @@ struct hnae3_ae_ops { u32 stringset, u8 *data); int (*get_sset_count)(struct hnae3_handle *handle, int stringset); - void (*get_regs)(struct hnae3_handle *handle, void *data); + void (*get_regs)(struct hnae3_handle *handle, u32 *version, + void *data); int (*get_regs_len)(struct hnae3_handle *handle); u32 (*get_rss_key_size)(struct hnae3_handle *handle); @@ -380,12 +390,23 @@ struct hnae3_ae_ops { void (*get_mdix_mode)(struct hnae3_handle *handle, u8 *tp_mdix_ctrl, u8 *tp_mdix); + void (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable); int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill); int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid, u16 vlan, u8 qos, __be16 proto); + int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable); void (*reset_event)(struct hnae3_handle *handle, enum hnae3_reset_type reset); + void (*get_channels)(struct hnae3_handle *handle, + struct ethtool_channels *ch); + void (*get_tqps_and_rss_info)(struct hnae3_handle *h, + u16 *free_tqps, u16 *max_rss_size); + int (*set_channels)(struct hnae3_handle 
*handle, u32 new_tqps_num); + void (*get_flowctrl_adv)(struct hnae3_handle *handle, + u32 *flowctrl_adv); + int (*set_led_id)(struct hnae3_handle *handle, + enum ethtool_phys_id_state status); }; struct hnae3_dcb_ops { @@ -435,6 +456,8 @@ struct hnae3_knic_private_info { u16 num_tqps; /* total number of TQPs in this handle */ struct hnae3_queue **tqp; /* array base of all TQPs in this instance */ const struct hnae3_dcb_ops *dcb_ops; + + u16 int_rl_setting; }; struct hnae3_roce_private_info { @@ -452,9 +475,10 @@ struct hnae3_unic_private_info { struct hnae3_queue **tqp; /* array base of all TQPs of this instance */ }; -#define HNAE3_SUPPORT_MAC_LOOPBACK 1 -#define HNAE3_SUPPORT_PHY_LOOPBACK 2 -#define HNAE3_SUPPORT_SERDES_LOOPBACK 4 +#define HNAE3_SUPPORT_MAC_LOOPBACK BIT(0) +#define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1) +#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2) +#define HNAE3_SUPPORT_VF BIT(3) struct hnae3_handle { struct hnae3_client *client; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c index 925619a7c50a..eb82700da7d0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c @@ -93,7 +93,7 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle) { struct net_device *dev = handle->kinfo.netdev; - if (!handle->kinfo.dcb_ops) + if ((!handle->kinfo.dcb_ops) || (handle->flags & HNAE3_SUPPORT_VF)) return; dev->dcbnl_ops = &hns3_dcbnl_ops; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 59415090ff0f..601b6295d3f8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -52,6 +52,8 @@ static const struct pci_device_id hns3_pci_tbl[] = { HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, /* required last entry */ {0, } }; @@ -156,43 +158,68 @@ static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) napi_disable(&tqp_vector->napi); } -static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector, - u32 gl_value) +void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, + u32 rl_value) { - /* this defines the configuration for GL (Interrupt Gap Limiter) - * GL defines inter interrupt gap. - * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing - */ - writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); - writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); - writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET); -} + u32 rl_reg = hns3_rl_usec_to_reg(rl_value); -static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector, - u32 rl_value) -{ /* this defines the configuration for RL (Interrupt Rate Limiter). * Rl defines rate of interrupts i.e. number of interrupts-per-second * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing */ - writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); + + if (rl_reg > 0 && !tqp_vector->tx_group.gl_adapt_enable && + !tqp_vector->rx_group.gl_adapt_enable) + /* According to the hardware, the range of rl_reg is + * 0-59 and the unit is 4. 
+ */ + rl_reg |= HNS3_INT_RL_ENABLE_MASK; + + writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); +} + +void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector, + u32 gl_value) +{ + u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value); + + writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); +} + +void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, + u32 gl_value) +{ + u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value); + + writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); } -static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector) +static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, + struct hns3_nic_priv *priv) { + struct hnae3_handle *h = priv->ae_handle; + /* initialize the configuration for interrupt coalescing. * 1. GL (Interrupt Gap Limiter) * 2. RL (Interrupt Rate Limiter) */ - /* Default :enable interrupt coalesce */ - tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K; + /* Default: enable interrupt coalescing self-adaptive and GL */ + tqp_vector->tx_group.gl_adapt_enable = 1; + tqp_vector->rx_group.gl_adapt_enable = 1; + tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K; - hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K); - /* for now we are disabling Interrupt RL - we - * will re-enable later - */ - hns3_set_vector_coalesc_rl(tqp_vector, 0); + tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K; + + hns3_set_vector_coalesce_tx_gl(tqp_vector, + tqp_vector->tx_group.int_gl); + hns3_set_vector_coalesce_rx_gl(tqp_vector, + tqp_vector->rx_group.int_gl); + + /* Default: disable RL */ + h->kinfo.int_rl_setting = 0; + hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); + tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW; tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW; } @@ -245,6 +272,8 @@ static int hns3_nic_net_up(struct net_device *netdev) if (ret) goto out_start_err; + clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); + return 0; out_start_err: @@ -284,6 +313,9 @@ static void hns3_nic_net_down(struct net_device *netdev) const struct hnae3_ae_ops *ops; int i; + if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return; + /* stop ae_dev */ ops = priv->ae_handle->ae_algo->ops; if (ops->stop) @@ -721,6 +753,58 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); } +static int hns3_fill_desc_vtags(struct sk_buff *skb, + struct hns3_enet_ring *tx_ring, + u32 *inner_vlan_flag, + u32 *out_vlan_flag, + u16 *inner_vtag, + u16 *out_vtag) +{ +#define HNS3_TX_VLAN_PRIO_SHIFT 13 + + if (skb->protocol == htons(ETH_P_8021Q) && + !(tx_ring->tqp->handle->kinfo.netdev->features & + NETIF_F_HW_VLAN_CTAG_TX)) { + /* When HW VLAN acceleration is turned off, and the stack + * sets the protocol to 802.1q, the driver just need to + * set the protocol to the encapsulated ethertype. + */ + skb->protocol = vlan_get_protocol(skb); + return 0; + } + + if (skb_vlan_tag_present(skb)) { + u16 vlan_tag; + + vlan_tag = skb_vlan_tag_get(skb); + vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT; + + /* Based on hw strategy, use out_vtag in two layer tag case, + * and use inner_vtag in one tag case. 
+ */ + if (skb->protocol == htons(ETH_P_8021Q)) { + hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); + *out_vtag = vlan_tag; + } else { + hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); + *inner_vtag = vlan_tag; + } + } else if (skb->protocol == htons(ETH_P_8021Q)) { + struct vlan_ethhdr *vhdr; + int rc; + + rc = skb_cow_head(skb, 0); + if (rc < 0) + return rc; + vhdr = (struct vlan_ethhdr *)skb->data; + vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7) + << HNS3_TX_VLAN_PRIO_SHIFT); + } + + skb->protocol = vlan_get_protocol(skb); + return 0; +} + static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, enum hns_desc_type type) @@ -731,6 +815,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, u16 bdtp_fe_sc_vld_ra_ri = 0; u32 type_cs_vlan_tso = 0; struct sk_buff *skb; + u16 inner_vtag = 0; + u16 out_vtag = 0; u32 paylen = 0; u16 mss = 0; __be16 protocol; @@ -754,15 +840,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, skb = (struct sk_buff *)priv; paylen = skb->len; + ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso, + &ol_type_vlan_len_msec, + &inner_vtag, &out_vtag); + if (unlikely(ret)) + return ret; + if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_reset_mac_len(skb); protocol = skb->protocol; - /* vlan packet*/ - if (protocol == htons(ETH_P_8021Q)) { - protocol = vlan_get_protocol(skb); - skb->protocol = protocol; - } ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); if (ret) return ret; @@ -788,6 +875,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, cpu_to_le32(type_cs_vlan_tso); desc->tx.paylen = cpu_to_le32(paylen); desc->tx.mss = cpu_to_le16(mss); + desc->tx.vlan_tag = cpu_to_le16(inner_vtag); + desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); } /* move ring pointer to next.*/ @@ -1029,25 +1118,50 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) static int hns3_nic_set_features(struct net_device *netdev, netdev_features_t features) { + netdev_features_t changed = netdev->features ^ features; struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int ret; - if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { - priv->ops.fill_desc = hns3_fill_desc_tso; - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; - } else { - priv->ops.fill_desc = hns3_fill_desc; - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; + if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { + priv->ops.fill_desc = hns3_fill_desc_tso; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; + } else { + priv->ops.fill_desc = hns3_fill_desc; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; + } + } + + if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && + h->ae_algo->ops->enable_vlan_filter) { + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + h->ae_algo->ops->enable_vlan_filter(h, true); + else + h->ae_algo->ops->enable_vlan_filter(h, false); + } + + if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && + h->ae_algo->ops->enable_hw_strip_rxvtag) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true); + else + ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false); + + if (ret) + return ret; } netdev->features = features; return 0; } -static void -hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +static void hns3_nic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct hns3_nic_priv 
*priv = netdev_priv(netdev); int queue_num = priv->ae_handle->kinfo.num_tqps; + struct hnae3_handle *handle = priv->ae_handle; struct hns3_enet_ring *ring; unsigned int start; unsigned int idx; @@ -1055,6 +1169,13 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) u64 rx_bytes = 0; u64 tx_pkts = 0; u64 rx_pkts = 0; + u64 tx_drop = 0; + u64 rx_drop = 0; + + if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return; + + handle->ae_algo->ops->update_stats(handle, &netdev->stats); for (idx = 0; idx < queue_num; idx++) { /* fetch the tx stats */ @@ -1063,6 +1184,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) start = u64_stats_fetch_begin_irq(&ring->syncp); tx_bytes += ring->stats.tx_bytes; tx_pkts += ring->stats.tx_pkts; + tx_drop += ring->stats.tx_busy; + tx_drop += ring->stats.sw_err_cnt; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); /* fetch the rx stats */ @@ -1071,6 +1194,9 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) start = u64_stats_fetch_begin_irq(&ring->syncp); rx_bytes += ring->stats.rx_bytes; rx_pkts += ring->stats.rx_pkts; + rx_drop += ring->stats.non_vld_descs; + rx_drop += ring->stats.err_pkt_len; + rx_drop += ring->stats.l2_err; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); } @@ -1086,8 +1212,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) stats->rx_missed_errors = netdev->stats.rx_missed_errors; stats->tx_errors = netdev->stats.tx_errors; - stats->rx_dropped = netdev->stats.rx_dropped; - stats->tx_dropped = netdev->stats.tx_dropped; + stats->rx_dropped = rx_drop + netdev->stats.rx_dropped; + stats->tx_dropped = tx_drop + netdev->stats.tx_dropped; stats->collisions = netdev->stats.collisions; stats->rx_over_errors = netdev->stats.rx_over_errors; stats->rx_frame_errors = netdev->stats.rx_frame_errors; @@ -1317,6 +1443,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) return ret; } + netdev->mtu = new_mtu; + /* if the netdev was running earlier, bring it up again */ if (if_running && hns3_nic_net_open(netdev)) ret = -EINVAL; @@ -1476,6 +1604,8 @@ static struct pci_driver hns3_driver = { /* set default feature to hns3 */ static void hns3_set_default_feature(struct net_device *netdev) { + struct hnae3_handle *h = hns3_get_handle(netdev); + netdev->priv_flags |= IFF_UNICAST_FLT; netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -1490,6 +1620,7 @@ static void hns3_set_default_feature(struct net_device *netdev) netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | @@ -1503,11 +1634,15 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; + + if (!(h->flags & HNAE3_SUPPORT_VF)) + netdev->hw_features |= + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX; } static int hns3_alloc_buffer(struct hns3_enet_ring *ring, @@ -2083,6 +2218,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, 
prefetchw(skb->data); + /* Based on hw strategy, the tag offloaded will be stored at + * ot_vlan_tag in two layer tag case, and stored at vlan_tag + * in one layer tag case. + */ + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + u16 vlan_tag; + + vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + if (!(vlan_tag & VLAN_VID_MASK)) + vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + if (vlan_tag & VLAN_VID_MASK) + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vlan_tag); + } + bnum = 1; if (length <= HNS3_RX_HEAD_SIZE) { memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); @@ -2301,25 +2452,22 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) { - u16 rx_int_gl, tx_int_gl; - bool rx, tx; - - rx = hns3_get_new_int_gl(&tqp_vector->rx_group); - tx = hns3_get_new_int_gl(&tqp_vector->tx_group); - rx_int_gl = tqp_vector->rx_group.int_gl; - tx_int_gl = tqp_vector->tx_group.int_gl; - if (rx && tx) { - if (rx_int_gl > tx_int_gl) { - tqp_vector->tx_group.int_gl = rx_int_gl; - tqp_vector->tx_group.flow_level = - tqp_vector->rx_group.flow_level; - hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl); - } else { - tqp_vector->rx_group.int_gl = tx_int_gl; - tqp_vector->rx_group.flow_level = - tqp_vector->tx_group.flow_level; - hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl); - } + struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; + struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; + bool rx_update, tx_update; + + if (rx_group->gl_adapt_enable) { + rx_update = hns3_get_new_int_gl(rx_group); + if (rx_update) + hns3_set_vector_coalesce_rx_gl(tqp_vector, + rx_group->int_gl); + } + + if (tx_group->gl_adapt_enable) { + tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group); + if (tx_update) + hns3_set_vector_coalesce_tx_gl(tqp_vector, + tx_group->int_gl); } } @@ -2380,6 +2528,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, cur_chain->tqp_index = tx_ring->tqp->tqp_index; hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, HNAE3_RING_TYPE_TX); + hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); cur_chain->next = NULL; @@ -2395,6 +2545,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, chain->tqp_index = tx_ring->tqp->tqp_index; hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, HNAE3_RING_TYPE_TX); + hnae_set_field(chain->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, + HNAE3_RING_GL_TX); cur_chain = chain; } @@ -2406,6 +2560,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, cur_chain->tqp_index = rx_ring->tqp->tqp_index; hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, HNAE3_RING_TYPE_RX); + hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); rx_ring = rx_ring->next; } @@ -2419,6 +2575,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, chain->tqp_index = rx_ring->tqp->tqp_index; hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, HNAE3_RING_TYPE_RX); + hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + cur_chain = chain; rx_ring = rx_ring->next; @@ -2507,7 +2666,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) tqp_vector->rx_group.total_packets = 0; tqp_vector->tx_group.total_bytes = 0; tqp_vector->tx_group.total_packets = 0; - hns3_vector_gl_rl_init(tqp_vector); + 
hns3_vector_gl_rl_init(tqp_vector, priv); tqp_vector->handle = h; ret = hns3_get_vector_ring_chain(tqp_vector, @@ -2649,6 +2808,19 @@ err: return ret; } +static void hns3_put_ring_config(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + int i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + devm_kfree(priv->dev, priv->ring_data[i].ring); + devm_kfree(priv->dev, + priv->ring_data[i + h->kinfo.num_tqps].ring); + } + devm_kfree(priv->dev, priv->ring_data); +} + static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) { int ret; @@ -2785,8 +2957,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv) h->ae_algo->ops->reset_queue(h, i); hns3_fini_ring(priv->ring_data[i].ring); + devm_kfree(priv->dev, priv->ring_data[i].ring); hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); + devm_kfree(priv->dev, + priv->ring_data[i + h->kinfo.num_tqps].ring); } + devm_kfree(priv->dev, priv->ring_data); return 0; } @@ -3142,8 +3318,8 @@ static int hns3_reset_notify(struct hnae3_handle *handle, switch (type) { case HNAE3_UP_CLIENT: - ret = hns3_reset_notify_up_enet(handle); - break; + ret = hns3_reset_notify_up_enet(handle); + break; case HNAE3_DOWN_CLIENT: ret = hns3_reset_notify_down_enet(handle); break; @@ -3160,6 +3336,115 @@ static int hns3_reset_notify(struct hnae3_handle *handle, return ret; } +static u16 hns3_get_max_available_channels(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + u16 free_tqps, max_rss_size, max_tqps; + + h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); + max_tqps = h->kinfo.num_tc * max_rss_size; + + return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); +} + +static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret; + + ret = h->ae_algo->ops->set_channels(h, new_tqp_num); + if (ret) + return ret; + + ret = hns3_get_ring_config(priv); + if (ret) + return ret; + + ret = hns3_nic_init_vector_data(priv); + if (ret) + goto err_uninit_vector; + + ret = hns3_init_all_ring(priv); + if (ret) + goto err_put_ring; + + return 0; + +err_put_ring: + hns3_put_ring_config(priv); +err_uninit_vector: + hns3_nic_uninit_vector_data(priv); + return ret; +} + +static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num) +{ + return (new_tqp_num / num_tc) * num_tc; +} + +int hns3_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_knic_private_info *kinfo = &h->kinfo; + bool if_running = netif_running(netdev); + u32 new_tqp_num = ch->combined_count; + u16 org_tqp_num; + int ret; + + if (ch->rx_count || ch->tx_count) + return -EINVAL; + + if (new_tqp_num > hns3_get_max_available_channels(netdev) || + new_tqp_num < kinfo->num_tc) { + dev_err(&netdev->dev, + "Change tqps fail, the tqp range is from %d to %d", + kinfo->num_tc, + hns3_get_max_available_channels(netdev)); + return -EINVAL; + } + + new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num); + if (kinfo->num_tqps == new_tqp_num) + return 0; + + if (if_running) + dev_close(netdev); + + hns3_clear_all_ring(h); + + ret = hns3_nic_uninit_vector_data(priv); + if (ret) { + dev_err(&netdev->dev, + "Unbind vector with tqp fail, nothing is changed"); + goto open_netdev; + } + + hns3_uninit_all_ring(priv); + + org_tqp_num = h->kinfo.num_tqps; + ret = 
hns3_modify_tqp_num(netdev, new_tqp_num); + if (ret) { + ret = hns3_modify_tqp_num(netdev, org_tqp_num); + if (ret) { + /* If revert to old tqp failed, fatal error occurred */ + dev_err(&netdev->dev, + "Revert to old tqp num fail, ret=%d", ret); + return ret; + } + dev_info(&netdev->dev, + "Change tqp num fail, Revert to old tqp num"); + } + +open_netdev: + if (if_running) + dev_open(netdev); + + return ret; +} + static const struct hnae3_client_ops client_ops = { .init_instance = hns3_client_init, .uninit_instance = hns3_client_uninit, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 8a9de759957b..213f501b30bb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -451,10 +451,14 @@ enum hns3_link_mode_bits { HNS3_LM_COUNT = 15 }; -#define HNS3_INT_GL_50K 0x000A -#define HNS3_INT_GL_20K 0x0019 -#define HNS3_INT_GL_18K 0x001B -#define HNS3_INT_GL_8K 0x003E +#define HNS3_INT_GL_MAX 0x1FE0 +#define HNS3_INT_GL_50K 0x0014 +#define HNS3_INT_GL_20K 0x0032 +#define HNS3_INT_GL_18K 0x0036 +#define HNS3_INT_GL_8K 0x007C + +#define HNS3_INT_RL_MAX 0x00EC +#define HNS3_INT_RL_ENABLE_MASK 0x40 struct hns3_enet_ring_group { /* array of pointers to rings */ @@ -464,6 +468,7 @@ struct hns3_enet_ring_group { u16 count; enum hns3_flow_level_range flow_level; u16 int_gl; + u8 gl_adapt_enable; }; struct hns3_enet_tqp_vector { @@ -594,7 +599,15 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) #define hns3_get_handle(ndev) \ (((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle) +#define hns3_gl_usec_to_reg(int_gl) (int_gl >> 1) +#define hns3_gl_round_down(int_gl) round_down(int_gl, 2) + +#define hns3_rl_usec_to_reg(int_rl) (int_rl >> 2) +#define hns3_rl_round_down(int_rl) round_down(int_rl, 4) + void hns3_ethtool_set_ops(struct net_device *netdev); +int hns3_set_channels(struct net_device *netdev, + struct ethtool_channels *ch); bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); int hns3_init_all_ring(struct hns3_nic_priv *priv); @@ -604,6 +617,13 @@ int hns3_clean_rx_ring( struct hns3_enet_ring *ring, int budget, void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)); +void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector, + u32 gl_value); +void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, + u32 gl_value); +void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, + u32 rl_value); + #ifdef CONFIG_HNS3_DCB void hns3_dcbnl_setup(struct hnae3_handle *handle); #else diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index a21470c72da3..b034c7f24eda 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -15,26 +15,25 @@ struct hns3_stats { char stats_string[ETH_GSTRING_LEN]; - int stats_size; int stats_offset; }; /* tqp related stats */ #define HNS3_TQP_STAT(_string, _member) { \ .stats_string = _string, \ - .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \ - .stats_offset = offsetof(struct hns3_enet_ring, stats), \ -} \ + .stats_offset = offsetof(struct hns3_enet_ring, stats) +\ + offsetof(struct ring_stats, _member), \ +} static const struct hns3_stats hns3_txq_stats[] = { /* Tx per-queue statistics */ - HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt), - 
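
The coalesce macros above imply the hardware granularity: the GL register counts 2 us units and the RL register 4 us units, hence the round_down() steps. A hedged usage fragment (values illustrative):

u32 gl_reg = hns3_gl_usec_to_reg(hns3_gl_round_down(37)); /* 36 us -> reg 18 */
u32 rl_reg = hns3_rl_usec_to_reg(hns3_rl_round_down(50)); /* 48 us -> reg 12 */
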
HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt), - HNS3_TQP_STAT("tx_pkts", tx_pkts), - HNS3_TQP_STAT("tx_bytes", tx_bytes), - HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt), - HNS3_TQP_STAT("tx_restart_queue", restart_queue), + HNS3_TQP_STAT("io_err_cnt", io_err_cnt), + HNS3_TQP_STAT("tx_dropped", sw_err_cnt), + HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), + HNS3_TQP_STAT("packets", tx_pkts), + HNS3_TQP_STAT("bytes", tx_bytes), + HNS3_TQP_STAT("errors", tx_err_cnt), + HNS3_TQP_STAT("tx_wake", restart_queue), HNS3_TQP_STAT("tx_busy", tx_busy), }; @@ -42,18 +41,18 @@ static const struct hns3_stats hns3_txq_stats[] = { static const struct hns3_stats hns3_rxq_stats[] = { /* Rx per-queue statistics */ - HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt), - HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt), - HNS3_TQP_STAT("rx_pkts", rx_pkts), - HNS3_TQP_STAT("rx_bytes", rx_bytes), - HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt), - HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt), - HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len), - HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs), - HNS3_TQP_STAT("rx_err_bd_num", err_bd_num), - HNS3_TQP_STAT("rx_l2_err", l2_err), - HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err), + HNS3_TQP_STAT("io_err_cnt", io_err_cnt), + HNS3_TQP_STAT("rx_dropped", sw_err_cnt), + HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), + HNS3_TQP_STAT("packets", rx_pkts), + HNS3_TQP_STAT("bytes", rx_bytes), + HNS3_TQP_STAT("errors", rx_err_cnt), + HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt), + HNS3_TQP_STAT("err_pkt_len", err_pkt_len), + HNS3_TQP_STAT("non_vld_descs", non_vld_descs), + HNS3_TQP_STAT("err_bd_num", err_bd_num), + HNS3_TQP_STAT("l2_err", l2_err), + HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err), }; #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) @@ -389,9 +388,9 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset) } static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, - u32 stat_count, u32 num_tqps) + u32 stat_count, u32 num_tqps, const char *prefix) { -#define MAX_PREFIX_SIZE (8 + 4) +#define MAX_PREFIX_SIZE (6 + 4) u32 size_left; u32 i, j; u32 n1; @@ -401,7 +400,8 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, data[ETH_GSTRING_LEN - 1] = '\0'; /* first, prepend the prefix string */ - n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i); + n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_", + prefix, i); n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); size_left = (ETH_GSTRING_LEN - 1) - n1; @@ -417,14 +417,16 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; + const char tx_prefix[] = "txq"; + const char rx_prefix[] = "rxq"; /* get strings for Tx */ data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT, - kinfo->num_tqps); + kinfo->num_tqps, tx_prefix); /* get strings for Rx */ data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT, - kinfo->num_tqps); + kinfo->num_tqps, rx_prefix); return data; } @@ -455,13 +457,13 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hns3_enet_ring *ring; u8 *stat; - u32 i; + int i, j; /* get stats for Tx */ for (i = 0; i < kinfo->num_tqps; i++) { ring = nic_priv->ring_data[i].ring; - for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) { - stat = (u8 *)ring + hns3_txq_stats[i].stats_offset; + for 
(j = 0; j < HNS3_TXQ_STATS_COUNT; j++) { + stat = (u8 *)ring + hns3_txq_stats[j].stats_offset; *data++ = *(u64 *)stat; } } @@ -469,8 +471,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) /* get stats for Rx */ for (i = 0; i < kinfo->num_tqps; i++) { ring = nic_priv->ring_data[i + kinfo->num_tqps].ring; - for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) { - stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset; + for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) { + stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset; *data++ = *(u64 *)stat; } } @@ -559,10 +561,23 @@ static void hns3_get_pauseparam(struct net_device *netdev, &param->rx_pause, &param->tx_pause); } +static int hns3_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *param) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->set_pauseparam) + return h->ae_algo->ops->set_pauseparam(h, param->autoneg, + param->rx_pause, + param->tx_pause); + return -EOPNOTSUPP; +} + static int hns3_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct hnae3_handle *h = hns3_get_handle(netdev); + u32 flowctrl_adv = 0; u32 supported_caps; u32 advertised_caps; u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; @@ -638,6 +653,8 @@ static int hns3_get_link_ksettings(struct net_device *netdev, if (!cmd->base.autoneg) advertised_caps &= ~HNS3_LM_AUTONEG_BIT; + advertised_caps &= ~HNS3_LM_PAUSE_BIT; + /* now, map driver link modes to ethtool link modes */ hns3_driv_to_eth_caps(supported_caps, cmd, false); hns3_driv_to_eth_caps(advertised_caps, cmd, true); @@ -650,6 +667,18 @@ static int hns3_get_link_ksettings(struct net_device *netdev, /* 4.mdio_support */ cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; + /* 5.get flow control settings */ + if (h->ae_algo->ops->get_flowctrl_adv) + h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv); + + if (flowctrl_adv & ADVERTISED_Pause) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Pause); + + if (flowctrl_adv & ADVERTISED_Asym_Pause) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + return 0; } @@ -730,7 +759,7 @@ static int hns3_get_rxnfc(struct net_device *netdev, switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = h->kinfo.num_tc * h->kinfo.rss_size; + cmd->data = h->kinfo.rss_size; break; case ETHTOOL_GRXFH: return h->ae_algo->ops->get_rss_tuple(h, cmd); @@ -849,6 +878,241 @@ static int hns3_nway_reset(struct net_device *netdev) return genphy_restart_aneg(phy); } +static void hns3_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->get_channels) + h->ae_algo->ops->get_channels(h, ch); +} + +static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *cmd) +{ + struct hns3_enet_tqp_vector *tx_vector, *rx_vector; + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u16 queue_num = h->kinfo.num_tqps; + + if (queue >= queue_num) { + netdev_err(netdev, + "Invalid queue value %d! 
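
The per-queue vector lookups just below rely on the ring_data[] layout assumed throughout this patch: TX rings occupy slots [0, num_tqps) and each queue's RX twin sits num_tqps slots later. A hypothetical accessor that makes the convention explicit (not driver code):

static struct hns3_enet_ring *rx_ring_of(struct hns3_nic_priv *priv,
					 u16 num_tqps, u32 queue)
{
	/* RX counterpart of TX queue `queue` lives num_tqps slots later */
	return priv->ring_data[num_tqps + queue].ring;
}
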
Queue max id=%d\n", + queue, queue_num - 1); + return -EINVAL; + } + + tx_vector = priv->ring_data[queue].ring->tqp_vector; + rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector; + + cmd->use_adaptive_tx_coalesce = tx_vector->tx_group.gl_adapt_enable; + cmd->use_adaptive_rx_coalesce = rx_vector->rx_group.gl_adapt_enable; + + cmd->tx_coalesce_usecs = tx_vector->tx_group.int_gl; + cmd->rx_coalesce_usecs = rx_vector->rx_group.int_gl; + + cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting; + cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting; + + return 0; +} + +static int hns3_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *cmd) +{ + return hns3_get_coalesce_per_queue(netdev, 0, cmd); +} + +static int hns3_check_gl_coalesce_para(struct net_device *netdev, + struct ethtool_coalesce *cmd) +{ + u32 rx_gl, tx_gl; + + if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) { + netdev_err(netdev, + "Invalid rx-usecs value, rx-usecs range is 0-%d\n", + HNS3_INT_GL_MAX); + return -EINVAL; + } + + if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) { + netdev_err(netdev, + "Invalid tx-usecs value, tx-usecs range is 0-%d\n", + HNS3_INT_GL_MAX); + return -EINVAL; + } + + rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs); + if (rx_gl != cmd->rx_coalesce_usecs) { + netdev_info(netdev, + "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n", + cmd->rx_coalesce_usecs, rx_gl); + } + + tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs); + if (tx_gl != cmd->tx_coalesce_usecs) { + netdev_info(netdev, + "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n", + cmd->tx_coalesce_usecs, tx_gl); + } + + return 0; +} + +static int hns3_check_rl_coalesce_para(struct net_device *netdev, + struct ethtool_coalesce *cmd) +{ + u32 rl; + + if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) { + netdev_err(netdev, + "tx_usecs_high must be same as rx_usecs_high.\n"); + return -EINVAL; + } + + if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) { + netdev_err(netdev, + "Invalid usecs_high value, usecs_high range is 0-%d\n", + HNS3_INT_RL_MAX); + return -EINVAL; + } + + rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high); + if (rl != cmd->rx_coalesce_usecs_high) { + netdev_info(netdev, + "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n", + cmd->rx_coalesce_usecs_high, rl); + } + + return 0; +} + +static int hns3_check_coalesce_para(struct net_device *netdev, + struct ethtool_coalesce *cmd) +{ + int ret; + + ret = hns3_check_gl_coalesce_para(netdev, cmd); + if (ret) { + netdev_err(netdev, + "Check gl coalesce param fail. ret = %d\n", ret); + return ret; + } + + ret = hns3_check_rl_coalesce_para(netdev, cmd); + if (ret) { + netdev_err(netdev, + "Check rl coalesce param fail. 
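
The two validators above round user input down to the hardware step (2 us for GL, 4 us for RL). round_down() from <linux/kernel.h> reduces to a mask when the step is a power of two, which both steps here are; an equivalent open-coded form:

static u32 round_down_pow2(u32 val, u32 step)
{
	/* valid only for power-of-two steps:
	 * round_down_pow2(37, 2) == 36, round_down_pow2(50, 4) == 48
	 */
	return val & ~(step - 1);
}
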
ret = %d\n", ret); + return ret; + } + + if (cmd->use_adaptive_tx_coalesce == 1 || + cmd->use_adaptive_rx_coalesce == 1) { + netdev_info(netdev, + "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will be changed dynamically.\n", + cmd->use_adaptive_tx_coalesce, + cmd->use_adaptive_rx_coalesce); + } + + return 0; +} + +static void hns3_set_coalesce_per_queue(struct net_device *netdev, + struct ethtool_coalesce *cmd, + u32 queue) +{ + struct hns3_enet_tqp_vector *tx_vector, *rx_vector; + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int queue_num = h->kinfo.num_tqps; + + tx_vector = priv->ring_data[queue].ring->tqp_vector; + rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector; + + tx_vector->tx_group.gl_adapt_enable = cmd->use_adaptive_tx_coalesce; + rx_vector->rx_group.gl_adapt_enable = cmd->use_adaptive_rx_coalesce; + + tx_vector->tx_group.int_gl = cmd->tx_coalesce_usecs; + rx_vector->rx_group.int_gl = cmd->rx_coalesce_usecs; + + hns3_set_vector_coalesce_tx_gl(tx_vector, tx_vector->tx_group.int_gl); + hns3_set_vector_coalesce_rx_gl(rx_vector, rx_vector->rx_group.int_gl); + + hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting); + hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting); +} + +static int hns3_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *cmd) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + u16 queue_num = h->kinfo.num_tqps; + int ret; + int i; + + ret = hns3_check_coalesce_para(netdev, cmd); + if (ret) + return ret; + + h->kinfo.int_rl_setting = + hns3_rl_round_down(cmd->rx_coalesce_usecs_high); + + for (i = 0; i < queue_num; i++) + hns3_set_coalesce_per_queue(netdev, cmd, i); + + return 0; +} + +static int hns3_get_regs_len(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo->ops->get_regs_len) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_regs_len(h); +} + +static void hns3_get_regs(struct net_device *netdev, + struct ethtool_regs *cmd, void *data) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo->ops->get_regs) + return; + + h->ae_algo->ops->get_regs(h, &cmd->version, data); +} + +static int hns3_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id) + return -EOPNOTSUPP; + + return h->ae_algo->ops->set_led_id(h, state); +} + +static const struct ethtool_ops hns3vf_ethtool_ops = { + .get_drvinfo = hns3_get_drvinfo, + .get_ringparam = hns3_get_ringparam, + .set_ringparam = hns3_set_ringparam, + .get_strings = hns3_get_strings, + .get_ethtool_stats = hns3_get_stats, + .get_sset_count = hns3_get_sset_count, + .get_rxnfc = hns3_get_rxnfc, + .get_rxfh_key_size = hns3_get_rss_key_size, + .get_rxfh_indir_size = hns3_get_rss_indir_size, + .get_rxfh = hns3_get_rss, + .set_rxfh = hns3_set_rss, + .get_link_ksettings = hns3_get_link_ksettings, + .get_channels = hns3_get_channels, + .get_coalesce = hns3_get_coalesce, + .set_coalesce = hns3_set_coalesce, +}; + static const struct ethtool_ops hns3_ethtool_ops = { .self_test = hns3_self_test, .get_drvinfo = hns3_get_drvinfo, @@ -856,6 +1120,7 @@ static const struct ethtool_ops hns3_ethtool_ops = { .get_ringparam = hns3_get_ringparam, .set_ringparam = hns3_set_ringparam, .get_pauseparam = hns3_get_pauseparam, + .set_pauseparam = hns3_set_pauseparam, .get_strings = hns3_get_strings, 
.get_ethtool_stats = hns3_get_stats, .get_sset_count = hns3_get_sset_count, @@ -868,9 +1133,21 @@ static const struct ethtool_ops hns3_ethtool_ops = { .get_link_ksettings = hns3_get_link_ksettings, .set_link_ksettings = hns3_set_link_ksettings, .nway_reset = hns3_nway_reset, + .get_channels = hns3_get_channels, + .set_channels = hns3_set_channels, + .get_coalesce = hns3_get_coalesce, + .set_coalesce = hns3_set_coalesce, + .get_regs_len = hns3_get_regs_len, + .get_regs = hns3_get_regs, + .set_phys_id = hns3_set_phys_id, }; void hns3_ethtool_set_ops(struct net_device *netdev) { - netdev->ethtool_ops = &hns3_ethtool_ops; + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->flags & HNAE3_SUPPORT_VF) + netdev->ethtool_ops = &hns3vf_ethtool_ops; + else + netdev->ethtool_ops = &hns3_ethtool_ops; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index d2b20d01a58c..cb8ddd043476 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0+ # # Makefile for the HISILICON network device drivers. # @@ -5,11 +6,6 @@ ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 obj-$(CONFIG_HNS3_HCLGE) += hclge.o -hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o +hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o - -obj-$(CONFIG_HNS3_ENET) += hns3.o -hns3-objs = hns3_enet.o hns3_ethtool.o - -hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index ce5ed8845042..3fd10a6bec53 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -102,6 +102,10 @@ enum hclge_opcode_type { HCLGE_OPC_STATS_64_BIT = 0x0030, HCLGE_OPC_STATS_32_BIT = 0x0031, HCLGE_OPC_STATS_MAC = 0x0032, + + HCLGE_OPC_QUERY_REG_NUM = 0x0040, + HCLGE_OPC_QUERY_32_BIT_REG = 0x0041, + HCLGE_OPC_QUERY_64_BIT_REG = 0x0042, /* Device management command */ /* MAC commond */ @@ -111,6 +115,7 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, + HCLGE_OPC_STATS_MAC_TRAFFIC = 0x0314, /* MACSEC command */ /* PFC/Pause CMD*/ @@ -180,6 +185,10 @@ enum hclge_opcode_type { /* Promisuous mode command */ HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01, + /* Vlan offload command */ + HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01, + HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02, + /* Interrupts cmd */ HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503, HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504, @@ -191,6 +200,7 @@ enum hclge_opcode_type { HCLGE_OPC_MAC_VLAN_INSERT = 0x1003, HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010, HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011, + HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012, /* Multicast linear table cmd */ HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020, @@ -218,6 +228,9 @@ enum hclge_opcode_type { /* Mailbox cmd */ HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000, + + /* Led command */ + HCLGE_OPC_LED_STATUS_CFG = 0xB000, }; #define HCLGE_TQP_REG_OFFSET 0x80000 @@ -399,6 +412,8 @@ struct hclge_pf_res_cmd { #define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0) #define HCLGE_CFG_DEFAULT_SPEED_S 16 #define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16) +#define HCLGE_CFG_RSS_SIZE_S 24 +#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24) struct hclge_cfg_param_cmd { __le32 offset; @@ -549,8 +564,6 @@ struct 
hclge_config_auto_neg_cmd { u8 rsv[20]; }; -#define HCLGE_MAC_MIN_MTU 64 -#define HCLGE_MAC_MAX_MTU 9728 #define HCLGE_MAC_UPLINK_PORT 0x100 struct hclge_config_max_frm_size_cmd { @@ -587,6 +600,37 @@ struct hclge_mac_vlan_tbl_entry_cmd { u8 rsv2[6]; }; +#define HCLGE_VLAN_MASK_EN_B 0x0 +struct hclge_mac_vlan_mask_entry_cmd { + u8 rsv0[2]; + u8 vlan_mask; + u8 rsv1; + u8 mac_mask[6]; + u8 rsv2[14]; +}; + +#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0) +#define HCLGE_MAC_MGR_MASK_MAC_B BIT(1) +#define HCLGE_MAC_MGR_MASK_ETHERTYPE_B BIT(2) +#define HCLGE_MAC_ETHERTYPE_LLDP 0x88cc + +struct hclge_mac_mgr_tbl_entry_cmd { + u8 flags; + u8 resp_code; + __le16 vlan_tag; + __le32 mac_addr_hi32; + __le16 mac_addr_lo16; + __le16 rsv1; + __le16 ethter_type; + __le16 egress_port; + __le16 egress_queue; + u8 sw_port_id_aware; + u8 rsv2; + u8 i_port_bitmap; + u8 i_port_direction; + u8 rsv3[2]; +}; + #define HCLGE_CFG_MTA_MAC_SEL_S 0x0 #define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0) #define HCLGE_CFG_MTA_MAC_EN_B 0x7 @@ -658,6 +702,47 @@ struct hclge_vlan_filter_vf_cfg_cmd { u8 vf_bitmap[16]; }; +#define HCLGE_ACCEPT_TAG_B 0 +#define HCLGE_ACCEPT_UNTAG_B 1 +#define HCLGE_PORT_INS_TAG1_EN_B 2 +#define HCLGE_PORT_INS_TAG2_EN_B 3 +#define HCLGE_CFG_NIC_ROCE_SEL_B 4 +struct hclge_vport_vtag_tx_cfg_cmd { + u8 vport_vlan_cfg; + u8 vf_offset; + u8 rsv1[2]; + __le16 def_vlan_tag1; + __le16 def_vlan_tag2; + u8 vf_bitmap[8]; + u8 rsv2[8]; +}; + +#define HCLGE_REM_TAG1_EN_B 0 +#define HCLGE_REM_TAG2_EN_B 1 +#define HCLGE_SHOW_TAG1_EN_B 2 +#define HCLGE_SHOW_TAG2_EN_B 3 +struct hclge_vport_vtag_rx_cfg_cmd { + u8 vport_vlan_cfg; + u8 vf_offset; + u8 rsv1[6]; + u8 vf_bitmap[8]; + u8 rsv2[8]; +}; + +struct hclge_tx_vlan_type_cfg_cmd { + __le16 ot_vlan_type; + __le16 in_vlan_type; + u8 rsv[20]; +}; + +struct hclge_rx_vlan_type_cfg_cmd { + __le16 ot_fst_vlan_type; + __le16 ot_sec_vlan_type; + __le16 in_fst_vlan_type; + __le16 in_sec_vlan_type; + u8 rsv[16]; +}; + struct hclge_cfg_com_tqp_queue_cmd { __le16 tqp_id; __le16 stream_id; @@ -726,6 +811,23 @@ struct hclge_reset_cmd { #define HCLGE_NIC_CMQ_DESC_NUM 1024 #define HCLGE_NIC_CMQ_DESC_NUM_S 3 +#define HCLGE_LED_PORT_SPEED_STATE_S 0 +#define HCLGE_LED_PORT_SPEED_STATE_M GENMASK(5, 0) +#define HCLGE_LED_ACTIVITY_STATE_S 0 +#define HCLGE_LED_ACTIVITY_STATE_M GENMASK(1, 0) +#define HCLGE_LED_LINK_STATE_S 0 +#define HCLGE_LED_LINK_STATE_M GENMASK(1, 0) +#define HCLGE_LED_LOCATE_STATE_S 0 +#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0) + +struct hclge_set_led_state_cmd { + u8 port_speed_led_config; + u8 link_led_config; + u8 activity_led_config; + u8 locate_led_config; + u8 rsv[20]; +}; + int hclge_cmd_init(struct hclge_dev *hdev); static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 59ed806a52c3..32bc6f68e297 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -17,10 +17,12 @@ #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/platform_device.h> - +#include <linux/if_vlan.h> +#include <net/rtnetlink.h> #include "hclge_cmd.h" #include "hclge_dcb.h" #include "hclge_main.h" +#include "hclge_mbx.h" #include "hclge_mdio.h" #include "hclge_tm.h" #include "hnae3.h" @@ -34,8 +36,10 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, enum hclge_mta_dmac_sel_type mta_mac_sel, bool enable); +static int hclge_set_mtu(struct 
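
The hclge_mac_mgr_tbl_entry_cmd layout above splits a 6-byte MAC across mac_addr_hi32 and mac_addr_lo16. For the LLDP multicast address 01:80:c2:00:00:0e, used later when the manager table is populated, the packing works out as follows (a sketch; byte ordering follows the cpu_to_le*/hton* chain seen in the source):

__le32 hi = cpu_to_le32(htonl(0x0180C200));	/* octets 01:80:c2:00 */
__le16 lo = cpu_to_le16(htons(0x000E));		/* octets 00:0e */
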
hnae3_handle *handle, int new_mtu); static int hclge_init_vlan_config(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); +static int hclge_update_led_status(struct hclge_dev *hdev); static struct hnae3_ae_algo ae_algo; @@ -278,8 +282,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, - {"mac_tx_overrsize_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)}, + {"mac_tx_oversize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, {"mac_tx_65_127_oct_pkt_num", @@ -292,8 +296,24 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, - {"mac_tx_1519_max_oct_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)}, + {"mac_tx_1519_2047_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, + {"mac_tx_2048_4095_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, + {"mac_tx_4096_8191_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, + {"mac_tx_8192_12287_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)}, + {"mac_tx_8192_9216_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, + {"mac_tx_9217_12287_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, + {"mac_tx_12288_16383_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, + {"mac_tx_1519_max_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, + {"mac_tx_1519_max_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, {"mac_rx_total_oct_num", @@ -314,8 +334,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, - {"mac_rx_overrsize_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)}, + {"mac_rx_oversize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, {"mac_rx_65_127_oct_pkt_num", @@ -328,33 +348,59 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, - {"mac_rx_1519_max_oct_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)}, - - {"mac_trans_fragment_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)}, - {"mac_trans_undermin_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)}, - {"mac_trans_jabber_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)}, - {"mac_trans_err_all_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)}, - {"mac_trans_from_app_good_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)}, - {"mac_trans_from_app_bad_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)}, - {"mac_rcv_fragment_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)}, - {"mac_rcv_undermin_pkt_num", - 
HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)}, - {"mac_rcv_jabber_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)}, - {"mac_rcv_fcs_err_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)}, - {"mac_rcv_send_app_good_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)}, - {"mac_rcv_send_app_bad_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)} + {"mac_rx_1519_2047_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, + {"mac_rx_2048_4095_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, + {"mac_rx_4096_8191_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, + {"mac_rx_8192_12287_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)}, + {"mac_rx_8192_9216_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, + {"mac_rx_9217_12287_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, + {"mac_rx_12288_16383_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, + {"mac_rx_1519_max_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, + {"mac_rx_1519_max_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, + + {"mac_tx_fragment_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, + {"mac_tx_undermin_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, + {"mac_tx_jabber_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, + {"mac_tx_err_all_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, + {"mac_tx_from_app_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, + {"mac_tx_from_app_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, + {"mac_rx_fragment_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, + {"mac_rx_undermin_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, + {"mac_rx_jabber_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, + {"mac_rx_fcs_err_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, + {"mac_rx_send_app_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, + {"mac_rx_send_app_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} +}; + +static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { + { + .flags = HCLGE_MAC_MGR_MASK_VLAN_B, + .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP), + .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)), + .mac_addr_lo16 = cpu_to_le16(htons(0x000E)), + .i_port_bitmap = 0x1, + }, }; static int hclge_64_bit_update_stats(struct hclge_dev *hdev) @@ -460,9 +506,41 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev) return 0; } +static int hclge_mac_get_traffic_stats(struct hclge_dev *hdev) +{ + struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats; + struct hclge_desc desc; + __le64 *desc_data; + int ret; + + /* for fiber port, need to query the total rx/tx packets statistics, + * used for data transfer checking. 
+ */ + if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) + return 0; + + if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) + return 0; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get MAC total pkt stats fail, ret = %d\n", ret); + + return ret; + } + + desc_data = (__le64 *)(&desc.data[0]); + mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++); + mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data); + + return 0; +} + static int hclge_mac_update_stats(struct hclge_dev *hdev) { -#define HCLGE_MAC_CMD_NUM 17 +#define HCLGE_MAC_CMD_NUM 21 #define HCLGE_RTN_DATA_NUM 4 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); @@ -524,7 +602,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle) return ret; } tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += - le32_to_cpu(desc[0].data[4]); + le32_to_cpu(desc[0].data[1]); } for (i = 0; i < kinfo->num_tqps; i++) { @@ -544,7 +622,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle) return ret; } tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += - le32_to_cpu(desc[0].data[4]); + le32_to_cpu(desc[0].data[1]); } return 0; @@ -586,7 +664,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) for (i = 0; i < kinfo->num_tqps; i++) { struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], struct hclge_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd", + snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", tqp->index); buff = buff + ETH_GSTRING_LEN; } @@ -594,7 +672,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) for (i = 0; i < kinfo->num_tqps; i++) { struct hclge_tqp *tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd", + snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", tqp->index); buff = buff + ETH_GSTRING_LEN; } @@ -642,23 +720,22 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num; net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num; - net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num; + net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num; net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; - net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt; net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt; net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt; - net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; + net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; - net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; + net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; net_stats->rx_length_errors = hw_stats->mac_stats.mac_rx_undersize_pkt_num; net_stats->rx_length_errors += - hw_stats->mac_stats.mac_rx_overrsize_pkt_num; + hw_stats->mac_stats.mac_rx_oversize_pkt_num; net_stats->rx_over_errors = - hw_stats->mac_stats.mac_rx_overrsize_pkt_num; + hw_stats->mac_stats.mac_rx_oversize_pkt_num; } static void hclge_update_stats_for_all(struct hclge_dev *hdev) @@ -698,6 +775,9 @@ static void hclge_update_stats(struct hnae3_handle *handle, struct hclge_hw_stats *hw_stats = 
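
The bump of HCLGE_MAC_CMD_NUM from 17 to 21 above tracks the enlarged MAC statistics table: each descriptor returns HCLGE_RTN_DATA_NUM (4) 64-bit words. A back-of-envelope capacity check (some words of the first descriptor may be header, so the usable counter count is slightly lower):

u32 max_words = HCLGE_MAC_CMD_NUM * HCLGE_RTN_DATA_NUM;	/* 21 * 4 == 84 */
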
&hdev->hw_stats; int status; + if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) + return; + status = hclge_mac_update_stats(hdev); if (status) dev_err(&hdev->pdev->dev, @@ -723,6 +803,8 @@ static void hclge_update_stats(struct hnae3_handle *handle, status); hclge_update_netstat(hw_stats, net_stats); + + clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); } static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) @@ -981,6 +1063,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]), HCLGE_CFG_DEFAULT_SPEED_M, HCLGE_CFG_DEFAULT_SPEED_S); + cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_RSS_SIZE_M, + HCLGE_CFG_RSS_SIZE_S); + for (i = 0; i < ETH_ALEN; i++) cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; @@ -1058,7 +1144,7 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->num_vmdq_vport = cfg.vmdq_vport_num; hdev->base_tqp_pid = 0; - hdev->rss_size_max = 1; + hdev->rss_size_max = cfg.rss_size_max; hdev->rx_buf_len = cfg.rx_buf_len; ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); hdev->hw.mac.media_type = cfg.media_type; @@ -1095,10 +1181,7 @@ static int hclge_configure(struct hclge_dev *hdev) for (i = 0; i < hdev->tm_info.num_tc; i++) hnae_set_bit(hdev->hw_tc_map, i, 1); - if (!hdev->num_vmdq_vport && !hdev->num_req_vfs) - hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; - else - hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE; + hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; return ret; } @@ -2132,28 +2215,6 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, return 0; } -static int hclge_query_autoneg_result(struct hclge_dev *hdev) -{ - struct hclge_mac *mac = &hdev->hw.mac; - struct hclge_query_an_speed_dup_cmd *req; - struct hclge_desc desc; - int ret; - - req = (struct hclge_query_an_speed_dup_cmd *)desc.data; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "autoneg result query cmd failed %d.\n", ret); - return ret; - } - - mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B); - - return 0; -} - static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) { struct hclge_config_auto_neg_cmd *req; @@ -2189,15 +2250,45 @@ static int hclge_get_autoneg(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; - hclge_query_autoneg_result(hdev); + if (phydev) + return phydev->autoneg; return hdev->hw.mac.autoneg; } +static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, + bool mask_vlan, + u8 *mac_mask) +{ + struct hclge_mac_vlan_mask_entry_cmd *req; + struct hclge_desc desc; + int status; + + req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); + + hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, + mask_vlan ? 
1 : 0); + ether_addr_copy(req->mac_mask, mac_mask); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Config mac_vlan_mask failed for cmd_send, ret =%d\n", + status); + + return status; +} + static int hclge_mac_init(struct hclge_dev *hdev) { + struct hnae3_handle *handle = &hdev->vport[0].nic; + struct net_device *netdev = handle->kinfo.netdev; struct hclge_mac *mac = &hdev->hw.mac; + u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + int mtu; int ret; ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); @@ -2223,7 +2314,45 @@ static int hclge_mac_init(struct hclge_dev *hdev) return ret; } - return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); + ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); + if (ret) { + dev_err(&hdev->pdev->dev, + "set mta filter mode fail ret=%d\n", ret); + return ret; + } + + ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); + if (ret) { + dev_err(&hdev->pdev->dev, + "set default mac_vlan_mask fail ret=%d\n", ret); + return ret; + } + + if (netdev) + mtu = netdev->mtu; + else + mtu = ETH_DATA_LEN; + + ret = hclge_set_mtu(handle, mtu); + if (ret) { + dev_err(&hdev->pdev->dev, + "set mtu failed ret=%d\n", ret); + return ret; + } + + return 0; +} + +static void hclge_mbx_task_schedule(struct hclge_dev *hdev) +{ + if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) + schedule_work(&hdev->mbx_service_task); +} + +static void hclge_reset_task_schedule(struct hclge_dev *hdev) +{ + if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) + schedule_work(&hdev->rst_service_task); } static void hclge_task_schedule(struct hclge_dev *hdev) @@ -2350,6 +2479,7 @@ static void hclge_service_timer(struct timer_list *t) struct hclge_dev *hdev = from_timer(hdev, t, service_timer); mod_timer(&hdev->service_timer, jiffies + HZ); + hdev->hw_stats.stats_timer++; hclge_task_schedule(hdev); } @@ -2362,6 +2492,64 @@ static void hclge_service_complete(struct hclge_dev *hdev) clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); } +static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) +{ + u32 rst_src_reg; + u32 cmdq_src_reg; + + /* fetch the events from their corresponding regs */ + rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG); + cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); + + /* Assumption: If by any chance reset and mailbox events are reported + * together then we will only process reset event in this go and will + * defer the processing of the mailbox events. Since, we would have not + * cleared RX CMDQ event this time we would receive again another + * interrupt from H/W just for the mailbox. 
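
hclge_mbx_task_schedule() and hclge_reset_task_schedule() above both use the standard "schedule at most once" idiom. A generic sketch of the pattern (schedule_once() is a hypothetical name, not a kernel API):

static void schedule_once(unsigned long *state, int sched_bit,
			  struct work_struct *work)
{
	/* atomic test-and-set: only the first caller queues the work;
	 * the work handler clears sched_bit before it starts processing
	 */
	if (!test_and_set_bit(sched_bit, state))
		schedule_work(work);
}
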
+ */ + + /* check for vector0 reset event sources */ + if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { + set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); + *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); + return HCLGE_VECTOR0_EVENT_RST; + } + + if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { + set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); + *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); + return HCLGE_VECTOR0_EVENT_RST; + } + + if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { + set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); + *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); + return HCLGE_VECTOR0_EVENT_RST; + } + + /* check for vector0 mailbox(=CMDQ RX) event source */ + if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { + cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); + *clearval = cmdq_src_reg; + return HCLGE_VECTOR0_EVENT_MBX; + } + + return HCLGE_VECTOR0_EVENT_OTHER; +} + +static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, + u32 regclr) +{ + switch (event_type) { + case HCLGE_VECTOR0_EVENT_RST: + hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); + break; + case HCLGE_VECTOR0_EVENT_MBX: + hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); + break; + } +} + static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) { writel(enable ? 1 : 0, vector->addr); @@ -2370,10 +2558,38 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) static irqreturn_t hclge_misc_irq_handle(int irq, void *data) { struct hclge_dev *hdev = data; + u32 event_cause; + u32 clearval; hclge_enable_vector(&hdev->misc_vector, false); - if (!test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) - schedule_work(&hdev->service_task); + event_cause = hclge_check_event_cause(hdev, &clearval); + + /* vector 0 interrupt is shared with reset and mailbox source events.*/ + switch (event_cause) { + case HCLGE_VECTOR0_EVENT_RST: + hclge_reset_task_schedule(hdev); + break; + case HCLGE_VECTOR0_EVENT_MBX: + /* If we are here then, + * 1. Either we are not handling any mbx task and we are not + * scheduled as well + * OR + * 2. We could be handling a mbx task but nothing more is + * scheduled. + * In both cases, we should schedule mbx task as there are more + * mbx messages reported by this interrupt. 
hclge_mbx_task_schedule(hdev); + break; + default: + dev_dbg(&hdev->pdev->dev, + "received unknown or unhandled event of vector0\n"); + break; + } + + /* we should clear the source of interrupt */ + hclge_clear_event_cause(hdev, event_cause, clearval); + hclge_enable_vector(&hdev->misc_vector, true); return IRQ_HANDLED; } @@ -2404,9 +2620,9 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev) hclge_get_misc_vector(hdev); - ret = devm_request_irq(&hdev->pdev->dev, - hdev->misc_vector.vector_irq, - hclge_misc_irq_handle, 0, "hclge_misc", hdev); + /* this would be explicitly freed in the end */ + ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, + 0, "hclge_misc", hdev); if (ret) { hclge_free_vector(hdev, 0); dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", @@ -2416,6 +2632,12 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev) return ret; } +static void hclge_misc_irq_uninit(struct hclge_dev *hdev) +{ + free_irq(hdev->misc_vector.vector_irq, hdev); + hclge_free_vector(hdev, 0); +} + static int hclge_notify_client(struct hclge_dev *hdev, enum hnae3_reset_notify_type type) { @@ -2471,12 +2693,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev) cnt++; } - /* must clear reset status register to - * prevent driver detect reset interrupt again - */ - reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG); - hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg); - if (cnt >= HCLGE_RESET_WAIT_CNT) { dev_warn(&hdev->pdev->dev, "Wait for reset timeout: %d\n", hdev->reset_type); @@ -2505,12 +2721,12 @@ static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) return ret; } -static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type) +static void hclge_do_reset(struct hclge_dev *hdev) { struct pci_dev *pdev = hdev->pdev; u32 val; - switch (type) { + switch (hdev->reset_type) { case HNAE3_GLOBAL_RESET: val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); @@ -2526,30 +2742,62 @@ static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type) case HNAE3_FUNC_RESET: dev_info(&pdev->dev, "PF Reset requested\n"); hclge_func_reset_cmd(hdev, 0); + /* schedule again to check later */ + set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); + hclge_reset_task_schedule(hdev); break; default: dev_warn(&pdev->dev, - "Unsupported reset type: %d\n", type); + "Unsupported reset type: %d\n", hdev->reset_type); break; } } -static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev) +static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, + unsigned long *addr) { enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; - u32 rst_reg_val; - rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG); - if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val) + /* return the highest priority reset level amongst all */ + if (test_bit(HNAE3_GLOBAL_RESET, addr)) rst_level = HNAE3_GLOBAL_RESET; - else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val) + else if (test_bit(HNAE3_CORE_RESET, addr)) rst_level = HNAE3_CORE_RESET; - else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val) + else if (test_bit(HNAE3_IMP_RESET, addr)) rst_level = HNAE3_IMP_RESET; + else if (test_bit(HNAE3_FUNC_RESET, addr)) + rst_level = HNAE3_FUNC_RESET; + + /* now, clear all other resets */ + clear_bit(HNAE3_GLOBAL_RESET, addr); + clear_bit(HNAE3_CORE_RESET, addr); + clear_bit(HNAE3_IMP_RESET, addr); + clear_bit(HNAE3_FUNC_RESET, addr); return rst_level; } +static void 
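
hclge_get_reset_level() above encodes a strict priority: GLOBAL is the widest reset, then CORE, IMP and FUNC. A table-driven restatement of the same walk (a sketch; all pending bits are cleared because the chosen level supersedes the narrower ones):

static const enum hnae3_reset_type prio[] = {
	HNAE3_GLOBAL_RESET, HNAE3_CORE_RESET,
	HNAE3_IMP_RESET, HNAE3_FUNC_RESET,
};

static enum hnae3_reset_type pick_reset_level(unsigned long *addr)
{
	enum hnae3_reset_type level = HNAE3_NONE_RESET;
	int i;

	for (i = 0; i < ARRAY_SIZE(prio); i++) {
		if (level == HNAE3_NONE_RESET && test_bit(prio[i], addr))
			level = prio[i];	/* first hit wins */
		clear_bit(prio[i], addr);
	}
	return level;
}
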
hclge_reset(struct hclge_dev *hdev) +{ + /* perform reset of the stack & ae device for a client */ + + hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + + if (!hclge_reset_wait(hdev)) { + rtnl_lock(); + hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); + hclge_reset_ae_dev(hdev->ae_dev); + hclge_notify_client(hdev, HNAE3_INIT_CLIENT); + rtnl_unlock(); + } else { + /* schedule again to check pending resets later */ + set_bit(hdev->reset_type, &hdev->reset_pending); + hclge_reset_task_schedule(hdev); + } + + hclge_notify_client(hdev, HNAE3_UP_CLIENT); +} + static void hclge_reset_event(struct hnae3_handle *handle, enum hnae3_reset_type reset) { @@ -2563,14 +2811,9 @@ static void hclge_reset_event(struct hnae3_handle *handle, case HNAE3_FUNC_RESET: case HNAE3_CORE_RESET: case HNAE3_GLOBAL_RESET: - if (test_bit(HCLGE_STATE_RESET_INT, &hdev->state)) { - dev_err(&hdev->pdev->dev, "Already in reset state"); - return; - } - hdev->reset_type = reset; - set_bit(HCLGE_STATE_RESET_INT, &hdev->state); - set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); - schedule_work(&hdev->service_task); + /* request reset & schedule reset task */ + set_bit(reset, &hdev->reset_request); + hclge_reset_task_schedule(hdev); break; default: dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset); @@ -2580,49 +2823,55 @@ static void hclge_reset_event(struct hnae3_handle *handle, static void hclge_reset_subtask(struct hclge_dev *hdev) { - bool do_reset; + /* check if there is any ongoing reset in the hardware. This status can + * be checked from reset_pending. If there is, then we need to wait for + * hardware to complete reset. + * a. If we are able to figure out in reasonable time that hardware + * has fully reset, then we can proceed with driver, client + * reset. + * b. else, we can come back later to check this status so re-sched + * now. 
+ */ + hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); + if (hdev->reset_type != HNAE3_NONE_RESET) + hclge_reset(hdev); - do_reset = hdev->reset_type != HNAE3_NONE_RESET; + /* check if we got any *new* reset requests to be honored */ + hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); + if (hdev->reset_type != HNAE3_NONE_RESET) + hclge_do_reset(hdev); - /* Reset is detected by interrupt */ - if (hdev->reset_type == HNAE3_NONE_RESET) - hdev->reset_type = hclge_detected_reset_event(hdev); + hdev->reset_type = HNAE3_NONE_RESET; +} - if (hdev->reset_type == HNAE3_NONE_RESET) +static void hclge_reset_service_task(struct work_struct *work) +{ + struct hclge_dev *hdev = + container_of(work, struct hclge_dev, rst_service_task); + + if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) return; - switch (hdev->reset_type) { - case HNAE3_FUNC_RESET: - case HNAE3_CORE_RESET: - case HNAE3_GLOBAL_RESET: - case HNAE3_IMP_RESET: - hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); - if (do_reset) - hclge_do_reset(hdev, hdev->reset_type); - else - set_bit(HCLGE_STATE_RESET_INT, &hdev->state); + hclge_reset_subtask(hdev); - if (!hclge_reset_wait(hdev)) { - hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); - hclge_reset_ae_dev(hdev->ae_dev); - hclge_notify_client(hdev, HNAE3_INIT_CLIENT); - clear_bit(HCLGE_STATE_RESET_INT, &hdev->state); - } - hclge_notify_client(hdev, HNAE3_UP_CLIENT); - break; - default: - dev_err(&hdev->pdev->dev, "Unsupported reset type:%d\n", - hdev->reset_type); - break; - } - hdev->reset_type = HNAE3_NONE_RESET; + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); } -static void hclge_misc_irq_service_task(struct hclge_dev *hdev) +static void hclge_mailbox_service_task(struct work_struct *work) { - hclge_reset_subtask(hdev); - hclge_enable_vector(&hdev->misc_vector, true); + struct hclge_dev *hdev = + container_of(work, struct hclge_dev, mbx_service_task); + + if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) + return; + + clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); + + hclge_mbx_handler(hdev); + + clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); } static void hclge_service_task(struct work_struct *work) @@ -2630,10 +2879,20 @@ static void hclge_service_task(struct work_struct *work) struct hclge_dev *hdev = container_of(work, struct hclge_dev, service_task); - hclge_misc_irq_service_task(hdev); + /* The total rx/tx packets statistics need to be updated + * per second. Both hclge_update_stats_for_all() and + * hclge_mac_get_traffic_stats() can do it. 
+ */ + if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { + hclge_update_stats_for_all(hdev); + hdev->hw_stats.stats_timer = 0; + } else { + hclge_mac_get_traffic_stats(hdev); + } + hclge_update_speed_duplex(hdev); hclge_update_link_status(hdev); - hclge_update_stats_for_all(hdev); + hclge_update_led_status(hdev); hclge_service_complete(hdev); } @@ -3174,49 +3433,53 @@ err: return ret; } -int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id, - struct hnae3_ring_chain_node *ring_chain) +int hclge_bind_ring_with_vector(struct hclge_vport *vport, + int vector_id, bool en, + struct hnae3_ring_chain_node *ring_chain) { struct hclge_dev *hdev = vport->back; - struct hclge_ctrl_vector_chain_cmd *req; struct hnae3_ring_chain_node *node; struct hclge_desc desc; - int ret; + struct hclge_ctrl_vector_chain_cmd *req + = (struct hclge_ctrl_vector_chain_cmd *)desc.data; + enum hclge_cmd_status status; + enum hclge_opcode_type op; + u16 tqp_type_and_id; int i; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false); - - req = (struct hclge_ctrl_vector_chain_cmd *)desc.data; + op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; + hclge_cmd_setup_basic_desc(&desc, op, false); req->int_vector_id = vector_id; i = 0; for (node = ring_chain; node; node = node->next) { - u16 type_and_id = 0; - - hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S, + tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); + hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, + HCLGE_INT_TYPE_S, hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, - node->tqp_index); - hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M, + hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, + HCLGE_TQP_ID_S, node->tqp_index); + hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S, - hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - req->tqp_type_and_id[i] = cpu_to_le16(type_and_id); - req->vfid = vport->vport_id; - + hnae_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S)); + req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; + req->vfid = vport->vport_id; - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) { dev_err(&hdev->pdev->dev, "Map TQP fail, status is %d.\n", - ret); - return ret; + status); + return -EIO; } i = 0; hclge_cmd_setup_basic_desc(&desc, - HCLGE_OPC_ADD_RING_TO_VECTOR, + op, false); req->int_vector_id = vector_id; } @@ -3224,21 +3487,21 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id, if (i > 0) { req->int_cause_num = i; - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + req->vfid = vport->vport_id; + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) { dev_err(&hdev->pdev->dev, - "Map TQP fail, status is %d.\n", ret); - return ret; + "Map TQP fail, status is %d.\n", status); + return -EIO; } } return 0; } -static int hclge_map_handle_ring_to_vector( - struct hnae3_handle *handle, int vector, - struct hnae3_ring_chain_node *ring_chain) +static int hclge_map_ring_to_vector(struct hnae3_handle *handle, + int vector, + struct hnae3_ring_chain_node *ring_chain) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -3247,24 +3510,20 @@ static int hclge_map_handle_ring_to_vector( vector_id = 
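
hclge_bind_ring_with_vector() above packs up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring mappings into one command descriptor and flushes as it fills. The generic shape of that batching (fill_slot(), flush() and reinit() are hypothetical stand-ins for the descriptor handling in the source):

i = 0;
for (node = ring_chain; node; node = node->next) {
	fill_slot(req, i, node);		/* stage one ring mapping */
	if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
		flush(&desc);			/* send the full descriptor */
		i = 0;
		reinit(&desc);			/* start a fresh one */
	}
}
if (i > 0)
	flush(&desc);				/* send the partial tail */
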
hclge_get_vector_index(hdev, vector); if (vector_id < 0) { dev_err(&hdev->pdev->dev, - "Get vector index fail. ret =%d\n", vector_id); + "Get vector index fail. vector_id =%d\n", vector_id); return vector_id; } - return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain); + return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); } -static int hclge_unmap_ring_from_vector( - struct hnae3_handle *handle, int vector, - struct hnae3_ring_chain_node *ring_chain) +static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, + int vector, + struct hnae3_ring_chain_node *ring_chain) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - struct hclge_ctrl_vector_chain_cmd *req; - struct hnae3_ring_chain_node *node; - struct hclge_desc desc; - int i, vector_id; - int ret; + int vector_id, ret; vector_id = hclge_get_vector_index(hdev, vector); if (vector_id < 0) { @@ -3273,54 +3532,17 @@ static int hclge_unmap_ring_from_vector( return vector_id; } - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false); - - req = (struct hclge_ctrl_vector_chain_cmd *)desc.data; - req->int_vector_id = vector_id; - - i = 0; - for (node = ring_chain; node; node = node->next) { - u16 type_and_id = 0; - - hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S, - hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, - node->tqp_index); - hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, - hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - - req->tqp_type_and_id[i] = cpu_to_le16(type_and_id); - req->vfid = vport->vport_id; - - if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { - req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Unmap TQP fail, status is %d.\n", - ret); - return ret; - } - i = 0; - hclge_cmd_setup_basic_desc(&desc, - HCLGE_OPC_DEL_RING_TO_VECTOR, - false); - req->int_vector_id = vector_id; - } + ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); + if (ret) { + dev_err(&handle->pdev->dev, + "Unmap ring from vector fail. 
vectorid=%d, ret =%d\n", + vector_id, + ret); + return ret; } - if (i > 0) { - req->int_cause_num = i; - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Unmap TQP fail, status is %d.\n", ret); - return ret; - } - } + /* Free this MSIX or MSI vector */ + hclge_free_vector(hdev, vector_id); return 0; } @@ -4077,6 +4299,91 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, return status; } +static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, + u16 cmdq_resp, u8 resp_code) +{ +#define HCLGE_ETHERTYPE_SUCCESS_ADD 0 +#define HCLGE_ETHERTYPE_ALREADY_ADD 1 +#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 +#define HCLGE_ETHERTYPE_KEY_CONFLICT 3 + + int return_status; + + if (cmdq_resp) { + dev_err(&hdev->pdev->dev, + "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", + cmdq_resp); + return -EIO; + } + + switch (resp_code) { + case HCLGE_ETHERTYPE_SUCCESS_ADD: + case HCLGE_ETHERTYPE_ALREADY_ADD: + return_status = 0; + break; + case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: + dev_err(&hdev->pdev->dev, + "add mac ethertype failed for manager table overflow.\n"); + return_status = -EIO; + break; + case HCLGE_ETHERTYPE_KEY_CONFLICT: + dev_err(&hdev->pdev->dev, + "add mac ethertype failed for key conflict.\n"); + return_status = -EIO; + break; + default: + dev_err(&hdev->pdev->dev, + "add mac ethertype failed for undefined, code=%d.\n", + resp_code); + return_status = -EIO; + } + + return return_status; +} + +static int hclge_add_mgr_tbl(struct hclge_dev *hdev, + const struct hclge_mac_mgr_tbl_entry_cmd *req) +{ + struct hclge_desc desc; + u8 resp_code; + u16 retval; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); + memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "add mac ethertype failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; + retval = le16_to_cpu(desc.retval); + + return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); +} + +static int init_mgr_tbl(struct hclge_dev *hdev) +{ + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { + ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); + if (ret) { + dev_err(&hdev->pdev->dev, + "add mac ethertype failed, ret =%d.\n", + ret); + return ret; + } + } + + return 0; +} + static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -4090,6 +4397,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) const unsigned char *new_addr = (const unsigned char *)p; struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + int ret; /* mac addr check */ if (is_zero_ether_addr(new_addr) || @@ -4101,14 +4409,39 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) return -EINVAL; } - hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr); + ret = hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr); + if (ret) + dev_warn(&hdev->pdev->dev, + "remove old uc mac address fail, ret =%d.\n", + ret); - if (!hclge_add_uc_addr(handle, new_addr)) { - ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); - return 0; + ret = hclge_add_uc_addr(handle, new_addr); + if (ret) { + dev_err(&hdev->pdev->dev, + "add uc mac address fail, ret =%d.\n", + ret); + + ret = hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr); + if (ret) { + dev_err(&hdev->pdev->dev, + 
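
The decode in hclge_add_mgr_tbl() above assumes the firmware echoes a status byte in bits 15:8 of the first data word, alongside the 16-bit command-queue status in the descriptor header. A fragment showing the unpacking (field positions as read from the code, not from a hardware manual):

u32 word0 = le32_to_cpu(desc.data[0]);
u8 resp_code = (word0 >> 8) & 0xff;	/* second byte of data[0] */
u16 retval = le16_to_cpu(desc.retval);	/* cmdq-level completion status */
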
"restore uc mac address fail, ret =%d.\n", + ret); + } + + return -EIO; } - return -EIO; + ret = hclge_mac_pause_addr_cfg(hdev, new_addr); + if (ret) { + dev_err(&hdev->pdev->dev, + "configure mac pause address fail, ret =%d.\n", + ret); + return -EIO; + } + + ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); + + return 0; } static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, @@ -4134,6 +4467,17 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, return 0; } +#define HCLGE_FILTER_TYPE_VF 0 +#define HCLGE_FILTER_TYPE_PORT 1 + +static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable); +} + int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, bool is_kill, u16 vlan, u8 qos, __be16 proto) { @@ -4250,43 +4594,204 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto); } +static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) +{ + struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; + struct hclge_vport_vtag_tx_cfg_cmd *req; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int status; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); + + req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; + req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); + req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B, + vcfg->accept_tag ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B, + vcfg->accept_untag ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, + vcfg->insert_tag1_en ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, + vcfg->insert_tag2_en ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); + + req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; + req->vf_bitmap[req->vf_offset] = + 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Send port txvlan cfg command fail, ret =%d\n", + status); + + return status; +} + +static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) +{ + struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; + struct hclge_vport_vtag_rx_cfg_cmd *req; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int status; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); + + req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; + hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, + vcfg->strip_tag1_en ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, + vcfg->strip_tag2_en ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, + vcfg->vlan1_vlan_prionly ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, + vcfg->vlan2_vlan_prionly ? 
1 : 0); + + req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; + req->vf_bitmap[req->vf_offset] = + 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Send port rxvlan cfg command fail, ret =%d\n", + status); + + return status; +} + +static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) +{ + struct hclge_rx_vlan_type_cfg_cmd *rx_req; + struct hclge_tx_vlan_type_cfg_cmd *tx_req; + struct hclge_desc desc; + int status; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); + rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; + rx_req->ot_fst_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); + rx_req->ot_sec_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); + rx_req->in_fst_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); + rx_req->in_sec_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Send rxvlan protocol type command fail, ret =%d\n", + status); + return status; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); + + tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data; + tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); + tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Send txvlan protocol type command fail, ret =%d\n", + status); + + return status; +} + static int hclge_init_vlan_config(struct hclge_dev *hdev) { -#define HCLGE_VLAN_TYPE_VF_TABLE 0 -#define HCLGE_VLAN_TYPE_PORT_TABLE 1 +#define HCLGE_DEF_VLAN_TYPE 0x8100 + struct hnae3_handle *handle; + struct hclge_vport *vport; int ret; + int i; - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE, - true); + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true); if (ret) return ret; - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE, - true); + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true); if (ret) return ret; + hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; + hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; + hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; + hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; + hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; + hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; + + ret = hclge_set_vlan_protocol_type(hdev); + if (ret) + return ret; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + vport->txvlan_cfg.accept_tag = true; + vport->txvlan_cfg.accept_untag = true; + vport->txvlan_cfg.insert_tag1_en = false; + vport->txvlan_cfg.insert_tag2_en = false; + vport->txvlan_cfg.default_tag1 = 0; + vport->txvlan_cfg.default_tag2 = 0; + + ret = hclge_set_vlan_tx_offload_cfg(vport); + if (ret) + return ret; + + vport->rxvlan_cfg.strip_tag1_en = false; + vport->rxvlan_cfg.strip_tag2_en = true; + vport->rxvlan_cfg.vlan1_vlan_prionly = false; + vport->rxvlan_cfg.vlan2_vlan_prionly = false; + + ret = hclge_set_vlan_rx_offload_cfg(vport); + if (ret) + return ret; + } + handle = &hdev->vport[0].nic; return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } +static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport 
*vport = hclge_get_vport(handle); + + vport->rxvlan_cfg.strip_tag1_en = false; + vport->rxvlan_cfg.strip_tag2_en = enable; + vport->rxvlan_cfg.vlan1_vlan_prionly = false; + vport->rxvlan_cfg.vlan2_vlan_prionly = false; + + return hclge_set_vlan_rx_offload_cfg(vport); +} + static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_config_max_frm_size_cmd *req; struct hclge_dev *hdev = vport->back; struct hclge_desc desc; + int max_frm_size; int ret; - if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU)) + max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if (max_frm_size < HCLGE_MAC_MIN_FRAME || + max_frm_size > HCLGE_MAC_MAX_FRAME) return -EINVAL; - hdev->mps = new_mtu; + max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); req = (struct hclge_config_max_frm_size_cmd *)desc.data; - req->max_frm_size = cpu_to_le16(new_mtu); + req->max_frm_size = cpu_to_le16(max_frm_size); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -4294,6 +4799,8 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) return ret; } + hdev->mps = max_frm_size; + return 0; } @@ -4341,7 +4848,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); } -static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) +void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -4392,6 +4899,100 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle) return hdev->fw_version; } +static void hclge_get_flowctrl_adv(struct hnae3_handle *handle, + u32 *flowctrl_adv) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (!phydev) + return; + + *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) | + (phydev->advertising & ADVERTISED_Asym_Pause); +} + +static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (!phydev) + return; + + phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + + if (rx_en) + phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; + + if (tx_en) + phydev->advertising ^= ADVERTISED_Asym_Pause; +} + +static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) +{ + int ret; + + if (rx_en && tx_en) + hdev->fc_mode_last_time = HCLGE_FC_FULL; + else if (rx_en && !tx_en) + hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; + else if (!rx_en && tx_en) + hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; + else + hdev->fc_mode_last_time = HCLGE_FC_NONE; + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) + return 0; + + ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); + if (ret) { + dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", + ret); + return ret; + } + + hdev->tm_info.fc_mode = hdev->fc_mode_last_time; + + return 0; +} + +int hclge_cfg_flowctrl(struct hclge_dev *hdev) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + u16 remote_advertising = 0; + u16 local_advertising = 0; + u32 rx_pause, tx_pause; + u8 flowctl; + + if (!phydev->link || !phydev->autoneg) + return 0; + + if (phydev->advertising & ADVERTISED_Pause) + local_advertising = ADVERTISE_PAUSE_CAP; + + if 
(phydev->advertising & ADVERTISED_Asym_Pause) + local_advertising |= ADVERTISE_PAUSE_ASYM; + + if (phydev->pause) + remote_advertising = LPA_PAUSE_CAP; + + if (phydev->asym_pause) + remote_advertising |= LPA_PAUSE_ASYM; + + flowctl = mii_resolve_flowctrl_fdx(local_advertising, + remote_advertising); + tx_pause = flowctl & FLOW_CTRL_TX; + rx_pause = flowctl & FLOW_CTRL_RX; + + if (phydev->duplex == HCLGE_MAC_HALF) { + tx_pause = 0; + rx_pause = 0; + } + + return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); +} + static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, u32 *rx_en, u32 *tx_en) { @@ -4421,6 +5022,41 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, } } +static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, + u32 rx_en, u32 tx_en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + u32 fc_autoneg; + + /* Only support flow control negotiation for netdev with + * phy attached for now. + */ + if (!phydev) + return -EOPNOTSUPP; + + fc_autoneg = hclge_get_autoneg(handle); + if (auto_neg != fc_autoneg) { + dev_info(&hdev->pdev->dev, + "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); + return -EOPNOTSUPP; + } + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { + dev_info(&hdev->pdev->dev, + "Priority flow control enabled. Cannot set link flow control.\n"); + return -EOPNOTSUPP; + } + + hclge_set_flowctrl_adv(hdev, rx_en, tx_en); + + if (!fc_autoneg) + return hclge_cfg_pauseparam(hdev, rx_en, tx_en); + + return phy_start_aneg(phydev); +} + static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, u8 *auto_neg, u32 *speed, u8 *duplex) { @@ -4661,6 +5297,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev->pdev = pdev; hdev->ae_dev = ae_dev; hdev->reset_type = HNAE3_NONE_RESET; + hdev->reset_request = 0; + hdev->reset_pending = 0; ae_dev->priv = hdev; ret = hclge_pci_init(hdev); @@ -4768,16 +5406,28 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = init_mgr_tbl(hdev); + if (ret) { + dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); + return ret; + } + hclge_dcb_ops_set(hdev); timer_setup(&hdev->service_timer, hclge_service_timer, 0); INIT_WORK(&hdev->service_task, hclge_service_task); + INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); + INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); /* Enable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, true); set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); set_bit(HCLGE_STATE_DOWN, &hdev->state); + clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); return 0; @@ -4889,25 +5539,471 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) del_timer_sync(&hdev->service_timer); if (hdev->service_task.func) cancel_work_sync(&hdev->service_task); + if (hdev->rst_service_task.func) + cancel_work_sync(&hdev->rst_service_task); + if (hdev->mbx_service_task.func) + cancel_work_sync(&hdev->mbx_service_task); if (mac->phydev) mdiobus_unregister(mac->mdio_bus); /* Disable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, false); - hclge_free_vector(hdev, 0); hclge_destroy_cmd_queue(&hdev->hw); + 
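/* release the misc (vector0) IRQ resources before unmapping the PCI BARs */ +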
hclge_misc_irq_uninit(hdev); hclge_pci_uninit(hdev); ae_dev->priv = NULL; } +static u32 hclge_get_max_channels(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); +} + +static void hclge_get_channels(struct hnae3_handle *handle, + struct ethtool_channels *ch) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + ch->max_combined = hclge_get_max_channels(handle); + ch->other_count = 1; + ch->max_other = 1; + ch->combined_count = vport->alloc_tqps; +} + +static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, + u16 *free_tqps, u16 *max_rss_size) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u16 temp_tqps = 0; + int i; + + for (i = 0; i < hdev->num_tqps; i++) { + if (!hdev->htqp[i].alloced) + temp_tqps++; + } + *free_tqps = temp_tqps; + *max_rss_size = hdev->rss_size_max; +} + +static void hclge_release_tqp(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int i; + + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclge_tqp *tqp = + container_of(kinfo->tqp[i], struct hclge_tqp, q); + + tqp->q.handle = NULL; + tqp->q.tqp_index = 0; + tqp->alloced = false; + } + + devm_kfree(&hdev->pdev->dev, kinfo->tqp); + kinfo->tqp = NULL; +} + +static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int cur_rss_size = kinfo->rss_size; + int cur_tqps = kinfo->num_tqps; + u16 tc_offset[HCLGE_MAX_TC_NUM]; + u16 tc_valid[HCLGE_MAX_TC_NUM]; + u16 tc_size[HCLGE_MAX_TC_NUM]; + u16 roundup_size; + u32 *rss_indir; + int ret, i; + + hclge_release_tqp(vport); + + ret = hclge_knic_setup(vport, new_tqps_num); + if (ret) { + dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_map_tqp_to_vport(hdev, vport); + if (ret) { + dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_tm_schd_init(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); + return ret; + } + + roundup_size = roundup_pow_of_two(kinfo->rss_size); + roundup_size = ilog2(roundup_size); + /* Set the RSS TC mode according to the new RSS size */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + tc_valid[i] = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = kinfo->rss_size * i; + } + ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); + if (ret) + return ret; + + /* Reinitializes the rss indirect table according to the new RSS size */ + rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); + if (!rss_indir) + return -ENOMEM; + + for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) + rss_indir[i] = i % kinfo->rss_size; + + ret = hclge_set_rss(handle, rss_indir, NULL, 0); + if (ret) + dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", + ret); + + kfree(rss_indir); + + if (!ret) + dev_info(&hdev->pdev->dev, + "Channels changed, rss_size from %d to %d, tqps from %d to %d", + cur_rss_size, kinfo->rss_size, + cur_tqps, kinfo->rss_size * kinfo->num_tc); + + return ret; +} + +static int hclge_get_regs_num(struct 
hclge_dev *hdev, u32 *regs_num_32_bit, + u32 *regs_num_64_bit) +{ + struct hclge_desc desc; + u32 total_num; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query register number cmd failed, ret = %d.\n", ret); + return ret; + } + + *regs_num_32_bit = le32_to_cpu(desc.data[0]); + *regs_num_64_bit = le32_to_cpu(desc.data[1]); + + total_num = *regs_num_32_bit + *regs_num_64_bit; + if (!total_num) + return -EINVAL; + + return 0; +} + +static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, + void *data) +{ +#define HCLGE_32_BIT_REG_RTN_DATANUM 8 + + struct hclge_desc *desc; + u32 *reg_val = data; + __le32 *desc_data; + int cmd_num; + int i, k, n; + int ret; + + if (regs_num == 0) + return 0; + + cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM); + desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true); + ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query 32 bit register cmd failed, ret = %d.\n", ret); + kfree(desc); + return ret; + } + + for (i = 0; i < cmd_num; i++) { + if (i == 0) { + desc_data = (__le32 *)(&desc[i].data[0]); + n = HCLGE_32_BIT_REG_RTN_DATANUM - 2; + } else { + desc_data = (__le32 *)(&desc[i]); + n = HCLGE_32_BIT_REG_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *reg_val++ = le32_to_cpu(*desc_data++); + + regs_num--; + if (!regs_num) + break; + } + } + + kfree(desc); + return 0; +} + +static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, + void *data) +{ +#define HCLGE_64_BIT_REG_RTN_DATANUM 4 + + struct hclge_desc *desc; + u64 *reg_val = data; + __le64 *desc_data; + int cmd_num; + int i, k, n; + int ret; + + if (regs_num == 0) + return 0; + + cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM); + desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true); + ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query 64 bit register cmd failed, ret = %d.\n", ret); + kfree(desc); + return ret; + } + + for (i = 0; i < cmd_num; i++) { + if (i == 0) { + desc_data = (__le64 *)(&desc[i].data[0]); + n = HCLGE_64_BIT_REG_RTN_DATANUM - 1; + } else { + desc_data = (__le64 *)(&desc[i]); + n = HCLGE_64_BIT_REG_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *reg_val++ = le64_to_cpu(*desc_data++); + + regs_num--; + if (!regs_num) + break; + } + } + + kfree(desc); + return 0; +} + +static int hclge_get_regs_len(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 regs_num_32_bit, regs_num_64_bit; + int ret; + + ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get register number failed, ret = %d.\n", ret); + return -EOPNOTSUPP; + } + + return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64); +} + +static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, + void *data) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 regs_num_32_bit, regs_num_64_bit; + int ret; + + *version = hdev->fw_version; + + ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit); + if (ret) { + 
dev_err(&hdev->pdev->dev, + "Get register number failed, ret = %d.\n", ret); + return; + } + + ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get 32 bit register failed, ret = %d.\n", ret); + return; + } + + data = (u32 *)data + regs_num_32_bit; + ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, + data); + if (ret) + dev_err(&hdev->pdev->dev, + "Get 64 bit register failed, ret = %d.\n", ret); +} + +static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status, + u8 act_led_status, u8 link_led_status, + u8 locate_led_status) +{ + struct hclge_set_led_state_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); + + req = (struct hclge_set_led_state_cmd *)desc.data; + hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M, + HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status); + hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M, + HCLGE_LED_ACTIVITY_STATE_S, act_led_status); + hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M, + HCLGE_LED_LINK_STATE_S, link_led_status); + hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, + HCLGE_LED_LOCATE_STATE_S, locate_led_status); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "Send set led state cmd error, ret =%d\n", ret); + + return ret; +} + +enum hclge_led_status { + HCLGE_LED_OFF, + HCLGE_LED_ON, + HCLGE_LED_NO_CHANGE = 0xFF, +}; + +static int hclge_set_led_id(struct hnae3_handle *handle, + enum ethtool_phys_id_state status) +{ +#define BLINK_FREQUENCY 2 + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + int ret = 0; + + if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) + return -EOPNOTSUPP; + + switch (status) { + case ETHTOOL_ID_ACTIVE: + ret = hclge_set_led_status_sfp(hdev, + HCLGE_LED_NO_CHANGE, + HCLGE_LED_NO_CHANGE, + HCLGE_LED_NO_CHANGE, + HCLGE_LED_ON); + break; + case ETHTOOL_ID_INACTIVE: + ret = hclge_set_led_status_sfp(hdev, + HCLGE_LED_NO_CHANGE, + HCLGE_LED_NO_CHANGE, + HCLGE_LED_NO_CHANGE, + HCLGE_LED_OFF); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +enum hclge_led_port_speed { + HCLGE_SPEED_LED_FOR_1G, + HCLGE_SPEED_LED_FOR_10G, + HCLGE_SPEED_LED_FOR_25G, + HCLGE_SPEED_LED_FOR_40G, + HCLGE_SPEED_LED_FOR_50G, + HCLGE_SPEED_LED_FOR_100G, +}; + +static u8 hclge_led_get_speed_status(u32 speed) +{ + u8 speed_led; + + switch (speed) { + case HCLGE_MAC_SPEED_1G: + speed_led = HCLGE_SPEED_LED_FOR_1G; + break; + case HCLGE_MAC_SPEED_10G: + speed_led = HCLGE_SPEED_LED_FOR_10G; + break; + case HCLGE_MAC_SPEED_25G: + speed_led = HCLGE_SPEED_LED_FOR_25G; + break; + case HCLGE_MAC_SPEED_40G: + speed_led = HCLGE_SPEED_LED_FOR_40G; + break; + case HCLGE_MAC_SPEED_50G: + speed_led = HCLGE_SPEED_LED_FOR_50G; + break; + case HCLGE_MAC_SPEED_100G: + speed_led = HCLGE_SPEED_LED_FOR_100G; + break; + default: + speed_led = HCLGE_LED_NO_CHANGE; + } + + return speed_led; +} + +static int hclge_update_led_status(struct hclge_dev *hdev) +{ + u8 port_speed_status, link_status, activity_status; + u64 rx_pkts, tx_pkts; + + if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) + return 0; + + port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed); + + rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num; + tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num; + if (rx_pkts != 
hdev->rx_pkts_for_led || + tx_pkts != hdev->tx_pkts_for_led) + activity_status = HCLGE_LED_ON; + else + activity_status = HCLGE_LED_OFF; + hdev->rx_pkts_for_led = rx_pkts; + hdev->tx_pkts_for_led = tx_pkts; + + if (hdev->hw.mac.link) + link_status = HCLGE_LED_ON; + else + link_status = HCLGE_LED_OFF; + + return hclge_set_led_status_sfp(hdev, port_speed_status, + activity_status, link_status, + HCLGE_LED_NO_CHANGE); +} + static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, .init_client_instance = hclge_init_client_instance, .uninit_client_instance = hclge_uninit_client_instance, - .map_ring_to_vector = hclge_map_handle_ring_to_vector, - .unmap_ring_from_vector = hclge_unmap_ring_from_vector, + .map_ring_to_vector = hclge_map_ring_to_vector, + .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, .get_vector = hclge_get_vector, .set_promisc_mode = hclge_set_promisc_mode, .set_loopback = hclge_set_loopback, @@ -4934,6 +6030,7 @@ static const struct hnae3_ae_ops hclge_ops = { .set_autoneg = hclge_set_autoneg, .get_autoneg = hclge_get_autoneg, .get_pauseparam = hclge_get_pauseparam, + .set_pauseparam = hclge_set_pauseparam, .set_mtu = hclge_set_mtu, .reset_queue = hclge_reset_tqp, .get_stats = hclge_get_stats, @@ -4942,9 +6039,18 @@ static const struct hnae3_ae_ops hclge_ops = { .get_sset_count = hclge_get_sset_count, .get_fw_version = hclge_get_fw_version, .get_mdix_mode = hclge_get_mdix_mode, + .enable_vlan_filter = hclge_enable_vlan_filter, .set_vlan_filter = hclge_set_port_vlan_filter, .set_vf_vlan_filter = hclge_set_vf_vlan_filter, + .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, .reset_event = hclge_reset_event, + .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, + .set_channels = hclge_set_channels, + .get_channels = hclge_get_channels, + .get_flowctrl_adv = hclge_get_flowctrl_adv, + .get_regs_len = hclge_get_regs_len, + .get_regs = hclge_get_regs, + .set_led_id = hclge_set_led_id, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 7027814ea5d7..d99a76a9557c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -79,6 +79,10 @@ #define HCLGE_PHY_MDIX_STATUS_B (6) #define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11) +/* Factor used to calculate offset and bitmap of VF num */ +#define HCLGE_VF_NUM_PER_CMD 64 +#define HCLGE_VF_NUM_PER_BYTE 8 + /* Reset related Registers */ #define HCLGE_MISC_RESET_STS_REG 0x20700 #define HCLGE_GLOBAL_RESET_REG 0x20A00 @@ -92,6 +96,16 @@ #define HCLGE_VECTOR0_CORERESET_INT_B 6 #define HCLGE_VECTOR0_IMPRESET_INT_B 7 +/* Vector0 interrupt CMDQ event source register(RW) */ +#define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100 +/* CMDQ register bits for RX event(=MBX event) */ +#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1 + +#define HCLGE_MAC_DEFAULT_FRAME \ + (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN) +#define HCLGE_MAC_MIN_FRAME 64 +#define HCLGE_MAC_MAX_FRAME 9728 + enum HCLGE_DEV_STATE { HCLGE_STATE_REINITING, HCLGE_STATE_DOWN, @@ -99,12 +113,20 @@ enum HCLGE_DEV_STATE { HCLGE_STATE_REMOVING, HCLGE_STATE_SERVICE_INITED, HCLGE_STATE_SERVICE_SCHED, + HCLGE_STATE_RST_SERVICE_SCHED, + HCLGE_STATE_RST_HANDLING, + HCLGE_STATE_MBX_SERVICE_SCHED, HCLGE_STATE_MBX_HANDLING, - HCLGE_STATE_MBX_IRQ, - HCLGE_STATE_RESET_INT, + HCLGE_STATE_STATISTICS_UPDATING, HCLGE_STATE_MAX }; +enum hclge_evt_cause { + 
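/* interrupt event sources decoded from the misc (vector0) status registers */ +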
HCLGE_VECTOR0_EVENT_RST, + HCLGE_VECTOR0_EVENT_MBX, + HCLGE_VECTOR0_EVENT_OTHER, +}; + #define HCLGE_MPF_ENBALE 1 struct hclge_caps { u16 num_tqp; @@ -208,6 +230,7 @@ struct hclge_cfg { u8 tc_num; u16 tqp_desc_num; u16 rx_buf_len; + u16 rss_size_max; u8 phy_addr; u8 media_type; u8 mac_addr[ETH_ALEN]; @@ -364,14 +387,23 @@ struct hclge_mac_stats { u64 mac_tx_multi_pkt_num; u64 mac_tx_broad_pkt_num; u64 mac_tx_undersize_pkt_num; - u64 mac_tx_overrsize_pkt_num; + u64 mac_tx_oversize_pkt_num; u64 mac_tx_64_oct_pkt_num; u64 mac_tx_65_127_oct_pkt_num; u64 mac_tx_128_255_oct_pkt_num; u64 mac_tx_256_511_oct_pkt_num; u64 mac_tx_512_1023_oct_pkt_num; u64 mac_tx_1024_1518_oct_pkt_num; - u64 mac_tx_1519_max_oct_pkt_num; + u64 mac_tx_1519_2047_oct_pkt_num; + u64 mac_tx_2048_4095_oct_pkt_num; + u64 mac_tx_4096_8191_oct_pkt_num; + u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */ + u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */ + u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC */ + u64 mac_tx_12288_16383_oct_pkt_num; + u64 mac_tx_1519_max_good_oct_pkt_num; + u64 mac_tx_1519_max_bad_oct_pkt_num; + u64 mac_rx_total_pkt_num; u64 mac_rx_total_oct_num; u64 mac_rx_good_pkt_num; @@ -382,33 +414,52 @@ struct hclge_mac_stats { u64 mac_rx_multi_pkt_num; u64 mac_rx_broad_pkt_num; u64 mac_rx_undersize_pkt_num; - u64 mac_rx_overrsize_pkt_num; + u64 mac_rx_oversize_pkt_num; u64 mac_rx_64_oct_pkt_num; u64 mac_rx_65_127_oct_pkt_num; u64 mac_rx_128_255_oct_pkt_num; u64 mac_rx_256_511_oct_pkt_num; u64 mac_rx_512_1023_oct_pkt_num; u64 mac_rx_1024_1518_oct_pkt_num; - u64 mac_rx_1519_max_oct_pkt_num; - - u64 mac_trans_fragment_pkt_num; - u64 mac_trans_undermin_pkt_num; - u64 mac_trans_jabber_pkt_num; - u64 mac_trans_err_all_pkt_num; - u64 mac_trans_from_app_good_pkt_num; - u64 mac_trans_from_app_bad_pkt_num; - u64 mac_rcv_fragment_pkt_num; - u64 mac_rcv_undermin_pkt_num; - u64 mac_rcv_jabber_pkt_num; - u64 mac_rcv_fcs_err_pkt_num; - u64 mac_rcv_send_app_good_pkt_num; - u64 mac_rcv_send_app_bad_pkt_num; + u64 mac_rx_1519_2047_oct_pkt_num; + u64 mac_rx_2048_4095_oct_pkt_num; + u64 mac_rx_4096_8191_oct_pkt_num; + u64 mac_rx_8192_12287_oct_pkt_num;/* valid for GE MAC only */ + u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */ + u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */ + u64 mac_rx_12288_16383_oct_pkt_num; + u64 mac_rx_1519_max_good_oct_pkt_num; + u64 mac_rx_1519_max_bad_oct_pkt_num; + + u64 mac_tx_fragment_pkt_num; + u64 mac_tx_undermin_pkt_num; + u64 mac_tx_jabber_pkt_num; + u64 mac_tx_err_all_pkt_num; + u64 mac_tx_from_app_good_pkt_num; + u64 mac_tx_from_app_bad_pkt_num; + u64 mac_rx_fragment_pkt_num; + u64 mac_rx_undermin_pkt_num; + u64 mac_rx_jabber_pkt_num; + u64 mac_rx_fcs_err_pkt_num; + u64 mac_rx_send_app_good_pkt_num; + u64 mac_rx_send_app_bad_pkt_num; }; +#define HCLGE_STATS_TIMER_INTERVAL (60 * 5) struct hclge_hw_stats { struct hclge_mac_stats mac_stats; struct hclge_64_bit_stats all_64_bit_stats; struct hclge_32_bit_stats all_32_bit_stats; + u32 stats_timer; +}; + +struct hclge_vlan_type_cfg { + u16 rx_ot_fst_vlan_type; + u16 rx_ot_sec_vlan_type; + u16 rx_in_fst_vlan_type; + u16 rx_in_sec_vlan_type; + u16 tx_ot_vlan_type; + u16 tx_in_vlan_type; }; struct hclge_dev { @@ -420,6 +471,8 @@ struct hclge_dev { unsigned long state; enum hnae3_reset_type reset_type; + unsigned long reset_request; /* reset has been requested */ + unsigned long reset_pending; /* client rst is pending to be served */ u32 fw_version; u16 
num_vmdq_vport; /* Num vmdq vport this PF has set up */ u16 num_tqps; /* Num task queue pairs of this PF */ @@ -469,6 +522,8 @@ struct hclge_dev { unsigned long service_timer_previous; struct timer_list service_timer; struct work_struct service_task; + struct work_struct rst_service_task; + struct work_struct mbx_service_task; bool cur_promisc; int num_alloc_vfs; /* Actual number of VFs allocated */ @@ -493,6 +548,29 @@ struct hclge_dev { enum hclge_mta_dmac_sel_type mta_mac_sel_type; bool enable_mta; /* Mutilcast filter enable */ bool accept_mta_mc; /* Whether accept mta filter multicast */ + + struct hclge_vlan_type_cfg vlan_type_cfg; + + u64 rx_pkts_for_led; + u64 tx_pkts_for_led; +}; + +/* VPort level vlan tag configuration for TX direction */ +struct hclge_tx_vtag_cfg { + bool accept_tag; /* Whether accept tagged packet from host */ + bool accept_untag; /* Whether accept untagged packet from host */ + bool insert_tag1_en; /* Whether insert inner vlan tag */ + bool insert_tag2_en; /* Whether insert outer vlan tag */ + u16 default_tag1; /* The default inner vlan tag to insert */ + u16 default_tag2; /* The default outer vlan tag to insert */ +}; + +/* VPort level vlan tag configuration for RX direction */ +struct hclge_rx_vtag_cfg { + bool strip_tag1_en; /* Whether strip inner vlan tag */ + bool strip_tag2_en; /* Whether strip outer vlan tag */ + bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */ + bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */ }; struct hclge_vport { @@ -507,6 +585,9 @@ struct hclge_vport { u16 bw_limit; /* VSI BW Limit (0 = disabled) */ u8 dwrr; + struct hclge_tx_vtag_cfg txvlan_cfg; + struct hclge_rx_vtag_cfg rxvlan_cfg; + int vport_id; struct hclge_dev *back; /* Back reference to associated dev */ struct hnae3_handle nic; @@ -529,8 +610,10 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, u8 func_id, bool enable); struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle); -int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector, - struct hnae3_ring_chain_node *ring_chain); +int hclge_bind_ring_with_vector(struct hclge_vport *vport, + int vector_id, bool en, + struct hnae3_ring_chain_node *ring_chain); + static inline int hclge_get_queue_id(struct hnae3_queue *queue) { struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q); @@ -544,4 +627,8 @@ int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid, int hclge_buffer_alloc(struct hclge_dev *hdev); int hclge_rss_init_hw(struct hclge_dev *hdev); + +void hclge_mbx_handler(struct hclge_dev *hdev); +void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id); +int hclge_cfg_flowctrl(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c new file mode 100644 index 000000000000..f38fc5ce9f51 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -0,0 +1,418 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include "hclge_main.h" +#include "hclge_mbx.h" +#include "hnae3.h" + +/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF + * receives a mailbox message from VF. + * @vport: pointer to struct hclge_vport + * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox + * message + * @resp_status: indicate to VF whether its request success(0) or failed. 
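+ * @resp_data: payload data returned to the VF along with the response
+ * @resp_data_len: byte length of @resp_data (at most HCLGE_MBX_MAX_RESP_DATA_SIZE)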
+ */ +static int hclge_gen_resp_to_vf(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req, + int resp_status, + u8 *resp_data, u16 resp_data_len) +{ + struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf; + struct hclge_dev *hdev = vport->back; + enum hclge_cmd_status status; + struct hclge_desc desc; + + resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data; + + if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) { + dev_err(&hdev->pdev->dev, + "PF fail to gen resp to VF len %d exceeds max len %d\n", + resp_data_len, + HCLGE_MBX_MAX_RESP_DATA_SIZE); + } + + hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false); + + resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid; + resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len; + + resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP; + resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0]; + resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1]; + resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1; + + if (resp_data && resp_data_len > 0) + memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "PF failed(=%d) to send response to VF\n", status); + + return status; +} + +static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, + u16 mbx_opcode, u8 dest_vfid) +{ + struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf; + struct hclge_dev *hdev = vport->back; + enum hclge_cmd_status status; + struct hclge_desc desc; + + resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false); + + resp_pf_to_vf->dest_vfid = dest_vfid; + resp_pf_to_vf->msg_len = msg_len; + resp_pf_to_vf->msg[0] = mbx_opcode; + + memcpy(&resp_pf_to_vf->msg[1], msg, msg_len); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "PF failed(=%d) to send mailbox message to VF\n", + status); + + return status; +} + +static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head) +{ + struct hnae3_ring_chain_node *chain_tmp, *chain; + + chain = head->next; + + while (chain) { + chain_tmp = chain->next; + kzfree(chain); + chain = chain_tmp; + } +} + +/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message + * msg[0]: opcode + * msg[1]: <not relevant to this function> + * msg[2]: ring_num + * msg[3]: first ring type (TX|RX) + * msg[4]: first tqp id + * msg[5] ~ msg[14]: other ring type and tqp id + */ +static int hclge_get_ring_chain_from_mbx( + struct hclge_mbx_vf_to_pf_cmd *req, + struct hnae3_ring_chain_node *ring_chain, + struct hclge_vport *vport) +{ +#define HCLGE_RING_NODE_VARIABLE_NUM 3 +#define HCLGE_RING_MAP_MBX_BASIC_MSG_NUM 3 + struct hnae3_ring_chain_node *cur_chain, *new_chain; + int ring_num; + int i; + + ring_num = req->msg[2]; + + hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]); + ring_chain->tqp_index = + hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]); + hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + req->msg[5]); + + cur_chain = ring_chain; + + for (i = 1; i < ring_num; i++) { + new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL); + if (!new_chain) + goto err; + + hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B, + req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i + + HCLGE_RING_MAP_MBX_BASIC_MSG_NUM]); + + new_chain->tqp_index = + hclge_get_queue_id(vport->nic.kinfo.tqp + [req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i + + HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 1]]); + + 
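/* rings after the first are packed in msg[] as consecutive (ring type, tqp id, GL index) triplets */ +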
hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i + + HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 2]); + + cur_chain->next = new_chain; + cur_chain = new_chain; + } + + return 0; +err: + hclge_free_vector_ring_chain(ring_chain); + return -ENOMEM; +} + +static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en, + struct hclge_mbx_vf_to_pf_cmd *req) +{ + struct hnae3_ring_chain_node ring_chain; + int vector_id = req->msg[1]; + int ret; + + memset(&ring_chain, 0, sizeof(ring_chain)); + ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport); + if (ret) + return ret; + + ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain); + if (ret) + return ret; + + hclge_free_vector_ring_chain(&ring_chain); + + return 0; +} + +static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *req) +{ + bool en = req->msg[1] ? true : false; + struct hclge_promisc_param param; + + /* always enable broadcast promisc bit */ + hclge_promisc_param_init(&param, en, en, true, vport->vport_id); + return hclge_cmd_set_promisc_mode(vport->back, &param); +} + +static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + bool gen_resp) +{ + const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]); + struct hclge_dev *hdev = vport->back; + int status; + + if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) { + const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]); + + hclge_rm_uc_addr_common(vport, old_addr); + status = hclge_add_uc_addr_common(vport, mac_addr); + } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) { + status = hclge_add_uc_addr_common(vport, mac_addr); + } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) { + status = hclge_rm_uc_addr_common(vport, mac_addr); + } else { + dev_err(&hdev->pdev->dev, + "failed to set unicast mac addr, unknown subcode %d\n", + mbx_req->msg[1]); + return -EIO; + } + + if (gen_resp) + hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); + + return 0; +} + +static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + bool gen_resp) +{ + const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]); + struct hclge_dev *hdev = vport->back; + int status; + + if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) { + status = hclge_add_mc_addr_common(vport, mac_addr); + } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { + status = hclge_rm_mc_addr_common(vport, mac_addr); + } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) { + u8 func_id = vport->vport_id; + bool enable = mbx_req->msg[2]; + + status = hclge_cfg_func_mta_filter(hdev, func_id, enable); + } else { + dev_err(&hdev->pdev->dev, + "failed to set mcast mac addr, unknown subcode %d\n", + mbx_req->msg[1]); + return -EIO; + } + + if (gen_resp) + hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); + + return 0; +} + +static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + bool gen_resp) +{ + struct hclge_dev *hdev = vport->back; + int status = 0; + + if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) { + u16 vlan, proto; + bool is_kill; + + is_kill = !!mbx_req->msg[2]; + memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan)); + memcpy(&proto, &mbx_req->msg[5], sizeof(proto)); + status = hclge_set_vf_vlan_common(hdev, vport->vport_id, + is_kill, vlan, 0, + cpu_to_be16(proto)); + } + + if (gen_resp) + status = hclge_gen_resp_to_vf(vport, mbx_req, 
status, NULL, 0); + + return status; +} + +static int hclge_get_vf_tcinfo(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + bool gen_resp) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map, + sizeof(u8)); + + return ret; +} + +static int hclge_get_vf_queue_info(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + bool gen_resp) +{ +#define HCLGE_TQPS_RSS_INFO_LEN 8 + u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN]; + struct hclge_dev *hdev = vport->back; + + /* get the queue related info */ + memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16)); + memcpy(&resp_data[2], &hdev->rss_size_max, sizeof(u16)); + memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16)); + memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16)); + + return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, + HCLGE_TQPS_RSS_INFO_LEN); +} + +static int hclge_get_link_info(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + struct hclge_dev *hdev = vport->back; + u16 link_status; + u8 msg_data[2]; + u8 dest_vfid; + + /* mac.link can only be 0 or 1 */ + link_status = (u16)hdev->hw.mac.link; + memcpy(&msg_data[0], &link_status, sizeof(u16)); + dest_vfid = mbx_req->mbx_src_vfid; + + /* send this requested info to VF */ + return hclge_send_mbx_msg(vport, msg_data, sizeof(u8), + HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid); +} + +static void hclge_reset_vf_queue(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + u16 queue_id; + + memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id)); + + hclge_reset_tqp(&vport->nic, queue_id); +} + +void hclge_mbx_handler(struct hclge_dev *hdev) +{ + struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq; + struct hclge_mbx_vf_to_pf_cmd *req; + struct hclge_vport *vport; + struct hclge_desc *desc; + int ret; + + /* handle all the mailbox requests in the queue */ + while (hnae_get_bit(crq->desc[crq->next_to_use].flag, + HCLGE_CMDQ_RX_OUTVLD_B)) { + desc = &crq->desc[crq->next_to_use]; + req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; + + vport = &hdev->vport[req->mbx_src_vfid]; + + switch (req->msg[0]) { + case HCLGE_MBX_MAP_RING_TO_VECTOR: + ret = hclge_map_unmap_ring_to_vf_vector(vport, true, + req); + break; + case HCLGE_MBX_UNMAP_RING_TO_VECTOR: + ret = hclge_map_unmap_ring_to_vf_vector(vport, false, + req); + break; + case HCLGE_MBX_SET_PROMISC_MODE: + ret = hclge_set_vf_promisc_mode(vport, req); + if (ret) + dev_err(&hdev->pdev->dev, + "PF fail(%d) to set VF promisc mode\n", + ret); + break; + case HCLGE_MBX_SET_UNICAST: + ret = hclge_set_vf_uc_mac_addr(vport, req, false); + if (ret) + dev_err(&hdev->pdev->dev, + "PF fail(%d) to set VF UC MAC Addr\n", + ret); + break; + case HCLGE_MBX_SET_MULTICAST: + ret = hclge_set_vf_mc_mac_addr(vport, req, false); + if (ret) + dev_err(&hdev->pdev->dev, + "PF fail(%d) to set VF MC MAC Addr\n", + ret); + break; + case HCLGE_MBX_SET_VLAN: + ret = hclge_set_vf_vlan_cfg(vport, req, false); + if (ret) + dev_err(&hdev->pdev->dev, + "PF failed(%d) to config VF's VLAN\n", + ret); + break; + case HCLGE_MBX_GET_QINFO: + ret = hclge_get_vf_queue_info(vport, req, true); + if (ret) + dev_err(&hdev->pdev->dev, + "PF failed(%d) to get Q info for VF\n", + ret); + break; + case HCLGE_MBX_GET_TCINFO: + ret = hclge_get_vf_tcinfo(vport, req, true); + if (ret) + dev_err(&hdev->pdev->dev, + "PF failed(%d) to get TC info for VF\n", + ret); + break; + case HCLGE_MBX_GET_LINK_STATUS: + ret = hclge_get_link_info(vport, req); + if (ret) + 
dev_err(&hdev->pdev->dev, + "PF fail(%d) to get link stat for VF\n", + ret); + break; + case HCLGE_MBX_QUEUE_RESET: + hclge_reset_vf_queue(vport, req); + break; + default: + dev_err(&hdev->pdev->dev, + "un-supported mailbox message, code = %d\n", + req->msg[0]); + break; + } + hclge_mbx_ring_ptr_move_crq(crq); + } + + /* Write back CMDQ_RQ header pointer, M7 need this pointer */ + hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 7069e9408d7d..c1dea3a47bdd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -17,6 +17,7 @@ #define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \ SUPPORTED_TP | \ SUPPORTED_Pause | \ + SUPPORTED_Asym_Pause | \ PHY_10BT_FEATURES | \ PHY_100BT_FEATURES | \ PHY_1000BT_FEATURES) @@ -183,6 +184,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev) ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); if (ret) netdev_err(netdev, "failed to adjust link.\n"); + + ret = hclge_cfg_flowctrl(hdev); + if (ret) + netdev_err(netdev, "failed to configure flow control.\n"); } int hclge_mac_start_phy(struct hclge_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 7bfa2e5497cb..36bd79a77940 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -23,8 +23,8 @@ enum hclge_shaper_level { HCLGE_SHAPER_LVL_PF = 1, }; -#define HCLGE_SHAPER_BS_U_DEF 1 -#define HCLGE_SHAPER_BS_S_DEF 4 +#define HCLGE_SHAPER_BS_U_DEF 5 +#define HCLGE_SHAPER_BS_S_DEF 20 #define HCLGE_ETHER_MAX_RATE 100000 @@ -112,7 +112,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, return 0; } -static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx) +int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx) { struct hclge_desc desc; @@ -138,6 +138,46 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, return hclge_cmd_send(&hdev->hw, &desc, 1); } +static int hclge_mac_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, + u8 pause_trans_gap, u16 pause_trans_time) +{ + struct hclge_cfg_pause_param_cmd *pause_param; + struct hclge_desc desc; + + pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false); + + ether_addr_copy(pause_param->mac_addr, addr); + pause_param->pause_trans_gap = pause_trans_gap; + pause_param->pause_trans_time = cpu_to_le16(pause_trans_time); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) +{ + struct hclge_cfg_pause_param_cmd *pause_param; + struct hclge_desc desc; + u16 trans_time; + u8 trans_gap; + int ret; + + pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + return ret; + + trans_gap = pause_param->pause_trans_gap; + trans_time = le16_to_cpu(pause_param->pause_trans_time); + + return hclge_mac_pause_param_cfg(hdev, mac_addr, trans_gap, + trans_time); +} + static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) { u8 tc; @@ -1056,6 +1096,15 @@ static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) return 
hclge_tm_schd_mode_hw(hdev); } +static int hclge_mac_pause_param_setup_hw(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + return hclge_mac_pause_param_cfg(hdev, mac->mac_addr, + HCLGE_DEFAULT_PAUSE_TRANS_GAP, + HCLGE_DEFAULT_PAUSE_TRANS_TIME); +} + static int hclge_pfc_setup_hw(struct hclge_dev *hdev) { u8 enable_bitmap = 0; @@ -1102,8 +1151,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) int ret; u8 i; - if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) - return hclge_mac_pause_setup_hw(hdev); + if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) { + ret = hclge_mac_pause_setup_hw(hdev); + if (ret) + return ret; + + return hclge_mac_pause_param_setup_hw(hdev); + } /* Only DCB-supported dev supports qset back pressure and pfc cmd */ if (!hnae3_dev_dcb_supported(hdev)) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index bf59961918ab..5401e7559437 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -18,6 +18,9 @@ #define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0) +#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0xFF +#define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF + /* SP or DWRR */ #define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0) #define HCLGE_TM_TX_SCHD_SP_MSK (0xFE) @@ -99,6 +102,13 @@ struct hclge_pfc_en_cmd { u8 pri_en_bitmap; }; +struct hclge_cfg_pause_param_cmd { + u8 mac_addr[ETH_ALEN]; + u8 pause_trans_gap; + u8 rsvd; + __le16 pause_trans_time; +}; + struct hclge_port_shapping_cmd { __le32 port_shapping_para; }; @@ -118,4 +128,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); int hclge_tm_map_cfg(struct hclge_dev *hdev); int hclge_tm_init_hw(struct hclge_dev *hdev); +int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx); +int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile new file mode 100644 index 000000000000..fb93bbd35845 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Makefile for the HISILICON network device drivers. +# + +ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 + +obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o +hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c new file mode 100644 index 000000000000..85985e731311 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include "hclgevf_cmd.h" +#include "hclgevf_main.h" +#include "hnae3.h" + +#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ) +#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \ + DMA_TO_DEVICE : DMA_FROM_DEVICE) +#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) + +static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring) +{ + int ntc = ring->next_to_clean; + int ntu = ring->next_to_use; + int used; + + used = (ntu - ntc + ring->desc_num) % ring->desc_num; + + return ring->desc_num - used - 1; +} + +static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw) +{ + struct hclgevf_cmq_ring *csq = &hw->cmq.csq; + u16 ntc = csq->next_to_clean; + struct hclgevf_desc *desc; + int clean = 0; + u32 head; + + desc = &csq->desc[ntc]; + head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG); + while (head != ntc) { + memset(desc, 0, sizeof(*desc)); + ntc++; + if (ntc == csq->desc_num) + ntc = 0; + desc = &csq->desc[ntc]; + clean++; + } + csq->next_to_clean = ntc; + + return clean; +} + +static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw) +{ + u32 head; + + head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG); + + return head == hw->cmq.csq.next_to_use; +} + +static bool hclgevf_is_special_opcode(u16 opcode) +{ + u16 spec_opcode[] = {0x30, 0x31, 0x32}; + int i; + + for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { + if (spec_opcode[i] == opcode) + return true; + } + + return false; +} + +static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) +{ + int size = ring->desc_num * sizeof(struct hclgevf_desc); + + ring->desc = kzalloc(size, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc, + size, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) { + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; + return -ENOMEM; + } + + return 0; +} + +static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring) +{ + dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr, + ring->desc_num * sizeof(ring->desc[0]), + hclgevf_ring_to_dma_dir(ring)); + + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; +} + +static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, + struct hclgevf_cmq_ring *ring) +{ + struct hclgevf_hw *hw = &hdev->hw; + int ring_type = ring->flag; + u32 reg_val; + int ret; + + ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM; + spin_lock_init(&ring->lock); + ring->next_to_clean = 0; + ring->next_to_use = 0; + ring->dev = hdev; + + /* allocate CSQ/CRQ descriptor */ + ret = hclgevf_alloc_cmd_desc(ring); + if (ret) { + dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret, + (ring_type == HCLGEVF_TYPE_CSQ) ? 
"CSQ" : "CRQ"); + return ret; + } + + /* initialize the hardware registers with csq/crq dma-address, + * descriptor number, head & tail pointers + */ + switch (ring_type) { + case HCLGEVF_TYPE_CSQ: + reg_val = (u32)ring->desc_dma_addr; + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val); + reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); + + reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); + reg_val |= HCLGEVF_NIC_CMQ_ENABLE; + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val); + + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); + break; + case HCLGEVF_TYPE_CRQ: + reg_val = (u32)ring->desc_dma_addr; + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); + reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val); + + reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); + reg_val |= HCLGEVF_NIC_CMQ_ENABLE; + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val); + + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); + break; + } + + return 0; +} + +void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, + enum hclgevf_opcode_type opcode, bool is_read) +{ + memset(desc, 0, sizeof(struct hclgevf_desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR | + HCLGEVF_CMD_FLAG_IN); + if (is_read) + desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR); + else + desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR); +} + +/* hclgevf_cmd_send - send command to command queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor for describing the command + * @num : the number of descriptors to be sent + * + * This is the main send command for command queue, it + * sends the queue, cleans the queue, etc + */ +int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num) +{ + struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev; + struct hclgevf_desc *desc_to_use; + bool complete = false; + u32 timeout = 0; + int handle = 0; + int status = 0; + u16 retval; + u16 opcode; + int ntc; + + spin_lock_bh(&hw->cmq.csq.lock); + + if (num > hclgevf_ring_space(&hw->cmq.csq)) { + spin_unlock_bh(&hw->cmq.csq.lock); + return -EBUSY; + } + + /* Record the location of desc in the ring for this time + * which will be use for hardware to write back + */ + ntc = hw->cmq.csq.next_to_use; + opcode = le16_to_cpu(desc[0].opcode); + while (handle < num) { + desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; + *desc_to_use = desc[handle]; + (hw->cmq.csq.next_to_use)++; + if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num) + hw->cmq.csq.next_to_use = 0; + handle++; + } + + /* Write to hardware */ + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, + hw->cmq.csq.next_to_use); + + /* If the command is sync, wait for the firmware to write back, + * if multi descriptors to be sent, use the first one to check + */ + if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) { + do { + if (hclgevf_cmd_csq_done(hw)) + break; + udelay(1); + timeout++; + } while (timeout < hw->cmq.tx_timeout); + } + + if (hclgevf_cmd_csq_done(hw)) { + complete = true; + handle = 0; + + while (handle < num) { + /* Get the result of hardware write back */ + desc_to_use = &hw->cmq.csq.desc[ntc]; + desc[handle] = *desc_to_use; + + if (likely(!hclgevf_is_special_opcode(opcode))) + retval = 
le16_to_cpu(desc[handle].retval); + else + retval = le16_to_cpu(desc[0].retval); + + if ((enum hclgevf_cmd_return_status)retval == + HCLGEVF_CMD_EXEC_SUCCESS) + status = 0; + else + status = -EIO; + hw->cmq.last_status = (enum hclgevf_cmd_status)retval; + ntc++; + handle++; + if (ntc == hw->cmq.csq.desc_num) + ntc = 0; + } + } + + if (!complete) + status = -EAGAIN; + + /* Clean the command send queue */ + handle = hclgevf_cmd_csq_clean(hw); + if (handle != num) { + dev_warn(&hdev->pdev->dev, + "cleaned %d, need to clean %d\n", handle, num); + } + + spin_unlock_bh(&hw->cmq.csq.lock); + + return status; +} + +static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw, + u32 *version) +{ + struct hclgevf_query_version_cmd *resp; + struct hclgevf_desc desc; + int status; + + resp = (struct hclgevf_query_version_cmd *)desc.data; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1); + status = hclgevf_cmd_send(hw, &desc, 1); + if (!status) + *version = le32_to_cpu(resp->firmware); + + return status; +} + +int hclgevf_cmd_init(struct hclgevf_dev *hdev) +{ + u32 version; + int ret; + + /* setup Tx write back timeout */ + hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT; + + /* setup queue CSQ/CRQ rings */ + hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ; + ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize CSQ ring\n", ret); + return ret; + } + + hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ; + ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize CRQ ring\n", ret); + goto err_csq; + } + + /* get firmware version */ + ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to query firmware version\n", ret); + goto err_crq; + } + hdev->fw_version = version; + + dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); + + return 0; +err_crq: + hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); +err_csq: + hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); + + return ret; +} + +void hclgevf_cmd_uninit(struct hclgevf_dev *hdev) +{ + hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); + hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h new file mode 100644 index 000000000000..2caca9317f8c --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -0,0 +1,248 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. 
*/ + +#ifndef __HCLGEVF_CMD_H +#define __HCLGEVF_CMD_H +#include <linux/io.h> +#include <linux/types.h> +#include "hnae3.h" + +#define HCLGEVF_CMDQ_TX_TIMEOUT 200 +#define HCLGEVF_CMDQ_RX_INVLD_B 0 +#define HCLGEVF_CMDQ_RX_OUTVLD_B 1 + +struct hclgevf_hw; +struct hclgevf_dev; + +struct hclgevf_desc { + __le16 opcode; + __le16 flag; + __le16 retval; + __le16 rsv; + __le32 data[6]; +}; + +struct hclgevf_desc_cb { + dma_addr_t dma; + void *va; + u32 length; +}; + +struct hclgevf_cmq_ring { + dma_addr_t desc_dma_addr; + struct hclgevf_desc *desc; + struct hclgevf_desc_cb *desc_cb; + struct hclgevf_dev *dev; + u32 head; + u32 tail; + + u16 buf_size; + u16 desc_num; + int next_to_use; + int next_to_clean; + u8 flag; + spinlock_t lock; /* Command queue lock */ +}; + +enum hclgevf_cmd_return_status { + HCLGEVF_CMD_EXEC_SUCCESS = 0, + HCLGEVF_CMD_NO_AUTH = 1, + HCLGEVF_CMD_NOT_EXEC = 2, + HCLGEVF_CMD_QUEUE_FULL = 3, +}; + +enum hclgevf_cmd_status { + HCLGEVF_STATUS_SUCCESS = 0, + HCLGEVF_ERR_CSQ_FULL = -1, + HCLGEVF_ERR_CSQ_TIMEOUT = -2, + HCLGEVF_ERR_CSQ_ERROR = -3 +}; + +struct hclgevf_cmq { + struct hclgevf_cmq_ring csq; + struct hclgevf_cmq_ring crq; + u16 tx_timeout; /* Tx timeout */ + enum hclgevf_cmd_status last_status; +}; + +#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT 0 +#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT 1 +#define HCLGEVF_CMD_FLAG_NEXT_SHIFT 2 +#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT 3 +#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT 4 +#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT 5 + +#define HCLGEVF_CMD_FLAG_IN BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT) +#define HCLGEVF_CMD_FLAG_OUT BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT) +#define HCLGEVF_CMD_FLAG_NEXT BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT) +#define HCLGEVF_CMD_FLAG_WR BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT) +#define HCLGEVF_CMD_FLAG_NO_INTR BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT) +#define HCLGEVF_CMD_FLAG_ERR_INTR BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT) + +enum hclgevf_opcode_type { + /* Generic command */ + HCLGEVF_OPC_QUERY_FW_VER = 0x0001, + /* TQP command */ + HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03, + HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13, + HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20, + /* RSS cmd */ + HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01, + HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07, + HCLGEVF_OPC_RSS_TC_MODE = 0x0D08, + /* Mailbox cmd */ + HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001, +}; + +#define HCLGEVF_TQP_REG_OFFSET 0x80000 +#define HCLGEVF_TQP_REG_SIZE 0x200 + +struct hclgevf_tqp_map { + __le16 tqp_id; /* Absolute tqp id for in this pf */ + u8 tqp_vf; /* VF id */ +#define HCLGEVF_TQP_MAP_TYPE_PF 0 +#define HCLGEVF_TQP_MAP_TYPE_VF 1 +#define HCLGEVF_TQP_MAP_TYPE_B 0 +#define HCLGEVF_TQP_MAP_EN_B 1 + u8 tqp_flag; /* Indicate it's pf or vf tqp */ + __le16 tqp_vid; /* Virtual id in this pf/vf */ + u8 rsv[18]; +}; + +#define HCLGEVF_VECTOR_ELEMENTS_PER_CMD 10 + +enum hclgevf_int_type { + HCLGEVF_INT_TX = 0, + HCLGEVF_INT_RX, + HCLGEVF_INT_EVENT, +}; + +struct hclgevf_ctrl_vector_chain { + u8 int_vector_id; + u8 int_cause_num; +#define HCLGEVF_INT_TYPE_S 0 +#define HCLGEVF_INT_TYPE_M 0x3 +#define HCLGEVF_TQP_ID_S 2 +#define HCLGEVF_TQP_ID_M (0x3fff << HCLGEVF_TQP_ID_S) + __le16 tqp_type_and_id[HCLGEVF_VECTOR_ELEMENTS_PER_CMD]; + u8 vfid; + u8 resv; +}; + +struct hclgevf_query_version_cmd { + __le32 firmware; + __le32 firmware_rsv[5]; +}; + +#define HCLGEVF_RSS_HASH_KEY_OFFSET 4 +#define HCLGEVF_RSS_HASH_KEY_NUM 16 +struct hclgevf_rss_config_cmd { + u8 hash_config; + u8 rsv[7]; + u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM]; +}; + +struct hclgevf_rss_input_tuple_cmd { + u8 ipv4_tcp_en; + u8 
ipv4_udp_en; + u8 ipv4_stcp_en; + u8 ipv4_fragment_en; + u8 ipv6_tcp_en; + u8 ipv6_udp_en; + u8 ipv6_stcp_en; + u8 ipv6_fragment_en; + u8 rsv[16]; +}; + +#define HCLGEVF_RSS_CFG_TBL_SIZE 16 + +struct hclgevf_rss_indirection_table_cmd { + u16 start_table_index; + u16 rss_set_bitmap; + u8 rsv[4]; + u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE]; +}; + +#define HCLGEVF_RSS_TC_OFFSET_S 0 +#define HCLGEVF_RSS_TC_OFFSET_M (0x3ff << HCLGEVF_RSS_TC_OFFSET_S) +#define HCLGEVF_RSS_TC_SIZE_S 12 +#define HCLGEVF_RSS_TC_SIZE_M (0x7 << HCLGEVF_RSS_TC_SIZE_S) +#define HCLGEVF_RSS_TC_VALID_B 15 +#define HCLGEVF_MAX_TC_NUM 8 +struct hclgevf_rss_tc_mode_cmd { + u16 rss_tc_mode[HCLGEVF_MAX_TC_NUM]; + u8 rsv[8]; +}; + +#define HCLGEVF_LINK_STS_B 0 +#define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B) +struct hclgevf_link_status_cmd { + u8 status; + u8 rsv[23]; +}; + +#define HCLGEVF_RING_ID_MASK 0x3ff +#define HCLGEVF_TQP_ENABLE_B 0 + +struct hclgevf_cfg_com_tqp_queue_cmd { + __le16 tqp_id; + __le16 stream_id; + u8 enable; + u8 rsv[19]; +}; + +struct hclgevf_cfg_tx_queue_pointer_cmd { + __le16 tqp_id; + __le16 tx_tail; + __le16 tx_head; + __le16 fbd_num; + __le16 ring_offset; + u8 rsv[14]; +}; + +#define HCLGEVF_TYPE_CRQ 0 +#define HCLGEVF_TYPE_CSQ 1 +#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000 +#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004 +#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008 +#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010 +#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014 +#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018 +#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701c +#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020 +#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024 +#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028 +#define HCLGEVF_NIC_CMQ_EN_B 16 +#define HCLGEVF_NIC_CMQ_ENABLE BIT(HCLGEVF_NIC_CMQ_EN_B) +#define HCLGEVF_NIC_CMQ_DESC_NUM 1024 +#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3 +#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100 + +static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value) +{ + writel(value, base + reg); +} + +static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg) +{ + u8 __iomem *reg_addr = READ_ONCE(base); + + return readl(reg_addr + reg); +} + +#define hclgevf_write_dev(a, reg, value) \ + hclgevf_write_reg((a)->io_base, (reg), (value)) +#define hclgevf_read_dev(a, reg) \ + hclgevf_read_reg((a)->io_base, (reg)) + +#define HCLGEVF_SEND_SYNC(flag) \ + ((flag) & HCLGEVF_CMD_FLAG_NO_INTR) + +int hclgevf_cmd_init(struct hclgevf_dev *hdev); +void hclgevf_cmd_uninit(struct hclgevf_dev *hdev); + +int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num); +void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, + enum hclgevf_opcode_type opcode, + bool is_read); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c new file mode 100644 index 000000000000..0d89965f7928 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -0,0 +1,1505 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
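A note on the command-queue arithmetic in hclgevf_cmd.c above: the CSQ is a single-producer ring in which next_to_use is the software tail and the hardware HEAD register is the consumer pointer, and hclgevf_ring_space() keeps one descriptor permanently unused so that head == tail can only ever mean "empty", never "full". A minimal stand-alone sketch of the same arithmetic (illustrative code, not part of the patch):

	#include <assert.h>

	/* Free slots in a ring of desc_num entries; one slot is always kept
	 * empty so that a full ring is distinguishable from an empty one.
	 */
	static int ring_space(int ntu, int ntc, int desc_num)
	{
		int used = (ntu - ntc + desc_num) % desc_num;

		return desc_num - used - 1;
	}

	int main(void)
	{
		assert(ring_space(0, 0, 1024) == 1023);     /* empty ring */
		assert(ring_space(10, 1020, 1024) == 1009); /* wrapped; 14 in flight */
		assert(ring_space(1023, 0, 1024) == 0);     /* full; last slot unused */
		return 0;
	}

hclgevf_cmd_send() checks this space under csq.lock and bails out with -EBUSY when the ring cannot hold the whole request.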
+ +#include <linux/etherdevice.h> +#include "hclgevf_cmd.h" +#include "hclgevf_main.h" +#include "hclge_mbx.h" +#include "hnae3.h" + +#define HCLGEVF_NAME "hclgevf" + +static struct hnae3_ae_algo ae_algovf; + +static const struct pci_device_id ae_algovf_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, + /* required last entry */ + {0, } +}; + +static inline struct hclgevf_dev *hclgevf_ae_get_hdev( + struct hnae3_handle *handle) +{ + return container_of(handle, struct hclgevf_dev, nic); +} + +static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_queue *queue; + struct hclgevf_desc desc; + struct hclgevf_tqp *tqp; + int status; + int i; + + for (i = 0; i < hdev->num_tqps; i++) { + queue = handle->kinfo.tqp[i]; + tqp = container_of(queue, struct hclgevf_tqp, q); + hclgevf_cmd_setup_basic_desc(&desc, + HCLGEVF_OPC_QUERY_RX_STATUS, + true); + + desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Query tqp stat fail, status = %d,queue = %d\n", + status, i); + return status; + } + tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += + le32_to_cpu(desc.data[1]); + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, + true); + + desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Query tqp stat fail, status = %d,queue = %d\n", + status, i); + return status; + } + tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += + le32_to_cpu(desc.data[1]); + } + + return 0; +} + +static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_tqp *tqp; + u64 *buff = data; + int i; + + for (i = 0; i < hdev->num_tqps; i++) { + tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); + *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; + } + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); + *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; + } + + return buff; +} + +static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hdev->num_tqps * 2; +} + +static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 *buff = data; + int i = 0; + + for (i = 0; i < hdev->num_tqps; i++) { + struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], + struct hclgevf_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", + tqp->index); + buff += ETH_GSTRING_LEN; + } + + for (i = 0; i < hdev->num_tqps; i++) { + struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], + struct hclgevf_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", + tqp->index); + buff += ETH_GSTRING_LEN; + } + + return buff; +} + +static void hclgevf_update_stats(struct hnae3_handle *handle, + struct net_device_stats *net_stats) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int status; + + status = hclgevf_tqps_update_stats(handle); + if (status) + dev_err(&hdev->pdev->dev, + "VF update of TQPS stats fail, status = %d.\n", + status); +} + +static int hclgevf_get_sset_count(struct hnae3_handle 
*handle, int strset) +{ + if (strset == ETH_SS_TEST) + return -EOPNOTSUPP; + else if (strset == ETH_SS_STATS) + return hclgevf_tqps_get_sset_count(handle, strset); + + return 0; +} + +static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, + u8 *data) +{ + u8 *p = (char *)data; + + if (strset == ETH_SS_STATS) + p = hclgevf_tqps_get_strings(handle, p); +} + +static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) +{ + hclgevf_tqps_get_stats(handle, data); +} + +static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) +{ + u8 resp_msg; + int status; + + status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, + true, &resp_msg, sizeof(u8)); + if (status) { + dev_err(&hdev->pdev->dev, + "VF request to get TC info from PF failed %d", + status); + return status; + } + + hdev->hw_tc_map = resp_msg; + + return 0; +} + +static int hclge_get_queue_info(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_TQPS_RSS_INFO_LEN 8 + u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; + int status; + + status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, + true, resp_msg, + HCLGEVF_TQPS_RSS_INFO_LEN); + if (status) { + dev_err(&hdev->pdev->dev, + "VF request to get tqp info from PF failed %d", + status); + return status; + } + + memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); + memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); + memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16)); + memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16)); + + return 0; +} + +static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) +{ + struct hclgevf_tqp *tqp; + int i; + + hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, + sizeof(struct hclgevf_tqp), GFP_KERNEL); + if (!hdev->htqp) + return -ENOMEM; + + tqp = hdev->htqp; + + for (i = 0; i < hdev->num_tqps; i++) { + tqp->dev = &hdev->pdev->dev; + tqp->index = i; + + tqp->q.ae_algo = &ae_algovf; + tqp->q.buf_size = hdev->rx_buf_len; + tqp->q.desc_num = hdev->num_desc; + tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + + i * HCLGEVF_TQP_REG_SIZE; + + tqp++; + } + + return 0; +} + +static int hclgevf_knic_setup(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *nic = &hdev->nic; + struct hnae3_knic_private_info *kinfo; + u16 new_tqps = hdev->num_tqps; + int i; + + kinfo = &nic->kinfo; + kinfo->num_tc = 0; + kinfo->num_desc = hdev->num_desc; + kinfo->rx_buf_len = hdev->rx_buf_len; + for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) + if (hdev->hw_tc_map & BIT(i)) + kinfo->num_tc++; + + kinfo->rss_size + = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); + new_tqps = kinfo->rss_size * kinfo->num_tc; + kinfo->num_tqps = min(new_tqps, hdev->num_tqps); + + kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, + sizeof(struct hnae3_queue *), GFP_KERNEL); + if (!kinfo->tqp) + return -ENOMEM; + + for (i = 0; i < kinfo->num_tqps; i++) { + hdev->htqp[i].q.handle = &hdev->nic; + hdev->htqp[i].q.tqp_index = i; + kinfo->tqp[i] = &hdev->htqp[i].q; + } + + return 0; +} + +static void hclgevf_request_link_info(struct hclgevf_dev *hdev) +{ + int status; + u8 resp_msg; + + status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, + 0, false, &resp_msg, sizeof(u8)); + if (status) + dev_err(&hdev->pdev->dev, + "VF failed to fetch link status(%d) from PF", status); +} + +void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) +{ + struct hnae3_handle *handle = &hdev->nic; + struct hnae3_client *client; + + client = handle->client; + + if (link_state != hdev->hw.mac.link) { + 
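+		/* notify the stack only on a real transition; hw.mac.link
+		 * caches the last state reported to the client
+		 */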
client->ops->link_status_change(handle, !!link_state); + hdev->hw.mac.link = link_state; + } +} + +static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *nic = &hdev->nic; + int ret; + + nic->ae_algo = &ae_algovf; + nic->pdev = hdev->pdev; + nic->numa_node_mask = hdev->numa_node_mask; + nic->flags |= HNAE3_SUPPORT_VF; + + if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { + dev_err(&hdev->pdev->dev, "unsupported device type %d\n", + hdev->ae_dev->dev_type); + return -EINVAL; + } + + ret = hclgevf_knic_setup(hdev); + if (ret) + dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", + ret); + return ret; +} + +static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) +{ + hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; + hdev->num_msi_left += 1; + hdev->num_msi_used -= 1; +} + +static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, + struct hnae3_vector_info *vector_info) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_vector_info *vector = vector_info; + int alloc = 0; + int i, j; + + vector_num = min(hdev->num_msi_left, vector_num); + + for (j = 0; j < vector_num; j++) { + for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { + if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { + vector->vector = pci_irq_vector(hdev->pdev, i); + vector->io_addr = hdev->hw.io_base + + HCLGEVF_VECTOR_REG_BASE + + (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; + hdev->vector_status[i] = 0; + hdev->vector_irq[i] = vector->vector; + + vector++; + alloc++; + + break; + } + } + } + hdev->num_msi_left -= alloc; + hdev->num_msi_used += alloc; + + return alloc; +} + +static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) +{ + int i; + + for (i = 0; i < hdev->num_msi; i++) + if (vector == hdev->vector_irq[i]) + return i; + + return -EINVAL; +} + +static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) +{ + return HCLGEVF_RSS_KEY_SIZE; +} + +static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle) +{ + return HCLGEVF_RSS_IND_TBL_SIZE; +} + +static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) +{ + const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; + struct hclgevf_rss_indirection_table_cmd *req; + struct hclgevf_desc desc; + int status; + int i, j; + + req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; + + for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, + false); + req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; + req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; + for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) + req->rss_result[j] = + indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; + + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "VF failed(=%d) to set RSS indirection table\n", + status); + return status; + } + } + + return 0; +} + +static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) +{ + struct hclgevf_rss_tc_mode_cmd *req; + u16 tc_offset[HCLGEVF_MAX_TC_NUM]; + u16 tc_valid[HCLGEVF_MAX_TC_NUM]; + u16 tc_size[HCLGEVF_MAX_TC_NUM]; + struct hclgevf_desc desc; + u16 roundup_size; + int status; + int i; + + req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; + + roundup_size = roundup_pow_of_two(rss_size); + roundup_size = ilog2(roundup_size); + + for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { + tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); + tc_size[i] = roundup_size; + tc_offset[i] = rss_size * i; + } + + 
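+	/* pack the per-TC RSS layout into the descriptor: each rss_tc_mode
+	 * word carries a valid bit, the region size encoded as
+	 * ilog2(roundup_pow_of_two(rss_size)) and the TC's queue offset
+	 */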
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); + for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { + hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, + (tc_valid[i] & 0x1)); + hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, + HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); + hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, + HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); + } + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "VF failed(=%d) to set rss tc mode\n", status); + + return status; +} + +static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash, + u8 *key) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_config_cmd *req; + int lkup_times = key ? 3 : 1; + struct hclgevf_desc desc; + int key_offset; + int key_size; + int status; + + req = (struct hclgevf_rss_config_cmd *)desc.data; + lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0); + + for (key_offset = 0; key_offset < lkup_times; key_offset++) { + hclgevf_cmd_setup_basic_desc(&desc, + HCLGEVF_OPC_RSS_GENERIC_CONFIG, + true); + req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET); + + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "failed to get hardware RSS cfg, status = %d\n", + status); + return status; + } + + if (key_offset == 2) + key_size = + HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; + else + key_size = HCLGEVF_RSS_HASH_KEY_NUM; + + if (key) + memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, + req->hash_key, + key_size); + } + + if (hash) { + if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ) + *hash = ETH_RSS_HASH_TOP; + else + *hash = ETH_RSS_HASH_UNKNOWN; + } + + return 0; +} + +static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + int i; + + if (indir) + for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) + indir[i] = rss_cfg->rss_indirection_tbl[i]; + + return hclgevf_get_rss_hw_cfg(handle, hfunc, key); +} + +static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + int i; + + /* update the shadow RSS table with user specified qids */ + for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) + rss_cfg->rss_indirection_tbl[i] = indir[i]; + + /* update the hardware */ + return hclgevf_set_rss_indir_table(hdev); +} + +static int hclgevf_get_tc_size(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + + return rss_cfg->rss_size; +} + +static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, + int vector, + struct hnae3_ring_chain_node *ring_chain) +{ +#define HCLGEVF_RING_NODE_VARIABLE_NUM 3 +#define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM 3 + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_ring_chain_node *node; + struct hclge_mbx_vf_to_pf_cmd *req; + struct hclgevf_desc desc; + int i, vector_id; + int status; + u8 type; + + req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; + vector_id = hclgevf_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. 
ret =%d\n", vector_id); + return vector_id; + } + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); + type = en ? + HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR; + req->msg[0] = type; + req->msg[1] = vector_id; /* vector_id should be id in VF */ + + i = 0; + for (node = ring_chain; node; node = node->next) { + i++; + /* msg[2] is cause num */ + req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] = + hnae_get_bit(node->flag, HNAE3_RING_TYPE_B); + req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] = + node->tqp_index; + req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 2] = + hnae_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S); + + if (i == (HCLGE_MBX_VF_MSG_DATA_NUM - + HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) / + HCLGEVF_RING_NODE_VARIABLE_NUM) { + req->msg[2] = i; + + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", + status); + return status; + } + i = 0; + hclgevf_cmd_setup_basic_desc(&desc, + HCLGEVF_OPC_MBX_VF_TO_PF, + false); + req->msg[0] = type; + req->msg[1] = vector_id; + } + } + + if (i > 0) { + req->msg[2] = i; + + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", status); + return status; + } + } + + return 0; +} + +static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain); +} + +static int hclgevf_unmap_ring_from_vector( + struct hnae3_handle *handle, + int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret, vector_id; + + vector_id = hclgevf_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain); + if (ret) { + dev_err(&handle->pdev->dev, + "Unmap ring from vector fail. 
vector=%d, ret =%d\n", + vector_id, + ret); + return ret; + } + + hclgevf_free_vector(hdev, vector); + + return 0; +} + +static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en) +{ + struct hclge_mbx_vf_to_pf_cmd *req; + struct hclgevf_desc desc; + int status; + + req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); + req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; + req->msg[1] = en; + + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Set promisc mode fail, status is %d.\n", status); + + return status; +} + +static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + hclgevf_cmd_set_promisc_mode(hdev, en); +} + +static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, + int stream_id, bool enable) +{ + struct hclgevf_cfg_com_tqp_queue_cmd *req; + struct hclgevf_desc desc; + int status; + + req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, + false); + req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); + req->stream_id = cpu_to_le16(stream_id); + req->enable |= enable << HCLGEVF_TQP_ENABLE_B; + + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "TQP enable fail, status =%d.\n", status); + + return status; +} + +static int hclgevf_get_queue_id(struct hnae3_queue *queue) +{ + struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q); + + return tqp->index; +} + +static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_queue *queue; + struct hclgevf_tqp *tqp; + int i; + + for (i = 0; i < hdev->num_tqps; i++) { + queue = handle->kinfo.tqp[i]; + tqp = container_of(queue, struct hclgevf_tqp, q); + memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); + } +} + +static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 msg[2] = {0}; + + msg[0] = en; + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, + HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, + msg, 1, false, NULL, 0); +} + +static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + ether_addr_copy(p, hdev->hw.mac.mac_addr); +} + +static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; + u8 *new_mac_addr = (u8 *)p; + u8 msg_data[ETH_ALEN * 2]; + int status; + + ether_addr_copy(msg_data, new_mac_addr); + ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); + + status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, + HCLGE_MBX_MAC_VLAN_UC_MODIFY, + msg_data, ETH_ALEN * 2, + false, NULL, 0); + if (!status) + ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); + + return status; +} + +static int hclgevf_add_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, + HCLGE_MBX_MAC_VLAN_UC_ADD, + addr, ETH_ALEN, false, NULL, 0); +} + +static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return 
hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, + HCLGE_MBX_MAC_VLAN_UC_REMOVE, + addr, ETH_ALEN, false, NULL, 0); +} + +static int hclgevf_add_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, + HCLGE_MBX_MAC_VLAN_MC_ADD, + addr, ETH_ALEN, false, NULL, 0); +} + +static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, + HCLGE_MBX_MAC_VLAN_MC_REMOVE, + addr, ETH_ALEN, false, NULL, 0); +} + +static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, + __be16 proto, u16 vlan_id, + bool is_kill) +{ +#define HCLGEVF_VLAN_MBX_MSG_LEN 5 + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; + + if (vlan_id > 4095) + return -EINVAL; + + if (proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + msg_data[0] = is_kill; + memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); + memcpy(&msg_data[3], &proto, sizeof(proto)); + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_VLAN_FILTER, msg_data, + HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); +} + +static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 msg_data[2]; + + memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); + + hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false, + NULL, 0); +} + +static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hdev->fw_version; +} + +static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) +{ + struct hclgevf_misc_vector *vector = &hdev->misc_vector; + + vector->vector_irq = pci_irq_vector(hdev->pdev, + HCLGEVF_MISC_VECTOR_NUM); + vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; + /* vector status always valid for Vector 0 */ + hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; + hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; + + hdev->num_msi_left -= 1; + hdev->num_msi_used += 1; +} + +static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) +{ + if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) + schedule_work(&hdev->mbx_service_task); +} + +static void hclgevf_task_schedule(struct hclgevf_dev *hdev) +{ + if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) && + !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state)) + schedule_work(&hdev->service_task); +} + +static void hclgevf_service_timer(struct timer_list *t) +{ + struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); + + mod_timer(&hdev->service_timer, jiffies + 5 * HZ); + + hclgevf_task_schedule(hdev); +} + +static void hclgevf_mailbox_service_task(struct work_struct *work) +{ + struct hclgevf_dev *hdev; + + hdev = container_of(work, struct hclgevf_dev, mbx_service_task); + + if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) + return; + + clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); + + hclgevf_mbx_handler(hdev); + + clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); +} + +static void hclgevf_service_task(struct work_struct *work) +{ + struct hclgevf_dev *hdev; + + hdev = container_of(work, struct hclgevf_dev, service_task); + + /* request the link status from the PF. 
PF would be able to tell VF + * about such updates in future so we might remove this later + */ + hclgevf_request_link_info(hdev); + + clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); +} + +static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) +{ + hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); +} + +static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval) +{ + u32 cmdq_src_reg; + + /* fetch the events from their corresponding regs */ + cmdq_src_reg = hclgevf_read_dev(&hdev->hw, + HCLGEVF_VECTOR0_CMDQ_SRC_REG); + + /* check for vector0 mailbox(=CMDQ RX) event source */ + if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { + cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); + *clearval = cmdq_src_reg; + return true; + } + + dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); + + return false; +} + +static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) +{ + writel(en ? 1 : 0, vector->addr); +} + +static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) +{ + struct hclgevf_dev *hdev = data; + u32 clearval; + + hclgevf_enable_vector(&hdev->misc_vector, false); + if (!hclgevf_check_event_cause(hdev, &clearval)) + goto skip_sched; + + /* schedule the VF mailbox service task, if not already scheduled */ + hclgevf_mbx_task_schedule(hdev); + + hclgevf_clear_event_cause(hdev, clearval); + +skip_sched: + hclgevf_enable_vector(&hdev->misc_vector, true); + + return IRQ_HANDLED; +} + +static int hclgevf_configure(struct hclgevf_dev *hdev) +{ + int ret; + + /* get queue configuration from PF */ + ret = hclge_get_queue_info(hdev); + if (ret) + return ret; + /* get tc configuration from PF */ + return hclgevf_get_tc_info(hdev); +} + +static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *roce = &hdev->roce; + struct hnae3_handle *nic = &hdev->nic; + + roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM; + + if (hdev->num_msi_left < roce->rinfo.num_vectors || + hdev->num_msi_left == 0) + return -EINVAL; + + roce->rinfo.base_vector = + hdev->vector_status[hdev->num_msi_used]; + + roce->rinfo.netdev = nic->kinfo.netdev; + roce->rinfo.roce_io_base = hdev->hw.io_base; + + roce->pdev = nic->pdev; + roce->ae_algo = nic->ae_algo; + roce->numa_node_mask = nic->numa_node_mask; + + return 0; +} + +static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) +{ + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + int i, ret; + + rss_cfg->rss_size = hdev->rss_size_max; + + /* Initialize RSS indirect table for each vport */ + for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) + rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; + + ret = hclgevf_set_rss_indir_table(hdev); + if (ret) + return ret; + + return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max); +} + +static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) +{ + /* other vlan config(like, VLAN TX/RX offload) would also be added + * here later + */ + return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, + false); +} + +static int hclgevf_ae_start(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int i, queue_id; + + for (i = 0; i < handle->kinfo.num_tqps; i++) { + /* ring enable */ + queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); + if (queue_id < 0) { + dev_warn(&hdev->pdev->dev, + "Get invalid queue id, ignore it\n"); + continue; + } + + hclgevf_tqp_enable(hdev, queue_id, 0, true); + } + + /* reset tqp stats */ + 
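+	/* (counters restart from zero each time the device is started) */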
hclgevf_reset_tqp_stats(handle); + + hclgevf_request_link_info(hdev); + + clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); + mod_timer(&hdev->service_timer, jiffies + HZ); + + return 0; +} + +static void hclgevf_ae_stop(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int i, queue_id; + + for (i = 0; i < hdev->num_tqps; i++) { + /* Ring disable */ + queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); + if (queue_id < 0) { + dev_warn(&hdev->pdev->dev, + "Get invalid queue id, ignore it\n"); + continue; + } + + hclgevf_tqp_enable(hdev, queue_id, 0, false); + } + + /* reset tqp stats */ + hclgevf_reset_tqp_stats(handle); +} + +static void hclgevf_state_init(struct hclgevf_dev *hdev) +{ + /* setup tasks for the MBX */ + INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); + clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); + + /* setup tasks for service timer */ + timer_setup(&hdev->service_timer, hclgevf_service_timer, 0); + + INIT_WORK(&hdev->service_task, hclgevf_service_task); + clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); + + mutex_init(&hdev->mbx_resp.mbx_mutex); + + /* bring the device down */ + set_bit(HCLGEVF_STATE_DOWN, &hdev->state); +} + +static void hclgevf_state_uninit(struct hclgevf_dev *hdev) +{ + set_bit(HCLGEVF_STATE_DOWN, &hdev->state); + + if (hdev->service_timer.function) + del_timer_sync(&hdev->service_timer); + if (hdev->service_task.func) + cancel_work_sync(&hdev->service_task); + if (hdev->mbx_service_task.func) + cancel_work_sync(&hdev->mbx_service_task); + + mutex_destroy(&hdev->mbx_resp.mbx_mutex); +} + +static int hclgevf_init_msi(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int vectors; + int i; + + hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM; + + vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (vectors < 0) { + dev_err(&pdev->dev, + "failed(%d) to allocate MSI/MSI-X vectors\n", + vectors); + return vectors; + } + if (vectors < hdev->num_msi) + dev_warn(&hdev->pdev->dev, + "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", + hdev->num_msi, vectors); + + hdev->num_msi = vectors; + hdev->num_msi_left = vectors; + hdev->base_msi_vector = pdev->irq; + + hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(u16), GFP_KERNEL); + if (!hdev->vector_status) { + pci_free_irq_vectors(pdev); + return -ENOMEM; + } + + for (i = 0; i < hdev->num_msi; i++) + hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; + + hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(int), GFP_KERNEL); + if (!hdev->vector_irq) { + pci_free_irq_vectors(pdev); + return -ENOMEM; + } + + return 0; +} + +static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + + pci_free_irq_vectors(pdev); +} + +static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) +{ + int ret = 0; + + hclgevf_get_misc_vector(hdev); + + ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, + 0, "hclgevf_cmd", hdev); + if (ret) { + dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", + hdev->misc_vector.vector_irq); + return ret; + } + + /* enable misc. 
vector(vector 0) */ + hclgevf_enable_vector(&hdev->misc_vector, true); + + return ret; +} + +static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) +{ + /* disable misc vector(vector 0) */ + hclgevf_enable_vector(&hdev->misc_vector, false); + free_irq(hdev->misc_vector.vector_irq, hdev); + hclgevf_free_vector(hdev, 0); +} + +static int hclgevf_init_instance(struct hclgevf_dev *hdev, + struct hnae3_client *client) +{ + int ret; + + switch (client->type) { + case HNAE3_CLIENT_KNIC: + hdev->nic_client = client; + hdev->nic.client = client; + + ret = client->ops->init_instance(&hdev->nic); + if (ret) + return ret; + + if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { + struct hnae3_client *rc = hdev->roce_client; + + ret = hclgevf_init_roce_base_info(hdev); + if (ret) + return ret; + ret = rc->ops->init_instance(&hdev->roce); + if (ret) + return ret; + } + break; + case HNAE3_CLIENT_UNIC: + hdev->nic_client = client; + hdev->nic.client = client; + + ret = client->ops->init_instance(&hdev->nic); + if (ret) + return ret; + break; + case HNAE3_CLIENT_ROCE: + hdev->roce_client = client; + hdev->roce.client = client; + + if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { + ret = hclgevf_init_roce_base_info(hdev); + if (ret) + return ret; + + ret = client->ops->init_instance(&hdev->roce); + if (ret) + return ret; + } + } + + return 0; +} + +static void hclgevf_uninit_instance(struct hclgevf_dev *hdev, + struct hnae3_client *client) +{ + /* un-init roce, if it exists */ + if (hdev->roce_client) + hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); + + /* un-init nic/unic, if this was not called by roce client */ + if ((client->ops->uninit_instance) && + (client->type != HNAE3_CLIENT_ROCE)) + client->ops->uninit_instance(&hdev->nic, 0); +} + +static int hclgevf_register_client(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + return hclgevf_init_instance(hdev, client); +} + +static void hclgevf_unregister_client(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + hclgevf_uninit_instance(hdev, client); +} + +static int hclgevf_pci_init(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclgevf_hw *hw; + int ret; + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "failed to enable PCI device\n"); + goto err_no_drvdata; + } + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); + goto err_disable_device; + } + + ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); + if (ret) { + dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); + goto err_disable_device; + } + + pci_set_master(pdev); + hw = &hdev->hw; + hw->hdev = hdev; + hw->io_base = pci_iomap(pdev, 2, 0); + if (!hw->io_base) { + dev_err(&pdev->dev, "can't map configuration register space\n"); + ret = -ENOMEM; + goto err_clr_master; + } + + return 0; + +err_clr_master: + pci_clear_master(pdev); + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); +err_no_drvdata: + pci_set_drvdata(pdev, NULL); + return ret; +} + +static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + + pci_iounmap(pdev, hdev->hw.io_base); + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct 
pci_dev *pdev = ae_dev->pdev; + struct hclgevf_dev *hdev; + int ret; + + hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); + if (!hdev) + return -ENOMEM; + + hdev->pdev = pdev; + hdev->ae_dev = ae_dev; + ae_dev->priv = hdev; + + ret = hclgevf_pci_init(hdev); + if (ret) { + dev_err(&pdev->dev, "PCI initialization failed\n"); + return ret; + } + + ret = hclgevf_init_msi(hdev); + if (ret) { + dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); + goto err_irq_init; + } + + hclgevf_state_init(hdev); + + ret = hclgevf_misc_irq_init(hdev); + if (ret) { + dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", + ret); + goto err_misc_irq_init; + } + + ret = hclgevf_cmd_init(hdev); + if (ret) + goto err_cmd_init; + + ret = hclgevf_configure(hdev); + if (ret) { + dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); + goto err_config; + } + + ret = hclgevf_alloc_tqps(hdev); + if (ret) { + dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); + goto err_config; + } + + ret = hclgevf_set_handle_info(hdev); + if (ret) { + dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret); + goto err_config; + } + + /* Initialize VF's MTA */ + hdev->accept_mta_mc = true; + ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to set mta filter mode\n", ret); + goto err_config; + } + + /* Initialize RSS for this VF */ + ret = hclgevf_rss_init_hw(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize RSS\n", ret); + goto err_config; + } + + ret = hclgevf_init_vlan_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize VLAN config\n", ret); + goto err_config; + } + + pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); + + return 0; + +err_config: + hclgevf_cmd_uninit(hdev); +err_cmd_init: + hclgevf_misc_irq_uninit(hdev); +err_misc_irq_init: + hclgevf_state_uninit(hdev); + hclgevf_uninit_msi(hdev); +err_irq_init: + hclgevf_pci_uninit(hdev); + return ret; +} + +static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + hclgevf_cmd_uninit(hdev); + hclgevf_misc_irq_uninit(hdev); + hclgevf_state_uninit(hdev); + hclgevf_uninit_msi(hdev); + hclgevf_pci_uninit(hdev); + ae_dev->priv = NULL; +} + +static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *nic = &hdev->nic; + struct hnae3_knic_private_info *kinfo = &nic->kinfo; + + return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); +} + +/** + * hclgevf_get_channels - Get the current channels enabled and max supported. + * @handle: hardware information for network interface + * @ch: ethtool channels structure + * + * We don't support separate tx and rx queues as channels. The other count + * represents how many queues are being used for control. max_combined counts + * how many queue pairs we can support. They may not be mapped 1 to 1 with + * q_vectors since we support a lot more queue pairs than q_vectors. 
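+ * max_combined is derived as min(rss_size_max * num_tc, num_tqps);
+ * combined_count simply reports num_tqps.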
+ **/ +static void hclgevf_get_channels(struct hnae3_handle *handle, + struct ethtool_channels *ch) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + ch->max_combined = hclgevf_get_max_channels(hdev); + ch->other_count = 0; + ch->max_other = 0; + ch->combined_count = hdev->num_tqps; +} + +static const struct hnae3_ae_ops hclgevf_ops = { + .init_ae_dev = hclgevf_init_ae_dev, + .uninit_ae_dev = hclgevf_uninit_ae_dev, + .init_client_instance = hclgevf_register_client, + .uninit_client_instance = hclgevf_unregister_client, + .start = hclgevf_ae_start, + .stop = hclgevf_ae_stop, + .map_ring_to_vector = hclgevf_map_ring_to_vector, + .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, + .get_vector = hclgevf_get_vector, + .reset_queue = hclgevf_reset_tqp, + .set_promisc_mode = hclgevf_set_promisc_mode, + .get_mac_addr = hclgevf_get_mac_addr, + .set_mac_addr = hclgevf_set_mac_addr, + .add_uc_addr = hclgevf_add_uc_addr, + .rm_uc_addr = hclgevf_rm_uc_addr, + .add_mc_addr = hclgevf_add_mc_addr, + .rm_mc_addr = hclgevf_rm_mc_addr, + .get_stats = hclgevf_get_stats, + .update_stats = hclgevf_update_stats, + .get_strings = hclgevf_get_strings, + .get_sset_count = hclgevf_get_sset_count, + .get_rss_key_size = hclgevf_get_rss_key_size, + .get_rss_indir_size = hclgevf_get_rss_indir_size, + .get_rss = hclgevf_get_rss, + .set_rss = hclgevf_set_rss, + .get_tc_size = hclgevf_get_tc_size, + .get_fw_version = hclgevf_get_fw_version, + .set_vlan_filter = hclgevf_set_vlan_filter, + .get_channels = hclgevf_get_channels, +}; + +static struct hnae3_ae_algo ae_algovf = { + .ops = &hclgevf_ops, + .name = HCLGEVF_NAME, + .pdev_id_table = ae_algovf_pci_tbl, +}; + +static int hclgevf_init(void) +{ + pr_info("%s is initializing\n", HCLGEVF_NAME); + + return hnae3_register_ae_algo(&ae_algovf); +} + +static void hclgevf_exit(void) +{ + hnae3_unregister_ae_algo(&ae_algovf); +} +module_init(hclgevf_init); +module_exit(hclgevf_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_DESCRIPTION("HCLGEVF Driver"); +MODULE_VERSION(HCLGEVF_MOD_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h new file mode 100644 index 000000000000..a63bee4a3674 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. */ + +#ifndef __HCLGEVF_MAIN_H +#define __HCLGEVF_MAIN_H +#include <linux/fs.h> +#include <linux/types.h> +#include "hclge_mbx.h" +#include "hclgevf_cmd.h" +#include "hnae3.h" + +#define HCLGEVF_MOD_VERSION "v1.0" +#define HCLGEVF_DRIVER_NAME "hclgevf" + +#define HCLGEVF_ROCEE_VECTOR_NUM 0 +#define HCLGEVF_MISC_VECTOR_NUM 0 + +#define HCLGEVF_INVALID_VPORT 0xffff + +/* This number in actual depends upon the total number of VFs + * created by physical function. But the maximum number of + * possible vector-per-VF is {VFn(1-32), VECTn(32 + 1)}. 
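+ * That is, up to 32 I/O vectors plus the one misc/mailbox vector (vector 0).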
+ */ +#define HCLGEVF_MAX_VF_VECTOR_NUM (32 + 1) + +#define HCLGEVF_VECTOR_REG_BASE 0x20000 +#define HCLGEVF_MISC_VECTOR_REG_BASE 0x20400 +#define HCLGEVF_VECTOR_REG_OFFSET 0x4 +#define HCLGEVF_VECTOR_VF_OFFSET 0x100000 + +/* Vector0 interrupt CMDQ event source register(RW) */ +#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100 +/* CMDQ register bits for RX event(=MBX event) */ +#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1 + +#define HCLGEVF_TQP_RESET_TRY_TIMES 10 + +#define HCLGEVF_RSS_IND_TBL_SIZE 512 +#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff +#define HCLGEVF_RSS_KEY_SIZE 40 +#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ 0 +#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1 +#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2 +#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf +#define HCLGEVF_RSS_CFG_TBL_NUM \ + (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE) + +/* states of hclgevf device & tasks */ +enum hclgevf_states { + /* device states */ + HCLGEVF_STATE_DOWN, + HCLGEVF_STATE_DISABLED, + /* task states */ + HCLGEVF_STATE_SERVICE_SCHED, + HCLGEVF_STATE_MBX_SERVICE_SCHED, + HCLGEVF_STATE_MBX_HANDLING, +}; + +#define HCLGEVF_MPF_ENBALE 1 + +struct hclgevf_mac { + u8 mac_addr[ETH_ALEN]; + int link; +}; + +struct hclgevf_hw { + void __iomem *io_base; + int num_vec; + struct hclgevf_cmq cmq; + struct hclgevf_mac mac; + void *hdev; /* hchgevf device it is part of */ +}; + +/* TQP stats */ +struct hlcgevf_tqp_stats { + /* query_tqp_tx_queue_statistics ,opcode id: 0x0B03 */ + u64 rcb_tx_ring_pktnum_rcd; /* 32bit */ + /* query_tqp_rx_queue_statistics ,opcode id: 0x0B13 */ + u64 rcb_rx_ring_pktnum_rcd; /* 32bit */ +}; + +struct hclgevf_tqp { + struct device *dev; /* device for DMA mapping */ + struct hnae3_queue q; + struct hlcgevf_tqp_stats tqp_stats; + u16 index; /* global index in a NIC controller */ + + bool alloced; +}; + +struct hclgevf_cfg { + u8 vmdq_vport_num; + u8 tc_num; + u16 tqp_desc_num; + u16 rx_buf_len; + u8 phy_addr; + u8 media_type; + u8 mac_addr[ETH_ALEN]; + u32 numa_node_map; +}; + +struct hclgevf_rss_cfg { + u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */ + u32 hash_algo; + u32 rss_size; + u8 hw_tc_map; + u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */ +}; + +struct hclgevf_misc_vector { + u8 __iomem *addr; + int vector_irq; +}; + +struct hclgevf_dev { + struct pci_dev *pdev; + struct hnae3_ae_dev *ae_dev; + struct hclgevf_hw hw; + struct hclgevf_misc_vector misc_vector; + struct hclgevf_rss_cfg rss_cfg; + unsigned long state; + + u32 fw_version; + u16 num_tqps; /* num task queue pairs of this PF */ + + u16 alloc_rss_size; /* allocated RSS task queue */ + u16 rss_size_max; /* HW defined max RSS task queue */ + + u16 num_alloc_vport; /* num vports this driver supports */ + u32 numa_node_mask; + u16 rx_buf_len; + u16 num_desc; + u8 hw_tc_map; + + u16 num_msi; + u16 num_msi_left; + u16 num_msi_used; + u32 base_msi_vector; + u16 *vector_status; + int *vector_irq; + + bool accept_mta_mc; /* whether to accept mta filter multicast */ + struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ + + struct timer_list service_timer; + struct work_struct service_task; + struct work_struct mbx_service_task; + + struct hclgevf_tqp *htqp; + + struct hnae3_handle nic; + struct hnae3_handle roce; + + struct hnae3_client *nic_client; + struct hnae3_client *roce_client; + u32 flag; +}; + +int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, + const u8 *msg_data, u8 msg_len, bool need_resp, + u8 *resp_data, u16 resp_len); +void hclgevf_mbx_handler(struct 
hclgevf_dev *hdev); +void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c new file mode 100644 index 000000000000..e39cad285fa9 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include "hclge_mbx.h" +#include "hclgevf_main.h" +#include "hnae3.h" + +static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) +{ + /* this function should be called with mbx_resp.mbx_mutex held + * to prtect the received_response from race condition + */ + hdev->mbx_resp.received_resp = false; + hdev->mbx_resp.origin_mbx_msg = 0; + hdev->mbx_resp.resp_status = 0; + memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE); +} + +/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox + * message to PF. + * @hdev: pointer to struct hclgevf_dev + * @resp_msg: pointer to store the original message type and response status + * @len: the resp_msg data array length. + */ +static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, + u8 *resp_data, u16 resp_len) +{ +#define HCLGEVF_MAX_TRY_TIMES 500 +#define HCLGEVF_SLEEP_USCOEND 1000 + struct hclgevf_mbx_resp_status *mbx_resp; + u16 r_code0, r_code1; + int i = 0; + + if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) { + dev_err(&hdev->pdev->dev, + "VF mbx response len(=%d) exceeds maximum(=%d)\n", + resp_len, + HCLGE_MBX_MAX_RESP_DATA_SIZE); + return -EINVAL; + } + + while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) { + udelay(HCLGEVF_SLEEP_USCOEND); + i++; + } + + if (i >= HCLGEVF_MAX_TRY_TIMES) { + dev_err(&hdev->pdev->dev, + "VF could not get mbx resp(=%d) from PF in %d tries\n", + hdev->mbx_resp.received_resp, i); + return -EIO; + } + + mbx_resp = &hdev->mbx_resp; + r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16); + r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff); + if (resp_data) + memcpy(resp_data, &mbx_resp->additional_info[0], resp_len); + + hclgevf_reset_mbx_resp_status(hdev); + + if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) { + dev_err(&hdev->pdev->dev, + "VF could not match resp code(code0=%d,code1=%d), %d", + code0, code1, mbx_resp->resp_status); + return -EIO; + } + + return 0; +} + +int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, + const u8 *msg_data, u8 msg_len, bool need_resp, + u8 *resp_data, u16 resp_len) +{ + struct hclge_mbx_vf_to_pf_cmd *req; + struct hclgevf_desc desc; + int status; + + req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; + + /* first two bytes are reserved for code & subcode */ + if (msg_len > (HCLGE_MBX_MAX_MSG_SIZE - 2)) { + dev_err(&hdev->pdev->dev, + "VF send mbx msg fail, msg len %d exceeds max len %d\n", + msg_len, HCLGE_MBX_MAX_MSG_SIZE); + return -EINVAL; + } + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); + req->msg[0] = code; + req->msg[1] = subcode; + memcpy(&req->msg[2], msg_data, msg_len); + + /* synchronous send */ + if (need_resp) { + mutex_lock(&hdev->mbx_resp.mbx_mutex); + hclgevf_reset_mbx_resp_status(hdev); + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "VF failed(=%d) to send mbx message to PF\n", + status); + mutex_unlock(&hdev->mbx_resp.mbx_mutex); + return status; + } + + status = hclgevf_get_mbx_resp(hdev, 
code, subcode, resp_data, + resp_len); + mutex_unlock(&hdev->mbx_resp.mbx_mutex); + } else { + /* asynchronous send */ + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "VF failed(=%d) to send mbx message to PF\n", + status); + return status; + } + } + + return status; +} + +void hclgevf_mbx_handler(struct hclgevf_dev *hdev) +{ + struct hclgevf_mbx_resp_status *resp; + struct hclge_mbx_pf_to_vf_cmd *req; + struct hclgevf_cmq_ring *crq; + struct hclgevf_desc *desc; + u16 link_status, flag; + u8 *temp; + int i; + + resp = &hdev->mbx_resp; + crq = &hdev->hw.cmq.crq; + + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); + while (hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B)) { + desc = &crq->desc[crq->next_to_use]; + req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; + + switch (req->msg[0]) { + case HCLGE_MBX_PF_VF_RESP: + if (resp->received_resp) + dev_warn(&hdev->pdev->dev, + "VF mbx resp flag not clear(%d)\n", + req->msg[1]); + resp->received_resp = true; + + resp->origin_mbx_msg = (req->msg[1] << 16); + resp->origin_mbx_msg |= req->msg[2]; + resp->resp_status = req->msg[3]; + + temp = (u8 *)&req->msg[4]; + for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) { + resp->additional_info[i] = *temp; + temp++; + } + break; + case HCLGE_MBX_LINK_STAT_CHANGE: + link_status = le16_to_cpu(req->msg[1]); + + /* update upper layer with new link link status */ + hclgevf_update_link_status(hdev, link_status); + + break; + default: + dev_err(&hdev->pdev->dev, + "VF received unsupported(%d) mbx msg from PF\n", + req->msg[0]); + break; + } + hclge_mbx_ring_ptr_move_crq(crq); + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); + } + + /* Write back CMDQ_RQ header pointer, M7 need this pointer */ + hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG, + crq->next_to_use); +} diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 241db3199b88..354c0982847b 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -199,18 +199,18 @@ static void __emac_set_multicast_list(struct emac_instance *dev); static inline int emac_phy_supports_gige(int phy_mode) { - return phy_mode == PHY_MODE_GMII || - phy_mode == PHY_MODE_RGMII || - phy_mode == PHY_MODE_SGMII || - phy_mode == PHY_MODE_TBI || - phy_mode == PHY_MODE_RTBI; + return phy_interface_mode_is_rgmii(phy_mode) || + phy_mode == PHY_INTERFACE_MODE_GMII || + phy_mode == PHY_INTERFACE_MODE_SGMII || + phy_mode == PHY_INTERFACE_MODE_TBI || + phy_mode == PHY_INTERFACE_MODE_RTBI; } static inline int emac_phy_gpcs(int phy_mode) { - return phy_mode == PHY_MODE_SGMII || - phy_mode == PHY_MODE_TBI || - phy_mode == PHY_MODE_RTBI; + return phy_mode == PHY_INTERFACE_MODE_SGMII || + phy_mode == PHY_INTERFACE_MODE_TBI || + phy_mode == PHY_INTERFACE_MODE_RTBI; } static inline void emac_tx_enable(struct emac_instance *dev) @@ -2871,7 +2871,7 @@ static int emac_init_config(struct emac_instance *dev) /* PHY mode needs some decoding */ dev->phy_mode = of_get_phy_mode(np); if (dev->phy_mode < 0) - dev->phy_mode = PHY_MODE_NA; + dev->phy_mode = PHY_INTERFACE_MODE_NA; /* Check EMAC version */ if (of_device_is_compatible(np, "ibm,emac4sync")) { @@ -3174,7 +3174,7 @@ static int emac_probe(struct platform_device *ofdev) printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n", ndev->name, dev->cell_index, np, ndev->dev_addr); - if (dev->phy_mode == PHY_MODE_SGMII) + if (dev->phy_mode == PHY_INTERFACE_MODE_SGMII) printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name); if 
(dev->phy.address >= 0) diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h index c26d2631ca30..e2f80cca9bed 100644 --- a/drivers/net/ethernet/ibm/emac/emac.h +++ b/drivers/net/ethernet/ibm/emac/emac.h @@ -104,19 +104,6 @@ struct emac_regs { } u1; }; -/* - * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY) - */ -#define PHY_MODE_NA PHY_INTERFACE_MODE_NA -#define PHY_MODE_MII PHY_INTERFACE_MODE_MII -#define PHY_MODE_RMII PHY_INTERFACE_MODE_RMII -#define PHY_MODE_SMII PHY_INTERFACE_MODE_SMII -#define PHY_MODE_RGMII PHY_INTERFACE_MODE_RGMII -#define PHY_MODE_TBI PHY_INTERFACE_MODE_TBI -#define PHY_MODE_GMII PHY_INTERFACE_MODE_GMII -#define PHY_MODE_RTBI PHY_INTERFACE_MODE_RTBI -#define PHY_MODE_SGMII PHY_INTERFACE_MODE_SGMII - /* EMACx_MR0 */ #define EMAC_MR0_RXI 0x80000000 #define EMAC_MR0_TXI 0x40000000 diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c index 35865d05fccd..aa070c063e48 100644 --- a/drivers/net/ethernet/ibm/emac/phy.c +++ b/drivers/net/ethernet/ibm/emac/phy.c @@ -96,7 +96,7 @@ int emac_mii_reset_gpcs(struct mii_phy *phy) if ((val & BMCR_ISOLATE) && limit > 0) gpcs_phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE); - if (limit > 0 && phy->mode == PHY_MODE_SGMII) { + if (limit > 0 && phy->mode == PHY_INTERFACE_MODE_SGMII) { /* Configure GPCS interface to recommended setting for SGMII */ gpcs_phy_write(phy, 0x04, 0x8120); /* AsymPause, FDX */ gpcs_phy_write(phy, 0x07, 0x2801); /* msg_pg, toggle */ @@ -313,16 +313,16 @@ static int cis8201_init(struct mii_phy *phy) epcr &= ~EPCR_MODE_MASK; switch (phy->mode) { - case PHY_MODE_TBI: + case PHY_INTERFACE_MODE_TBI: epcr |= EPCR_TBI_MODE; break; - case PHY_MODE_RTBI: + case PHY_INTERFACE_MODE_RTBI: epcr |= EPCR_RTBI_MODE; break; - case PHY_MODE_GMII: + case PHY_INTERFACE_MODE_GMII: epcr |= EPCR_GMII_MODE; break; - case PHY_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII: default: epcr |= EPCR_RGMII_MODE; } diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c index c4a1ac38bba8..00f5999de3cf 100644 --- a/drivers/net/ethernet/ibm/emac/rgmii.c +++ b/drivers/net/ethernet/ibm/emac/rgmii.c @@ -52,43 +52,28 @@ /* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */ static inline int rgmii_valid_mode(int phy_mode) { - return phy_mode == PHY_MODE_GMII || - phy_mode == PHY_MODE_MII || - phy_mode == PHY_MODE_RGMII || - phy_mode == PHY_MODE_TBI || - phy_mode == PHY_MODE_RTBI; -} - -static inline const char *rgmii_mode_name(int mode) -{ - switch (mode) { - case PHY_MODE_RGMII: - return "RGMII"; - case PHY_MODE_TBI: - return "TBI"; - case PHY_MODE_GMII: - return "GMII"; - case PHY_MODE_MII: - return "MII"; - case PHY_MODE_RTBI: - return "RTBI"; - default: - BUG(); - } + return phy_interface_mode_is_rgmii(phy_mode) || + phy_mode == PHY_INTERFACE_MODE_GMII || + phy_mode == PHY_INTERFACE_MODE_MII || + phy_mode == PHY_INTERFACE_MODE_TBI || + phy_mode == PHY_INTERFACE_MODE_RTBI; } static inline u32 rgmii_mode_mask(int mode, int input) { switch (mode) { - case PHY_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: return RGMII_FER_RGMII(input); - case PHY_MODE_TBI: + case PHY_INTERFACE_MODE_TBI: return RGMII_FER_TBI(input); - case PHY_MODE_GMII: + case PHY_INTERFACE_MODE_GMII: return RGMII_FER_GMII(input); - case PHY_MODE_MII: + case PHY_INTERFACE_MODE_MII: return RGMII_FER_MII(input); - case PHY_MODE_RTBI: + case 
PHY_INTERFACE_MODE_RTBI: return RGMII_FER_RTBI(input); default: BUG(); @@ -115,7 +100,7 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode) out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input)); printk(KERN_NOTICE "%pOF: input %d in %s mode\n", - ofdev->dev.of_node, input, rgmii_mode_name(mode)); + ofdev->dev.of_node, input, phy_modes(mode)); ++dev->users; diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c index 89c42d362292..fdcc734541fe 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.c +++ b/drivers/net/ethernet/ibm/emac/zmii.c @@ -49,20 +49,20 @@ */ static inline int zmii_valid_mode(int mode) { - return mode == PHY_MODE_MII || - mode == PHY_MODE_RMII || - mode == PHY_MODE_SMII || - mode == PHY_MODE_NA; + return mode == PHY_INTERFACE_MODE_MII || + mode == PHY_INTERFACE_MODE_RMII || + mode == PHY_INTERFACE_MODE_SMII || + mode == PHY_INTERFACE_MODE_NA; } static inline const char *zmii_mode_name(int mode) { switch (mode) { - case PHY_MODE_MII: + case PHY_INTERFACE_MODE_MII: return "MII"; - case PHY_MODE_RMII: + case PHY_INTERFACE_MODE_RMII: return "RMII"; - case PHY_MODE_SMII: + case PHY_INTERFACE_MODE_SMII: return "SMII"; default: BUG(); @@ -72,11 +72,11 @@ static inline const char *zmii_mode_name(int mode) static inline u32 zmii_mode_mask(int mode, int input) { switch (mode) { - case PHY_MODE_MII: + case PHY_INTERFACE_MODE_MII: return ZMII_FER_MII(input); - case PHY_MODE_RMII: + case PHY_INTERFACE_MODE_RMII: return ZMII_FER_RMII(input); - case PHY_MODE_SMII: + case PHY_INTERFACE_MODE_SMII: return ZMII_FER_SMII(input); default: return 0; @@ -106,27 +106,27 @@ int zmii_attach(struct platform_device *ofdev, int input, int *mode) * Please, always specify PHY mode in your board port to avoid * any surprises. 
*/ - if (dev->mode == PHY_MODE_NA) { - if (*mode == PHY_MODE_NA) { + if (dev->mode == PHY_INTERFACE_MODE_NA) { + if (*mode == PHY_INTERFACE_MODE_NA) { u32 r = dev->fer_save; ZMII_DBG(dev, "autodetecting mode, FER = 0x%08x" NL, r); if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1))) - dev->mode = PHY_MODE_MII; + dev->mode = PHY_INTERFACE_MODE_MII; else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1))) - dev->mode = PHY_MODE_RMII; + dev->mode = PHY_INTERFACE_MODE_RMII; else - dev->mode = PHY_MODE_SMII; - } else + dev->mode = PHY_INTERFACE_MODE_SMII; + } else { dev->mode = *mode; - + } printk(KERN_NOTICE "%pOF: bridge in %s mode\n", ofdev->dev.of_node, zmii_mode_name(dev->mode)); } else { /* All inputs must use the same mode */ - if (*mode != PHY_MODE_NA && *mode != dev->mode) { + if (*mode != PHY_INTERFACE_MODE_NA && *mode != dev->mode) { printk(KERN_ERR "%pOF: invalid mode %d specified for input %d\n", ofdev->dev.of_node, *mode, input); @@ -246,7 +246,7 @@ static int zmii_probe(struct platform_device *ofdev) mutex_init(&dev->lock); dev->ofdev = ofdev; - dev->mode = PHY_MODE_NA; + dev->mode = PHY_INTERFACE_MODE_NA; rc = -ENXIO; if (of_address_to_resource(np, 0, ®s)) { diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index b65f5f3ac034..8c3058d5d191 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -59,6 +59,7 @@ #include <linux/mm.h> #include <linux/ethtool.h> #include <linux/proc_fs.h> +#include <linux/if_arp.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> @@ -1177,6 +1178,9 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, hdr_len[2] = tcp_hdrlen(skb); else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) hdr_len[2] = sizeof(struct udphdr); + } else if (skb->protocol == htons(ETH_P_ARP)) { + hdr_len[1] = arp_hdr_len(skb->dev); + hdr_len[2] = 0; } memset(hdr_data, 0, 120); @@ -1412,7 +1416,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) /* determine if l2/3/4 headers are sent to firmware */ if ((*hdrs >> 7) & 1 && (skb->protocol == htons(ETH_P_IP) || - skb->protocol == htons(ETH_P_IPV6))) { + skb->protocol == htons(ETH_P_IPV6) || + skb->protocol == htons(ETH_P_ARP))) { build_hdr_descs_arr(tx_buff, &num_entries, *hdrs); tx_crq.v1.n_crq_elem = num_entries; tx_buff->indir_arr[0] = tx_crq; @@ -1543,15 +1548,19 @@ static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p) crq.change_mac_addr.first = IBMVNIC_CRQ_CMD; crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data); + + init_completion(&adapter->fw_done); ibmvnic_send_crq(adapter, &crq); + wait_for_completion(&adapter->fw_done); /* netdev->dev_addr is changed in handle_change_mac_rsp function */ - return 0; + return adapter->fw_done_rc ? 
-EIO : 0; } static int ibmvnic_set_mac(struct net_device *netdev, void *p) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; + int rc; if (adapter->state == VNIC_PROBED) { memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr)); @@ -1559,9 +1568,9 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p) return 0; } - __ibmvnic_set_mac(netdev, addr); + rc = __ibmvnic_set_mac(netdev, addr); - return 0; + return rc; } /** @@ -2484,6 +2493,12 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) struct ibmvnic_sub_crq_queue *scrq = instance; struct ibmvnic_adapter *adapter = scrq->adapter; + /* When booting a kdump kernel we can hit pending interrupts + * prior to completing driver initialization. + */ + if (unlikely(adapter->state != VNIC_OPEN)) + return IRQ_NONE; + adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { @@ -3558,8 +3573,8 @@ static void handle_error_indication(union ibmvnic_crq *crq, ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL); } -static void handle_change_mac_rsp(union ibmvnic_crq *crq, - struct ibmvnic_adapter *adapter) +static int handle_change_mac_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct device *dev = &adapter->vdev->dev; @@ -3568,10 +3583,13 @@ static void handle_change_mac_rsp(union ibmvnic_crq *crq, rc = crq->change_mac_addr_rsp.rc.code; if (rc) { dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); - return; + goto out; } memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0], ETH_ALEN); +out: + complete(&adapter->fw_done); + return rc; } static void handle_request_cap_rsp(union ibmvnic_crq *crq, @@ -4031,7 +4049,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, break; case CHANGE_MAC_ADDR_RSP: netdev_dbg(netdev, "Got MAC address change Response\n"); - handle_change_mac_rsp(crq, adapter); + adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter); break; case ERROR_INDICATION: netdev_dbg(netdev, "Got Error Indication\n"); @@ -4335,7 +4353,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) } netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter), - IBMVNIC_MAX_TX_QUEUES); + IBMVNIC_MAX_QUEUES); if (!netdev) return -ENOMEM; diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 3aec42118db2..fe21a6e2ddae 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -39,7 +39,7 @@ #define IBMVNIC_RX_WEIGHT 16 /* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */ #define IBMVNIC_BUFFS_PER_POOL 100 -#define IBMVNIC_MAX_TX_QUEUES 5 +#define IBMVNIC_MAX_QUEUES 10 #define IBMVNIC_TSO_BUF_SZ 65536 #define IBMVNIC_TSO_BUFS 64 diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 3b3983a1ffbb..dc71e87c3260 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1838,8 +1838,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, p = (char *)adapter + stat->stat_offset; break; default: - WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n", - stat->type, i); + netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n", + stat->type, i); continue; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 9f18d39bdc8f..1298b69f990b 100644 --- 
a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3303,9 +3303,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) if (adapter->flags & FLAG_IS_ICH) { u32 rxdctl = er32(RXDCTL(0)); - ew32(RXDCTL(0), rxdctl | 0x3); + ew32(RXDCTL(0), rxdctl | 0x3 | BIT(8)); } + dev_info(&adapter->pdev->dev, + "Some CPU C-states have been disabled in order to enable jumbo frames\n"); pm_qos_update_request(&adapter->pm_qos_req, lat); } else { pm_qos_update_request(&adapter->pm_qos_req, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index ea3ab24265ee..760cfa52d02c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -353,7 +353,7 @@ int fm10k_iov_resume(struct pci_dev *pdev) struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; /* allocate all but the last GLORT to the VFs */ - if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT)) + if (i == (~hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT)) break; /* assign GLORT to VF, and restrict it to multicast */ @@ -511,7 +511,7 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs) return err; /* allocate VFs if not already allocated */ - if (num_vfs && (num_vfs != current_vfs)) { + if (num_vfs && num_vfs != current_vfs) { /* Disable completer abort error reporting as * the VFs can trigger this any time they read a queue * that they don't own. diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 538b42d5c187..8e12aae065d8 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -446,13 +446,13 @@ static void fm10k_type_trans(struct fm10k_ring *rx_ring, skb->protocol = eth_type_trans(skb, dev); + /* Record Rx queue, or update macvlan statistics */ if (!l2_accel) - return; - - /* update MACVLAN statistics */ - macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1, - !!(rx_desc->w.hdr_info & - cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK))); + skb_record_rx_queue(skb, rx_ring->queue_index); + else + macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true, + (skb->pkt_type == PACKET_BROADCAST) || + (skb->pkt_type == PACKET_MULTICAST)); } /** @@ -479,8 +479,6 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan; - skb_record_rx_queue(skb, rx_ring->queue_index); - FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort; if (rx_desc->w.vlan) { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index adc62fb38c49..a38ae5c54da3 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -934,8 +934,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) if (vid >= VLAN_N_VID) return -EINVAL; - /* Verify we have permission to add VLANs */ - if (hw->mac.vlan_override) + /* Verify that we have permission to add VLANs. If this is a request + * to remove a VLAN, we still want to allow the user to remove the + * VLAN device. In that case, we need to clear the bit in the + * active_vlans bitmask. 
+ */ + if (set && hw->mac.vlan_override) return -EACCES; /* update active_vlans bitmask */ @@ -954,6 +958,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) rx_ring->vid &= ~FM10K_VLAN_CLEAR; } + /* If our VLAN has been overridden, there is no reason to send VLAN + * removal requests as they will be silently ignored. + */ + if (hw->mac.vlan_override) + return 0; + /* Do not remove default VLAN ID related entries from VLAN and MAC * tables */ @@ -1040,14 +1050,13 @@ static int __fm10k_uc_sync(struct net_device *dev, const unsigned char *addr, bool sync) { struct fm10k_intfc *interface = netdev_priv(dev); - struct fm10k_hw *hw = &interface->hw; u16 vid, glort = interface->glort; s32 err; if (!is_valid_ether_addr(addr)) return -EADDRNOTAVAIL; - for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1; + for (vid = fm10k_find_next_vlan(interface, 0); vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { err = fm10k_queue_mac_request(interface, glort, @@ -1106,14 +1115,13 @@ static int __fm10k_mc_sync(struct net_device *dev, const unsigned char *addr, bool sync) { struct fm10k_intfc *interface = netdev_priv(dev); - struct fm10k_hw *hw = &interface->hw; u16 vid, glort = interface->glort; s32 err; if (!is_multicast_ether_addr(addr)) return -EADDRNOTAVAIL; - for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1; + for (vid = fm10k_find_next_vlan(interface, 0); vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { err = fm10k_queue_mac_request(interface, glort, @@ -1157,10 +1165,12 @@ static void fm10k_set_rx_mode(struct net_device *dev) /* update xcast mode first, but only if it changed */ if (interface->xcast_mode != xcast_mode) { - /* update VLAN table */ + /* update VLAN table when entering promiscuous mode */ if (xcast_mode == FM10K_XCAST_MODE_PROMISC) fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL, 0, true); + + /* clear VLAN table when exiting promiscuous mode */ if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC) fm10k_clear_unused_vlans(interface); @@ -1182,9 +1192,10 @@ static void fm10k_set_rx_mode(struct net_device *dev) void fm10k_restore_rx_state(struct fm10k_intfc *interface) { + struct fm10k_l2_accel *l2_accel = interface->l2_accel; struct net_device *netdev = interface->netdev; struct fm10k_hw *hw = &interface->hw; - int xcast_mode; + int xcast_mode, i; u16 vid, glort; /* record glort for this interface */ @@ -1211,11 +1222,8 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL, 0, xcast_mode == FM10K_XCAST_MODE_PROMISC); - /* Add filter for VLAN 0 */ - fm10k_queue_vlan_request(interface, 0, 0, true); - /* update table with current entries */ - for (vid = hw->mac.default_vid ? 
fm10k_find_next_vlan(interface, 0) : 1; + for (vid = fm10k_find_next_vlan(interface, 0); vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { fm10k_queue_vlan_request(interface, vid, 0, true); @@ -1234,6 +1242,24 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync); + /* synchronize macvlan addresses */ + if (l2_accel) { + for (i = 0; i < l2_accel->size; i++) { + struct net_device *sdev = l2_accel->macvlan[i]; + + if (!sdev) + continue; + + glort = l2_accel->dglort + 1 + i; + + hw->mac.ops.update_xcast_mode(hw, glort, + FM10K_XCAST_MODE_MULTI); + fm10k_queue_mac_request(interface, glort, + sdev->dev_addr, + hw->mac.default_vid, true); + } + } + fm10k_mbx_unlock(interface); /* record updated xcast mode state */ @@ -1490,7 +1516,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev, hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_MULTI); fm10k_queue_mac_request(interface, glort, sdev->dev_addr, - 0, true); + hw->mac.default_vid, true); } fm10k_mbx_unlock(interface); @@ -1530,7 +1556,7 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_NONE); fm10k_queue_mac_request(interface, glort, sdev->dev_addr, - 0, false); + hw->mac.default_vid, false); } fm10k_mbx_unlock(interface); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 425d814aed4d..d6406fc31ffb 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -866,7 +866,7 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw, /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is * used here to indicate to the VF that it will not have privilege to * write VLAN_TABLE. All policy is enforced on the PF but this allows - * the VF to correctly report errors to userspace rqeuests. + * the VF to correctly report errors to userspace requests. 
*/ if (vf_info->pf_vid) vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE; diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index e019baa905c5..46e9f4e0a02c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -508,39 +508,40 @@ struct i40e_pf { #define I40E_HW_PORT_ID_VALID BIT(17) #define I40E_HW_RESTART_AUTONEG BIT(18) - u32 flags; -#define I40E_FLAG_RX_CSUM_ENABLED BIT(0) -#define I40E_FLAG_MSI_ENABLED BIT(1) -#define I40E_FLAG_MSIX_ENABLED BIT(2) -#define I40E_FLAG_RSS_ENABLED BIT(3) -#define I40E_FLAG_VMDQ_ENABLED BIT(4) -#define I40E_FLAG_FILTER_SYNC BIT(5) -#define I40E_FLAG_SRIOV_ENABLED BIT(6) -#define I40E_FLAG_DCB_CAPABLE BIT(7) -#define I40E_FLAG_DCB_ENABLED BIT(8) -#define I40E_FLAG_FD_SB_ENABLED BIT(9) -#define I40E_FLAG_FD_ATR_ENABLED BIT(10) -#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT(11) -#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT(12) -#define I40E_FLAG_MFP_ENABLED BIT(13) -#define I40E_FLAG_UDP_FILTER_SYNC BIT(14) -#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(15) -#define I40E_FLAG_VEB_MODE_ENABLED BIT(16) -#define I40E_FLAG_VEB_STATS_ENABLED BIT(17) -#define I40E_FLAG_LINK_POLLING_ENABLED BIT(18) -#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(19) -#define I40E_FLAG_TEMP_LINK_POLLING BIT(20) -#define I40E_FLAG_LEGACY_RX BIT(21) -#define I40E_FLAG_PTP BIT(22) -#define I40E_FLAG_IWARP_ENABLED BIT(23) -#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT(24) -#define I40E_FLAG_CLIENT_L2_CHANGE BIT(25) -#define I40E_FLAG_CLIENT_RESET BIT(26) -#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(27) -#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(28) -#define I40E_FLAG_TC_MQPRIO BIT(29) -#define I40E_FLAG_FD_SB_INACTIVE BIT(30) -#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(31) + u64 flags; +#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(0) +#define I40E_FLAG_MSI_ENABLED BIT_ULL(1) +#define I40E_FLAG_MSIX_ENABLED BIT_ULL(2) +#define I40E_FLAG_RSS_ENABLED BIT_ULL(3) +#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(4) +#define I40E_FLAG_FILTER_SYNC BIT_ULL(5) +#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(6) +#define I40E_FLAG_DCB_CAPABLE BIT_ULL(7) +#define I40E_FLAG_DCB_ENABLED BIT_ULL(8) +#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(9) +#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(10) +#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(11) +#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(12) +#define I40E_FLAG_MFP_ENABLED BIT_ULL(13) +#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(14) +#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(15) +#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(16) +#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(17) +#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(18) +#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(19) +#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(20) +#define I40E_FLAG_LEGACY_RX BIT_ULL(21) +#define I40E_FLAG_PTP BIT_ULL(22) +#define I40E_FLAG_IWARP_ENABLED BIT_ULL(23) +#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(24) +#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(25) +#define I40E_FLAG_CLIENT_RESET BIT_ULL(26) +#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT_ULL(27) +#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT_ULL(28) +#define I40E_FLAG_TC_MQPRIO BIT_ULL(29) +#define I40E_FLAG_FD_SB_INACTIVE BIT_ULL(30) +#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT_ULL(31) +#define I40E_FLAG_DISABLE_FW_LLDP BIT_ULL(32) struct i40e_client_instance *cinst; bool stat_offsets_loaded; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 9af74253c3f7..e78971605e0b 100644 
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -907,10 +907,15 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, /* update the error if time out occurred */ if ((!cmd_completed) && (!details->async && !details->postpone)) { - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, - "AQTX: Writeback timeout.\n"); - status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; + if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: AQ Critical error.\n"); + status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR; + } else { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: Writeback timeout.\n"); + status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; + } } asq_send_command_error: @@ -971,7 +976,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, } /* set next_to_use to head */ - ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); + ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK; if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; @@ -1027,7 +1032,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, hw->aq.arq.next_to_clean = ntc; hw->aq.arq.next_to_use = ntu; - i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode)); + i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc); clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index b0188b8f91ba..a852775d3059 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -198,13 +198,14 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_add_mirror_rule = 0x0260, i40e_aqc_opc_delete_mirror_rule = 0x0261, - /* Pipeline Personalization Profile */ + /* Dynamic Device Personalization */ i40e_aqc_opc_write_personalization_profile = 0x0270, i40e_aqc_opc_get_personalization_profile_list = 0x0271, /* DCB commands */ i40e_aqc_opc_dcb_ignore_pfc = 0x0301, i40e_aqc_opc_dcb_updated = 0x0302, + i40e_aqc_opc_set_dcb_parameters = 0x0303, /* TX scheduler */ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, @@ -1594,7 +1595,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); -/* Pipeline Personalization Profile */ +/* Dynamic Device Personalization */ struct i40e_aqc_write_personalization_profile { u8 flags; u8 reserved[3]; @@ -1605,7 +1606,7 @@ struct i40e_aqc_write_personalization_profile { I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile); -struct i40e_aqc_write_ppp_resp { +struct i40e_aqc_write_ddp_resp { __le32 error_offset; __le32 error_info; __le32 addr_high; @@ -1614,8 +1615,8 @@ struct i40e_aqc_write_ppp_resp { struct i40e_aqc_get_applied_profiles { u8 flags; -#define I40E_AQC_GET_PPP_GET_CONF 0x1 -#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2 +#define I40E_AQC_GET_DDP_GET_CONF 0x1 +#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2 u8 rsv[3]; __le32 reserved; __le32 addr_high; @@ -2231,8 +2232,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access); */ struct i40e_aqc_nvm_update { u8 command_flags; -#define I40E_AQ_NVM_LAST_CMD 0x01 -#define I40E_AQ_NVM_FLASH_ONLY 0x80 +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03 +#define 
I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01 u8 module_pointer; __le16 length; __le32 offset; @@ -2492,6 +2497,17 @@ struct i40e_aqc_lldp_start { I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); +/* Set DCB (direct 0x0303) */ +struct i40e_aqc_set_dcb_parameters { + u8 command; +#define I40E_AQ_DCB_SET_AGENT 0x1 +#define I40E_DCB_VALID 0x1 + u8 valid_flags; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters); + /* Get CEE DCBX Oper Config (0x0A07) * uses the generic descriptor struct * returns below as indirect response diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 1b1e2acbd07f..0de9610c1d8d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -378,11 +378,11 @@ void i40e_client_subtask(struct i40e_pf *pf) if (!client || !cdev) return; - /* Here we handle client opens. If the client is down, but - * the netdev is up, then open the client. + /* Here we handle client opens. If the client is down, and + * the netdev is registered, then open the client. */ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { - if (!test_bit(__I40E_VSI_DOWN, vsi->state) && + if (vsi->netdev_registered && client->ops && client->ops->open) { set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); ret = client->ops->open(&cdev->lan_info, client); @@ -393,17 +393,19 @@ void i40e_client_subtask(struct i40e_pf *pf) i40e_client_del_instance(pf); } } - } else { - /* Likewise for client close. If the client is up, but the netdev - * is down, then close the client. - */ - if (test_bit(__I40E_VSI_DOWN, vsi->state) && - client->ops && client->ops->close) { - clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); - client->ops->close(&cdev->lan_info, client, false); - i40e_client_release_qvlist(&cdev->lan_info); - } } + + /* enable/disable PE TCP_ENA flag based on netdev down/up + */ + if (test_bit(__I40E_VSI_DOWN, vsi->state)) + i40e_client_update_vsi_ctxt(&cdev->lan_info, client, + 0, 0, 0, + I40E_CLIENT_VSI_FLAG_TCP_ENABLE); + else + i40e_client_update_vsi_ctxt(&cdev->lan_info, client, + 0, 0, + I40E_CLIENT_VSI_FLAG_TCP_ENABLE, + I40E_CLIENT_VSI_FLAG_TCP_ENABLE); } /** @@ -717,13 +719,13 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev, return -ENOENT; } - if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) && - (flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) { + if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) && + (flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) { ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; - } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) && - !(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) { + } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) && + !(flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) { ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA; diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h index 15b21a5315b5..ba55c889e4c5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.h +++ b/drivers/net/ethernet/intel/i40e/i40e_client.h @@ -132,7 +132,7 @@ struct i40e_info { #define I40E_CLIENT_RESET_LEVEL_PF 1 #define I40E_CLIENT_RESET_LEVEL_CORE 2 -#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE BIT(1) +#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1) struct i40e_ops { /* 
setup_q_vector_list enables queues with a particular vector */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 095965f268bd..ef5a868aae46 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -278,6 +278,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err) return "I40E_NOT_SUPPORTED"; case I40E_ERR_FIRMWARE_API_VERSION: return "I40E_ERR_FIRMWARE_API_VERSION"; + case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR: + return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); @@ -1486,6 +1488,7 @@ u32 i40e_led_get(struct i40e_hw *hw) case I40E_COMBINED_ACTIVITY: case I40E_FILTER_ACTIVITY: case I40E_MAC_ACTIVITY: + case I40E_LINK_ACTIVITY: continue; default: break; @@ -1534,6 +1537,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) case I40E_COMBINED_ACTIVITY: case I40E_FILTER_ACTIVITY: case I40E_MAC_ACTIVITY: + case I40E_LINK_ACTIVITY: continue; default: break; @@ -1544,9 +1548,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); - if (mode == I40E_LINK_ACTIVITY) - blink = false; - if (blink) gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); else @@ -3465,13 +3466,14 @@ exit: * @length: length of the section to be written (in bytes from the offset) * @data: command buffer (size [bytes] = length) * @last_command: tells if this is the last command in a series + * @preservation_flags: Preservation mode flags * @cmd_details: pointer to command details structure or NULL * * Update the NVM using the admin queue commands **/ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, - bool last_command, + bool last_command, u8 preservation_flags, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; @@ -3490,6 +3492,16 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, /* If this is the last command in a series, set the proper flag. 
*/ if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; + if (hw->mac.type == I40E_MAC_X722) { + if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) + cmd->command_flags |= + (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << + I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); + else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) + cmd->command_flags |= + (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << + I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); + } cmd->module_pointer = module_pointer; cmd->offset = cpu_to_le32(offset); cmd->length = cpu_to_le16(length); @@ -3629,7 +3641,34 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); cmd->command = I40E_AQ_LLDP_AGENT_START; + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} +/** + * i40e_aq_set_dcb_parameters + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * @dcb_enable: True if DCB configuration needs to be applied + * + **/ +enum i40e_status_code +i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_dcb_parameters *cmd = + (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_dcb_parameters); + + if (dcb_enable) { + cmd->valid_flags = I40E_DCB_VALID; + cmd->command = I40E_AQ_DCB_SET_AGENT; + } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; @@ -5236,7 +5275,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, } /** - * i40e_aq_write_ppp - Write pipeline personalization profile (ppp) + * i40e_aq_write_ddp - Write dynamic device personalization (ddp) * @hw: pointer to the hw struct * @buff: command buffer (size in bytes = buff_size) * @buff_size: buffer size in bytes @@ -5246,7 +5285,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, * @cmd_details: pointer to command details structure or NULL **/ enum -i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff, +i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, u16 buff_size, u32 track_id, u32 *error_offset, u32 *error_info, struct i40e_asq_cmd_details *cmd_details) @@ -5255,7 +5294,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff, struct i40e_aqc_write_personalization_profile *cmd = (struct i40e_aqc_write_personalization_profile *) &desc.params.raw; - struct i40e_aqc_write_ppp_resp *resp; + struct i40e_aqc_write_ddp_resp *resp; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, @@ -5271,7 +5310,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff, status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { - resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw; + resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; if (error_offset) *error_offset = le32_to_cpu(resp->error_offset); if (error_info) @@ -5282,14 +5321,14 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff, } /** - * i40e_aq_get_ppp_list - Read pipeline personalization profile (ppp) + * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp) * @hw: pointer to the hw struct * @buff: command buffer (size in bytes = buff_size) * @buff_size: buffer size in bytes * @cmd_details: pointer to command details structure or NULL **/ enum -i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff, 
+i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, u16 buff_size, u8 flags, struct i40e_asq_cmd_details *cmd_details) { @@ -5364,11 +5403,6 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, u32 offset = 0, info = 0; u32 i; - if (!track_id) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0."); - return I40E_NOT_SUPPORTED; - } - dev_cnt = profile->device_table_count; for (i = 0; i < dev_cnt; i++) { @@ -5378,7 +5412,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, break; } if (i == dev_cnt) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP"); + i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP"); return I40E_ERR_DEVICE_NOT_SUPPORTED; } @@ -5397,7 +5431,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, sizeof(struct i40e_profile_section_header); /* Write profile */ - status = i40e_aq_write_ppp(hw, (void *)sec, (u16)section_size, + status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, track_id, &offset, &info, NULL); if (status) { i40e_debug(hw, I40E_DEBUG_PACKAGE, @@ -5439,10 +5473,10 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw, sec->section.offset); pinfo->track_id = track_id; pinfo->version = profile->version; - pinfo->op = I40E_PPP_ADD_TRACKID; - memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE); + pinfo->op = I40E_DDP_ADD_TRACKID; + memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); - status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end, + status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, track_id, &offset, &info, NULL); return status; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 5f6cf7212d4f..2f5bee713fef 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -126,6 +126,10 @@ static const struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), I40E_PF_STAT("link_xon_tx", stats.link_xon_tx), I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx), + I40E_PF_STAT("priority_xon_rx", stats.priority_xon_rx), + I40E_PF_STAT("priority_xoff_rx", stats.priority_xoff_rx), + I40E_PF_STAT("priority_xon_tx", stats.priority_xon_tx), + I40E_PF_STAT("priority_xoff_tx", stats.priority_xoff_tx), I40E_PF_STAT("rx_size_64", stats.rx_size_64), I40E_PF_STAT("rx_size_127", stats.rx_size_127), I40E_PF_STAT("rx_size_255", stats.rx_size_255), @@ -229,6 +233,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), I40E_PRIV_FLAG("disable-source-pruning", I40E_FLAG_SOURCE_PRUNING_DISABLED, 0), + I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0), }; #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags) @@ -1585,6 +1590,8 @@ static int i40e_set_ringparam(struct net_device *netdev, */ rx_rings[i].desc = NULL; rx_rings[i].rx_bi = NULL; + /* Clear cloned XDP RX-queue info before setup call */ + memset(&rx_rings[i].xdp_rxq, 0, sizeof(rx_rings[i].xdp_rxq)); /* this is to allow wr32 to have something to write to * during early allocation of Rx buffers */ @@ -2299,6 +2306,8 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi, struct ethtool_coalesce *ec, int queue) { + struct i40e_ring *rx_ring = vsi->rx_rings[queue]; + struct i40e_ring *tx_ring = vsi->tx_rings[queue]; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_q_vector *q_vector; @@ -2306,26 
+2315,26 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi, intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit); - vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs; - vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs; + rx_ring->rx_itr_setting = ec->rx_coalesce_usecs; + tx_ring->tx_itr_setting = ec->tx_coalesce_usecs; if (ec->use_adaptive_rx_coalesce) - vsi->rx_rings[queue]->rx_itr_setting |= I40E_ITR_DYNAMIC; + rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC; else - vsi->rx_rings[queue]->rx_itr_setting &= ~I40E_ITR_DYNAMIC; + rx_ring->rx_itr_setting &= ~I40E_ITR_DYNAMIC; if (ec->use_adaptive_tx_coalesce) - vsi->tx_rings[queue]->tx_itr_setting |= I40E_ITR_DYNAMIC; + tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC; else - vsi->tx_rings[queue]->tx_itr_setting &= ~I40E_ITR_DYNAMIC; + tx_ring->tx_itr_setting &= ~I40E_ITR_DYNAMIC; - q_vector = vsi->rx_rings[queue]->q_vector; - q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[queue]->rx_itr_setting); + q_vector = rx_ring->q_vector; + q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting); vector = vsi->base_vector + q_vector->v_idx; wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr); - q_vector = vsi->tx_rings[queue]->q_vector; - q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[queue]->tx_itr_setting); + q_vector = tx_ring->q_vector; + q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting); vector = vsi->base_vector + q_vector->v_idx; wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr); @@ -2740,16 +2749,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, no_input_set: if (input_set & I40E_L3_SRC_MASK) - fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFF); + fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF); if (input_set & I40E_L3_DST_MASK) - fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFF); + fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF); if (input_set & I40E_L4_SRC_MASK) - fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFFFFFF); + fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF); if (input_set & I40E_L4_DST_MASK) - fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFFFFFF); + fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF); if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET) fsp->ring_cookie = RX_CLS_FLOW_DISC; @@ -3800,6 +3809,16 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi, i40e_write_fd_input_set(pf, index, new_mask); + /* IP_USER_FLOW filters match both IPv4/Other and IPv4/Fragmented + * frames. If we're programming the input set for IPv4/Other, we also + * need to program the IPv4/Fragmented input set. Since we don't have + * separate support, we'll always assume and enforce that the two flow + * types must have matching input sets. + */ + if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) + i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4, + new_mask); + /* Add the new offset and update table, if necessary */ if (new_flex_offset) { err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset, @@ -3822,6 +3841,87 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi, } /** + * i40e_match_fdir_filter - Return true if two filters match + * @a: pointer to filter struct + * @b: pointer to filter struct + * + * Returns true if the two filters match exactly the same criteria. I.e. they + * match the same flow type and have the same parameters. We don't need to + * check any input-set since all filters of the same flow type must use the + * same input set. + **/ +static bool i40e_match_fdir_filter(struct i40e_fdir_filter *a, + struct i40e_fdir_filter *b) +{ + /* The filters do not match if any of these criteria differ. */ + if (a->dst_ip != b->dst_ip || + a->src_ip != b->src_ip || + a->dst_port != b->dst_port || + a->src_port != b->src_port || + a->flow_type != b->flow_type || + a->ip4_proto != b->ip4_proto) + return false; + + return true; +} + +/** + * i40e_disallow_matching_filters - Check that new filters differ + * @vsi: pointer to the targeted VSI + * @input: new filter to check + * + * Due to hardware limitations, it is not possible for two filters that match + * similar criteria to be programmed at the same time. This is true for a few + * reasons: + * + * (a) all filters matching a particular flow type must use the same input + * set, that is they must match the same criteria. + * (b) different flow types will never match the same packet, as the flow type + * is decided by hardware before checking which rules apply. + * (c) hardware has no way to distinguish which order filters apply in. + * + * Due to this, we can't really support using the location data to order + * filters in the hardware parsing. It is technically possible for the user to + * request two filters matching the same criteria but which select different + * queues. In this case, rather than keep both filters in the list, we reject + * the 2nd filter when the user requests adding it. + * + * This avoids needing to track location for programming the filter to + * hardware, and ensures that we avoid some strange scenarios involving + * deleting filters which match the same criteria. + **/ +static int i40e_disallow_matching_filters(struct i40e_vsi *vsi, + struct i40e_fdir_filter *input) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_fdir_filter *rule; + struct hlist_node *node2; + + /* Loop through every filter, and check that it doesn't match */ + hlist_for_each_entry_safe(rule, node2, + &pf->fdir_filter_list, fdir_node) { + /* Don't check the filters match if they share the same fd_id, + * since the new filter is actually just updating the target + * of the old filter. + */ + if (rule->fd_id == input->fd_id) + continue; + + /* If any filters match, then print a warning message to the + * kernel message buffer and bail out. + */ + if (i40e_match_fdir_filter(rule, input)) { + dev_warn(&pf->pdev->dev, + "Existing user defined filter %d already matches this flow.\n", + rule->fd_id); + return -EINVAL; + } + } + + return 0; +} + +/** * i40e_add_fdir_ethtool - Add/Remove Flow Director filters * @vsi: pointer to the targeted VSI * @cmd: command to get or set RX flow classification rules @@ -3933,19 +4033,25 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, input->flex_offset = userdef.flex_offset; } - ret = i40e_add_del_fdir(vsi, input, true); + /* Avoid programming two filters with identical match criteria. */ + ret = i40e_disallow_matching_filters(vsi, input); if (ret) - goto free_input; + goto free_filter_memory; /* Add the input filter to the fdir_input_list, possibly replacing * a previous filter. Do not free the input structure after adding it * to the list as this would cause a use-after-free bug.
*/ i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL); - + ret = i40e_add_del_fdir(vsi, input, true); + if (ret) + goto remove_sw_rule; return 0; -free_input: +remove_sw_rule: + hlist_del(&input->fdir_node); + pf->fdir_pf_active_filters--; +free_filter_memory: kfree(input); return ret; } @@ -4258,7 +4364,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - u32 orig_flags, new_flags, changed_flags; + u64 orig_flags, new_flags, changed_flags; u32 i, j; orig_flags = READ_ONCE(pf->flags); @@ -4309,13 +4415,32 @@ flags_complete: !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)) return -EOPNOTSUPP; + /* Disable FW LLDP not supported if NPAR active or if FW + * API version < 1.7 + */ + if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) { + if (pf->hw.func_caps.npar_enable) { + dev_warn(&pf->pdev->dev, + "Unable to stop FW LLDP if NPAR active\n"); + return -EOPNOTSUPP; + } + + if (pf->hw.aq.api_maj_ver < 1 || + (pf->hw.aq.api_maj_ver == 1 && + pf->hw.aq.api_min_ver < 7)) { + dev_warn(&pf->pdev->dev, + "FW ver does not support stopping FW LLDP\n"); + return -EOPNOTSUPP; + } + } + /* Compare and exchange the new flags into place. If we failed, that * is if cmpxchg returns anything but the old value, this means that * something else has modified the flags variable since we copied it * originally. We'll just punt with an error and log something in the * message buffer. */ - if (cmpxchg(&pf->flags, orig_flags, new_flags) != orig_flags) { + if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) { dev_warn(&pf->pdev->dev, "Unable to update pf->flags as it was modified by another thread...\n"); return -EAGAIN; @@ -4354,12 +4479,37 @@ flags_complete: } } + if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) { + if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) { + struct i40e_dcbx_config *dcbcfg; + int i; + + i40e_aq_stop_lldp(&pf->hw, true, NULL); + i40e_aq_set_dcb_parameters(&pf->hw, true, NULL); + /* reset local_dcbx_config to default */ + dcbcfg = &pf->hw.local_dcbx_config; + dcbcfg->etscfg.willing = 1; + dcbcfg->etscfg.maxtcs = 0; + dcbcfg->etscfg.tcbwtable[0] = 100; + for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etscfg.tcbwtable[i] = 0; + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + dcbcfg->etscfg.prioritytable[i] = 0; + dcbcfg->etscfg.tsatable[0] = I40E_IEEE_TSA_ETS; + dcbcfg->pfc.willing = 1; + dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + } else { + i40e_aq_start_lldp(&pf->hw, NULL); + } + } + /* Issue reset to cause things to take effect, as additional bits * are added we will need to create a mask of bits requiring reset */ if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED | I40E_FLAG_LEGACY_RX | - I40E_FLAG_SOURCE_PRUNING_DISABLED)) + I40E_FLAG_SOURCE_PRUNING_DISABLED | + I40E_FLAG_DISABLE_FW_LLDP)) i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true); return 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index af792112a2d3..f95ce9b5e4fb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -47,8 +47,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 2 -#define DRV_VERSION_MINOR 1 -#define DRV_VERSION_BUILD 14 +#define DRV_VERSION_MINOR 3 +#define DRV_VERSION_BUILD 2 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -1818,6 +1818,10 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, num_tc_qps = qcount / numtc; num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf)); + /* Do not allow using more TC queue pairs than there are MSI-X vectors */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix); + /* Setup queue offset/count for all TCs for given VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* See if the given TC is enabled for the given VSI */ @@ -4122,6 +4126,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); q_vector->num_ringpairs = num_ringpairs; + q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1; q_vector->rx.count = 0; q_vector->tx.count = 0; @@ -4877,104 +4882,6 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) #endif /** - * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue - * @q_idx: TX queue number - * @vsi: Pointer to VSI struct - * - * This function checks specified queue for given VSI. Detects hung condition. - * We proactively detect hung TX queues by checking if interrupts are disabled - * but there are pending descriptors. If it appears hung, attempt to recover - * by triggering a SW interrupt. - **/ -static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) -{ - struct i40e_ring *tx_ring = NULL; - struct i40e_pf *pf; - u32 val, tx_pending; - int i; - - pf = vsi->back; - - /* now that we have an index, find the tx_ring struct */ - for (i = 0; i < vsi->num_queue_pairs; i++) { - if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { - if (q_idx == vsi->tx_rings[i]->queue_index) { - tx_ring = vsi->tx_rings[i]; - break; - } - } - } - - if (!tx_ring) - return; - - /* Read interrupt register */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) - val = rd32(&pf->hw, - I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx + - tx_ring->vsi->base_vector - 1)); - else - val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); - - tx_pending = i40e_get_tx_pending(tx_ring); - - /* Interrupts are disabled and TX pending is non-zero, - * trigger the SW interrupt (don't wait). Worst case - * there will be one extra interrupt which may result - * into not cleaning any queues because queues are cleaned. - */ - if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) - i40e_force_wb(vsi, tx_ring->q_vector); -} - -/** - * i40e_detect_recover_hung - Function to detect and recover hung_queues - * @pf: pointer to PF struct - * - * LAN VSI has netdev and netdev has TX queues. This function is to check - * each of those TX queues if they are hung, trigger recovery by issuing - * SW interrupt.
- **/ -static void i40e_detect_recover_hung(struct i40e_pf *pf) -{ - struct net_device *netdev; - struct i40e_vsi *vsi; - unsigned int i; - - /* Only for LAN VSI */ - vsi = pf->vsi[pf->lan_vsi]; - - if (!vsi) - return; - - /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */ - if (test_bit(__I40E_VSI_DOWN, vsi->back->state) || - test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state)) - return; - - /* Make sure type is MAIN VSI */ - if (vsi->type != I40E_VSI_MAIN) - return; - - netdev = vsi->netdev; - if (!netdev) - return; - - /* Bail out if netif_carrier is not OK */ - if (!netif_carrier_ok(netdev)) - return; - - /* Go thru' TX queues for netdev */ - for (i = 0; i < netdev->num_tx_queues; i++) { - struct netdev_queue *q; - - q = netdev_get_tx_queue(netdev, i); - if (q) - i40e_detect_recover_hung_queue(i, vsi); - } -} - -/** * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP * @pf: pointer to PF * @@ -5342,6 +5249,8 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) { u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; struct i40e_vsi_context ctxt; int ret = 0; int i; @@ -5359,10 +5268,40 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); if (ret) { - dev_info(&vsi->back->pdev->dev, + struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; + + dev_info(&pf->pdev->dev, "Failed configuring TC map %d for VSI %d\n", enabled_tc, vsi->seid); - goto out; + ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, + &bw_config, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed querying vsi bw info, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + goto out; + } + if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) { + u8 valid_tc = bw_config.tc_valid_bits & enabled_tc; + + if (!valid_tc) + valid_tc = bw_config.tc_valid_bits; + /* Always enable TC0, no matter what */ + valid_tc |= 1; + dev_info(&pf->pdev->dev, + "Requested tc 0x%x, but FW reports 0x%x as valid. 
Attempting to use 0x%x.\n", + enabled_tc, bw_config.tc_valid_bits, valid_tc); + enabled_tc = valid_tc; + } + + ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); + if (ret) { + dev_err(&pf->pdev->dev, + "Unable to configure TC map %d for VSI %d\n", + enabled_tc, vsi->seid); + goto out; + } } /* Update Queue Pairs Mapping for currently enabled UPs */ @@ -5402,13 +5341,12 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) /* Update the VSI after updating the VSI queue-mapping * information */ - ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { - dev_info(&vsi->back->pdev->dev, + dev_info(&pf->pdev->dev, "Update vsi tc config failed, err %s aq_err %s\n", - i40e_stat_str(&vsi->back->hw, ret), - i40e_aq_str(&vsi->back->hw, - vsi->back->hw.aq.asq_last_status)); + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); goto out; } /* update the local VSI info with updated queue map */ @@ -5418,11 +5356,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) /* Update current VSI BW information */ ret = i40e_vsi_get_bw_info(vsi); if (ret) { - dev_info(&vsi->back->pdev->dev, + dev_info(&pf->pdev->dev, "Failed updating vsi bw info, err %s aq_err %s\n", - i40e_stat_str(&vsi->back->hw, ret), - i40e_aq_str(&vsi->back->hw, - vsi->back->hw.aq.asq_last_status)); + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); goto out; } @@ -6388,8 +6325,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) struct i40e_hw *hw = &pf->hw; int err = 0; - /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ - if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) + /* Do not enable DCB for SW1 and SW2 images even if the FW is capable + * Also do not enable DCBx if FW LLDP agent is disabled + */ + if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) || + (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) goto out; /* Get the initial DCB configuration */ @@ -6416,6 +6356,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) dev_dbg(&pf->pdev->dev, "DCBX offload is supported for this PF.\n"); } + } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) { + dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n"); + pf->flags |= I40E_FLAG_DISABLE_FW_LLDP; } else { dev_info(&pf->pdev->dev, "Query for DCB configuration failed, err %s aq_err %s\n", @@ -7505,11 +7448,6 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np, { struct i40e_vsi *vsi = np->vsi; - if (!tc_can_offload(vsi->netdev)) - return -EOPNOTSUPP; - if (cls_flower->common.chain_index) - return -EOPNOTSUPP; - switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: return i40e_configure_clsflower(vsi, cls_flower); @@ -7527,6 +7465,9 @@ static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, { struct i40e_netdev_priv *np = cb_priv; + if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSFLOWER: return i40e_setup_tc_cls_flower(np, type_data); @@ -7744,6 +7685,9 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf) /* Reprogram the default input set for Other/IPv4 */ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); + + i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4, + I40E_L3_SRC_MASK | I40E_L3_DST_MASK); } /** @@ -9077,6 +9021,17 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi) vsi->uplink_seid); return ret; } + /* Reconfigure TX queues using 
QTX_CTL register */ + ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "failed to configure TX rings for channel %u\n", + ch->seid); + return ret; + } + /* update 'next_base_queue' */ + vsi->next_base_queue = vsi->next_base_queue + + ch->num_queue_pairs; if (ch->max_tx_rate) { u64 credits = ch->max_tx_rate; @@ -9280,6 +9235,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) goto end_core_reset; } + /* Enable FW to write a default DCB config on link-up */ + i40e_aq_set_dcb_parameters(hw, true, NULL); + #ifdef CONFIG_I40E_DCB ret = i40e_init_pf_dcb(pf); if (ret) { @@ -9697,7 +9655,7 @@ static void i40e_service_task(struct work_struct *work) if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) return; - i40e_detect_recover_hung(pf); + i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]); i40e_sync_filters_subtask(pf); i40e_reset_subtask(pf); i40e_handle_mdd_event(pf); @@ -10464,10 +10422,9 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) /* set up vector assignment tracking */ size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); pf->irq_pile = kzalloc(size, GFP_KERNEL); - if (!pf->irq_pile) { - dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); + if (!pf->irq_pile) return -ENOMEM; - } + pf->irq_pile->num_entries = vectors; pf->irq_pile->search_hint = 0; @@ -10785,8 +10742,13 @@ static int i40e_pf_config_rss(struct i40e_pf *pf) /* Determine the RSS size of the VSI */ if (!vsi->rss_size) { u16 qcount; - - qcount = vsi->num_queue_pairs / vsi->tc_config.numtc; + /* If the firmware does something weird during VSI init, we + * could end up with zero TCs. Check for that to avoid + * divide-by-zero. It probably won't pass traffic, but it also + * won't panic. + */ + qcount = vsi->num_queue_pairs / + (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1); vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); } if (!vsi->rss_size) @@ -10974,7 +10936,7 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf) ret = i40e_aq_update_nvm(&pf->hw, I40E_SR_NVM_CONTROL_WORD, 0x10, sizeof(nvm_word), - &nvm_word, true, NULL); + &nvm_word, true, 0, NULL); /* Save off last admin queue command status before releasing * the NVM */ @@ -11115,13 +11077,13 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->hw.aq.fw_maj_ver >= 6) pf->hw_features |= I40E_HW_PTP_L4_CAPABLE; - if (pf->hw.func_caps.vmdq) { + if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; pf->flags |= I40E_FLAG_VMDQ_ENABLED; pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); } - if (pf->hw.func_caps.iwarp) { + if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) { pf->flags |= I40E_FLAG_IWARP_ENABLED; /* IWARP needs one extra vector for CQP just like MISC.*/ pf->num_iwarp_msix = (int)num_online_cpus() + 1; @@ -13598,6 +13560,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, pf); pci_save_state(pdev); + + /* Enable FW to write default DCB config on link-up */ + i40e_aq_set_dcb_parameters(hw, true, NULL); + #ifdef CONFIG_I40E_DCB err = i40e_init_pf_dcb(pf); if (err) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 7689c2ee0d46..76a5cb04e4fe 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -239,8 +239,9 @@ read_nvm_exit: * * Writes a 16 bit words buffer to the Shadow RAM using the admin command. 
**/ -static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, - u32 offset, u16 words, void *data, +static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, + u8 module_pointer, u32 offset, + u16 words, void *data, bool last_command) { i40e_status ret_code = I40E_ERR_NVM; @@ -389,7 +390,7 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { i40e_status ret_code; - u16 read_size = *words; + u16 read_size; bool last_cmd = false; u16 words_read = 0; u16 i = 0; @@ -496,7 +497,8 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, ret_code = i40e_aq_update_nvm(hw, module_pointer, 2 * offset, /*bytes*/ 2 * words, /*bytes*/ - data, last_command, &cmd_details); + data, last_command, 0, + &cmd_details); return ret_code; } @@ -677,6 +679,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); +static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); static inline u8 i40e_nvmupd_get_module(u32 val) { return (u8)(val & I40E_NVM_MOD_PNT_MASK); @@ -686,6 +691,12 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val) return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); } +static inline u8 i40e_nvmupd_get_preservation_flags(u32 val) +{ + return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >> + I40E_NVM_PRESERVATION_FLAGS_SHIFT); +} + static const char * const i40e_nvm_update_state_str[] = { "I40E_NVMUPD_INVALID", "I40E_NVMUPD_READ_CON", @@ -703,6 +714,7 @@ static const char * const i40e_nvm_update_state_str[] = { "I40E_NVMUPD_STATUS", "I40E_NVMUPD_EXEC_AQ", "I40E_NVMUPD_GET_AQ_RESULT", + "I40E_NVMUPD_GET_AQ_EVENT", }; /** @@ -798,9 +810,9 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, * the wait info and return before doing anything else */ if (cmd->offset == 0xffff) { - i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode); + i40e_nvmupd_clear_wait_state(hw); status = 0; - goto exit; + break; } status = I40E_ERR_NOT_READY; @@ -815,7 +827,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, *perrno = -ESRCH; break; } -exit: + mutex_unlock(&hw->aq.arq_mutex); return status; } @@ -944,6 +956,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno); break; + case I40E_NVMUPD_GET_AQ_EVENT: + status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno); + break; + default: i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: bad cmd %s in init state\n", @@ -1118,38 +1134,53 @@ retry: } /** - * i40e_nvmupd_check_wait_event - handle NVM update operation events + * i40e_nvmupd_clear_wait_state - clear wait state on hw * @hw: pointer to the hardware structure - * @opcode: the event that just happened **/ -void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode) +void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw) { - if (opcode == hw->nvm_wait_opcode) { - i40e_debug(hw, I40E_DEBUG_NVM, - "NVMUPD: clearing wait on opcode 0x%04x\n", opcode); - if (hw->nvm_release_on_done) { - i40e_release_nvm(hw); - hw->nvm_release_on_done = false; - } - hw->nvm_wait_opcode = 0; + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: clearing wait on opcode 0x%04x\n", + hw->nvm_wait_opcode); - if (hw->aq.arq_last_status) { - hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR; - return; - } + if (hw->nvm_release_on_done) { + i40e_release_nvm(hw); + hw->nvm_release_on_done = false; + 
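
The new i40e_nvmupd_get_preservation_flags() accessor follows the same pattern as the existing module and transaction helpers: each field of the nvm_access config word is a shifted bitfield. Using the values added later in i40e_type.h (shift 12, two-bit mask), the extraction can be exercised standalone:

#include <stdint.h>
#include <stdio.h>

#define I40E_NVM_PRESERVATION_FLAGS_SHIFT	12
#define I40E_NVM_PRESERVATION_FLAGS_MASK \
	(0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)

static uint8_t get_preservation_flags(uint32_t config)
{
	return (uint8_t)((config & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
			 I40E_NVM_PRESERVATION_FLAGS_SHIFT);
}

int main(void)
{
	/* config word carrying PRESERVATION_FLAGS_ALL (0x02) in bits 13:12 */
	printf("%u\n", get_preservation_flags(0x2 << 12));	/* prints 2 */
	return 0;
}
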
} + hw->nvm_wait_opcode = 0; - switch (hw->nvmupd_state) { - case I40E_NVMUPD_STATE_INIT_WAIT: - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - break; + if (hw->aq.arq_last_status) { + hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR; + return; + } - case I40E_NVMUPD_STATE_WRITE_WAIT: - hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; - break; + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; - default: - break; - } + case I40E_NVMUPD_STATE_WRITE_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + break; + + default: + break; + } +} + +/** + * i40e_nvmupd_check_wait_event - handle NVM update operation events + * @hw: pointer to the hardware structure + * @opcode: the event that just happened + **/ +void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, + struct i40e_aq_desc *desc) +{ + u32 aq_desc_len = sizeof(struct i40e_aq_desc); + + if (opcode == hw->nvm_wait_opcode) { + memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len); + i40e_nvmupd_clear_wait_state(hw); } } @@ -1205,6 +1236,9 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, else if (module == 0) upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; break; + case I40E_NVM_AQE: + upd_cmd = I40E_NVMUPD_GET_AQ_EVENT; + break; } break; @@ -1267,6 +1301,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, u32 aq_data_len; i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + if (cmd->offset == 0xffff) + return 0; + memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; @@ -1302,6 +1339,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, } } + if (cmd->offset) + memset(&hw->nvm_aq_event_desc, 0, aq_desc_len); + /* and away we go! */ status = i40e_asq_send_command(hw, aq_desc, buff, buff_size, &cmd_details); @@ -1311,6 +1351,7 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + return status; } /* should we wait for a followup event? 
*/ @@ -1392,6 +1433,40 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, } /** + * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + u32 aq_total_len; + u32 aq_desc_len; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + + aq_desc_len = sizeof(struct i40e_aq_desc); + aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen); + + /* check copylength range */ + if (cmd->data_size > aq_total_len) { + i40e_debug(hw, I40E_DEBUG_NVM, + "%s: copy length %d too big, trimming to %d\n", + __func__, cmd->data_size, aq_total_len); + cmd->data_size = aq_total_len; + } + + memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size); + + return 0; +} + +/** * i40e_nvmupd_nvm_read - Read NVM * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer @@ -1486,18 +1561,20 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, i40e_status status = 0; struct i40e_asq_cmd_details cmd_details; u8 module, transaction; + u8 preservation_flags; bool last; transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); + preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config); memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; status = i40e_aq_update_nvm(hw, module, cmd->offset, (u16)cmd->data_size, bytes, last, - &cmd_details); + preservation_flags, &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 3bb6659db822..83798b7841b9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -214,7 +214,7 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, - bool last_command, + bool last_command, u8 preservation_flags, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, u8 mib_type, void *buff, u16 buff_size, @@ -225,6 +225,10 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw, + bool dcb_enable, + struct i40e_asq_cmd_details + *cmd_details); i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, @@ -333,7 +337,9 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, i40e_status i40e_nvmupd_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *); -void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode); +void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, + struct i40e_aq_desc *desc); +void 
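
i40e_nvmupd_get_aq_event() clamps the requested copy length to what was actually captured (the descriptor plus its datalen) before the memcpy. That guard in isolation, with hypothetical names:

#include <stdint.h>
#include <string.h>

/* Copy at most src_len bytes of a captured AQ event into a caller-sized
 * buffer; trim the request instead of overrunning the source. Illustrative
 * helper, not driver API.
 */
static uint32_t copy_event_clamped(void *dst, uint32_t data_size,
				   const void *src, uint32_t src_len)
{
	if (data_size > src_len)
		data_size = src_len;	/* "copy length too big, trimming" */
	memcpy(dst, src, data_size);
	return data_size;		/* how much the caller really got */
}
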
i40e_nvmupd_clear_wait_state(struct i40e_hw *hw); void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; @@ -343,6 +349,37 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) return i40e_ptype_lookup[ptype]; } +/** + * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition + * @link_speed: the speed to convert + * + * Returns the link_speed in terms of the virtchnl interface, for use in + * converting link_speed as reported by the AdminQ into the format used for + * talking to virtchnl devices. If we can't represent the link speed properly, + * report LINK_SPEED_UNKNOWN. + **/ +static inline enum virtchnl_link_speed +i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed) +{ + switch (link_speed) { + case I40E_LINK_SPEED_100MB: + return VIRTCHNL_LINK_SPEED_100MB; + case I40E_LINK_SPEED_1GB: + return VIRTCHNL_LINK_SPEED_1GB; + case I40E_LINK_SPEED_10GB: + return VIRTCHNL_LINK_SPEED_10GB; + case I40E_LINK_SPEED_40GB: + return VIRTCHNL_LINK_SPEED_40GB; + case I40E_LINK_SPEED_20GB: + return VIRTCHNL_LINK_SPEED_20GB; + case I40E_LINK_SPEED_25GB: + return VIRTCHNL_LINK_SPEED_25GB; + case I40E_LINK_SPEED_UNKNOWN: + default: + return VIRTCHNL_LINK_SPEED_UNKNOWN; + } +} + /* prototype for functions used for SW locks */ /* i40e_common for VF drivers*/ @@ -400,13 +437,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, u32 time, u32 interval); -i40e_status i40e_aq_write_ppp(struct i40e_hw *hw, void *buff, +i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, u16 buff_size, u32 track_id, u32 *error_offset, u32 *error_info, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff, + struct i40e_asq_cmd_details * + cmd_details); +i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, u16 buff_size, u8 flags, - struct i40e_asq_cmd_details *cmd_details); + struct i40e_asq_cmd_details * + cmd_details); struct i40e_generic_seg_header * i40e_find_segment_in_package(u32 segment_type, struct i40e_package_header *pkg_header); diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h index 5f9cac55aa55..afb72e711d43 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_status.h +++ b/drivers/net/ethernet/intel/i40e/i40e_status.h @@ -95,6 +95,7 @@ enum i40e_status_code { I40E_ERR_NOT_READY = -63, I40E_NOT_SUPPORTED = -64, I40E_ERR_FIRMWARE_API_VERSION = -65, + I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, }; #endif /* _I40E_STATUS_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 5bc2748ac468..e554aa6cf070 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -27,6 +27,7 @@ #include <linux/prefetch.h> #include <net/busy_poll.h> #include <linux/bpf_trace.h> +#include <net/xdp.h> #include "i40e.h" #include "i40e_trace.h" #include "i40e_prototype.h" @@ -725,6 +726,59 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring) return 0; } +/** + * i40e_detect_recover_hung - Function to detect and recover hung_queues + * @vsi: pointer to vsi struct with tx queues + * + * VSI has netdev and netdev has TX queues. This function is to check each of + * those TX queues if they are hung, trigger recovery by issuing SW interrupt. 
+ **/ +void i40e_detect_recover_hung(struct i40e_vsi *vsi) +{ + struct i40e_ring *tx_ring = NULL; + struct net_device *netdev; + unsigned int i; + int packets; + + if (!vsi) + return; + + if (test_bit(__I40E_VSI_DOWN, vsi->state)) + return; + + netdev = vsi->netdev; + if (!netdev) + return; + + if (!netif_carrier_ok(netdev)) + return; + + for (i = 0; i < vsi->num_queue_pairs; i++) { + tx_ring = vsi->tx_rings[i]; + if (tx_ring && tx_ring->desc) { + /* If packet counter has not changed the queue is + * likely stalled, so force an interrupt for this + * queue. + * + * prev_pkt_ctr would be negative if there was no + * pending work. + */ + packets = tx_ring->stats.packets & INT_MAX; + if (tx_ring->tx_stats.prev_pkt_ctr == packets) { + i40e_force_wb(vsi, tx_ring->q_vector); + continue; + } + + /* Memory barrier between read of packet count and call + * to i40e_get_tx_pending() + */ + smp_rmb(); + tx_ring->tx_stats.prev_pkt_ctr = + i40e_get_tx_pending(tx_ring) ? packets : -1; + } + } +} + #define WB_STRIDE 4 /** @@ -902,7 +956,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */ wr32(&vsi->back->hw, - I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1), + I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val); } else { val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK | @@ -929,8 +983,7 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) /* allow 00 to be written to the index */ wr32(&vsi->back->hw, - I40E_PFINT_DYN_CTLN(q_vector->v_idx + - vsi->base_vector - 1), val); + I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val); } else { u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */ @@ -1162,6 +1215,7 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; + tx_ring->tx_stats.prev_pkt_ctr = -1; return 0; err: @@ -1236,6 +1290,8 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) void i40e_free_rx_resources(struct i40e_ring *rx_ring) { i40e_clean_rx_ring(rx_ring); + if (rx_ring->vsi->type == I40E_VSI_MAIN) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); rx_ring->xdp_prog = NULL; kfree(rx_ring->rx_bi); rx_ring->rx_bi = NULL; @@ -1256,6 +1312,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring) int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) { struct device *dev = rx_ring->dev; + int err = -ENOMEM; int bi_size; /* warn if we are about to overwrite the pointer */ @@ -1283,13 +1340,21 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; + /* XDP RX-queue info only needed for RX rings exposed to XDP */ + if (rx_ring->vsi->type == I40E_VSI_MAIN) { + err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, + rx_ring->queue_index); + if (err < 0) + goto err; + } + rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; return 0; err: kfree(rx_ring->rx_bi); rx_ring->rx_bi = NULL; - return -ENOMEM; + return err; } /** @@ -2068,11 +2133,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); bool failure = false, xdp_xmit = false; + struct xdp_buff xdp; + + xdp.rxq = &rx_ring->xdp_rxq; while (likely(total_rx_packets < (unsigned int)budget)) { struct i40e_rx_buffer *rx_buffer; union i40e_rx_desc *rx_desc; - struct xdp_buff xdp; unsigned int size; u16 vlan_tag; u8 rx_ptype; @@ -2243,7 +2310,6 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, struct i40e_hw *hw = 
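
i40e_detect_recover_hung() keeps one int of state per TX ring: -1 means no work was pending on the previous watchdog pass, otherwise it holds the packet count observed then. A ring is declared stalled only when the count is unchanged across two passes, and the driver places an smp_rmb() between reading the counter and re-checking pending work. The per-ring decision, modeled standalone:

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>

struct ring_model {
	uint64_t packets;	/* completed-packet counter for the ring */
	int prev_pkt_ctr;	/* snapshot from last pass; -1 = was idle */
};

/* One watchdog pass over a ring: returns true when the ring should be
 * kicked with a SW interrupt. Initialize prev_pkt_ctr to -1, as the
 * driver does in its descriptor-setup paths.
 */
static bool ring_stalled(struct ring_model *r, bool work_pending)
{
	int packets = (int)(r->packets & INT_MAX);

	if (r->prev_pkt_ctr == packets)
		return true;		/* no progress since last pass */

	/* the driver runs smp_rmb() between the reads modeled here */
	r->prev_pkt_ctr = work_pending ? packets : -1;
	return false;
}
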
&vsi->back->hw; bool rx = false, tx = false; u32 rxval, txval; - int vector; int idx = q_vector->v_idx; int rx_itr_setting, tx_itr_setting; @@ -2253,8 +2319,6 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, return; } - vector = (q_vector->v_idx + vsi->base_vector); - /* avoid dynamic calculation if in countdown mode OR if * all dynamic is disabled */ @@ -2303,12 +2367,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, */ rxval |= BIT(31); /* don't check _DOWN because interrupt isn't being enabled */ - wr32(hw, INTREG(vector - 1), rxval); + wr32(hw, INTREG(q_vector->reg_idx), rxval); } enable_int: if (!test_bit(__I40E_VSI_DOWN, vsi->state)) - wr32(hw, INTREG(vector - 1), txval); + wr32(hw, INTREG(q_vector->reg_idx), txval); if (q_vector->itr_countdown) q_vector->itr_countdown--; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index fbae1182e2ea..701b708628b0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -27,6 +27,8 @@ #ifndef _I40E_TXRX_H_ #define _I40E_TXRX_H_ +#include <net/xdp.h> + /* Interrupt Throttling and Rate Limiting Goodies */ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ @@ -277,7 +279,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size) } /* Tx Descriptors needed, worst case */ -#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#define DESC_NEEDED (MAX_SKB_FRAGS + 6) #define I40E_MIN_DESC_PENDING 4 #define I40E_TX_FLAGS_HW_VLAN BIT(1) @@ -331,6 +333,7 @@ struct i40e_tx_queue_stats { u64 tx_done_old; u64 tx_linearize; u64 tx_force_wb; + int prev_pkt_ctr; }; struct i40e_rx_queue_stats { @@ -428,6 +431,7 @@ struct i40e_ring { */ struct i40e_channel *ch; + struct xdp_rxq_info xdp_rxq; } ____cacheline_internodealigned_in_smp; static inline bool ring_uses_build_skb(struct i40e_ring *ring) @@ -498,6 +502,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring); int i40e_napi_poll(struct napi_struct *napi, int budget); void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); u32 i40e_get_tx_pending(struct i40e_ring *ring); +void i40e_detect_recover_hung(struct i40e_vsi *vsi); int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 0e8568719b4e..cd294e6a8587 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -402,6 +402,7 @@ enum i40e_nvmupd_cmd { I40E_NVMUPD_STATUS, I40E_NVMUPD_EXEC_AQ, I40E_NVMUPD_GET_AQ_RESULT, + I40E_NVMUPD_GET_AQ_EVENT, }; enum i40e_nvmupd_state { @@ -421,15 +422,21 @@ enum i40e_nvmupd_state { #define I40E_NVM_MOD_PNT_MASK 0xFF -#define I40E_NVM_TRANS_SHIFT 8 -#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) -#define I40E_NVM_CON 0x0 -#define I40E_NVM_SNT 0x1 -#define I40E_NVM_LCB 0x2 -#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) -#define I40E_NVM_ERA 0x4 -#define I40E_NVM_CSUM 0x8 -#define I40E_NVM_EXEC 0xf +#define I40E_NVM_TRANS_SHIFT 8 +#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) +#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12 +#define I40E_NVM_PRESERVATION_FLAGS_MASK \ + (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT) +#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01 +#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02 +#define I40E_NVM_CON 0x0 +#define I40E_NVM_SNT 0x1 +#define I40E_NVM_LCB 0x2 +#define I40E_NVM_SA (I40E_NVM_SNT | 
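
The RX ring gains a struct xdp_rxq_info member above, and the i40e_txrx.c hunks pair its registration in i40e_setup_rx_descriptors() with unregistration in i40e_free_rx_resources(), only for rings XDP can reach (the MAIN VSI). i40e_clean_rx_irq() also hoists the loop-invariant xdp.rxq assignment out of the per-packet loop. A sketch of the lifetime pairing, using this kernel's three-argument xdp_rxq_info_reg():

#include <net/xdp.h>

/* Sketch of the register/unregister pairing used above; error handling
 * mirrors i40e_setup_rx_descriptors(), which propagates the reg failure.
 */
static int example_ring_xdp_init(struct xdp_rxq_info *xdp_rxq,
				 struct net_device *netdev, u32 queue_index,
				 bool xdp_capable)
{
	if (!xdp_capable)	/* e.g. non-MAIN VSI rings */
		return 0;
	return xdp_rxq_info_reg(xdp_rxq, netdev, queue_index);
}

static void example_ring_xdp_teardown(struct xdp_rxq_info *xdp_rxq,
				      bool xdp_capable)
{
	if (xdp_capable)
		xdp_rxq_info_unreg(xdp_rxq);
}
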
I40E_NVM_LCB) +#define I40E_NVM_ERA 0x4 +#define I40E_NVM_CSUM 0x8 +#define I40E_NVM_AQE 0xe +#define I40E_NVM_EXEC 0xf #define I40E_NVM_ADAPT_SHIFT 16 #define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) @@ -611,6 +618,7 @@ struct i40e_hw { /* state of nvm update process */ enum i40e_nvmupd_state nvmupd_state; struct i40e_aq_desc nvm_wb_desc; + struct i40e_aq_desc nvm_aq_event_desc; struct i40e_virt_mem nvm_buff; bool nvm_release_on_done; u16 nvm_wait_opcode; @@ -1502,19 +1510,19 @@ struct i40e_lldp_variables { #define I40E_FLEX_57_SHIFT 6 #define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT) -/* Version format for PPP */ -struct i40e_ppp_version { +/* Version format for Dynamic Device Personalization(DDP) */ +struct i40e_ddp_version { u8 major; u8 minor; u8 update; u8 draft; }; -#define I40E_PPP_NAME_SIZE 32 +#define I40E_DDP_NAME_SIZE 32 /* Package header */ struct i40e_package_header { - struct i40e_ppp_version version; + struct i40e_ddp_version version; u32 segment_count; u32 segment_offset[1]; }; @@ -1526,16 +1534,16 @@ struct i40e_generic_seg_header { #define SEGMENT_TYPE_I40E 0x00000011 #define SEGMENT_TYPE_X722 0x00000012 u32 type; - struct i40e_ppp_version version; + struct i40e_ddp_version version; u32 size; - char name[I40E_PPP_NAME_SIZE]; + char name[I40E_DDP_NAME_SIZE]; }; struct i40e_metadata_segment { struct i40e_generic_seg_header header; - struct i40e_ppp_version version; + struct i40e_ddp_version version; u32 track_id; - char name[I40E_PPP_NAME_SIZE]; + char name[I40E_DDP_NAME_SIZE]; }; struct i40e_device_id_entry { @@ -1545,8 +1553,8 @@ struct i40e_device_id_entry { struct i40e_profile_segment { struct i40e_generic_seg_header header; - struct i40e_ppp_version version; - char name[I40E_PPP_NAME_SIZE]; + struct i40e_ddp_version version; + char name[I40E_DDP_NAME_SIZE]; u32 device_table_count; struct i40e_device_id_entry device_table[1]; }; @@ -1573,11 +1581,11 @@ struct i40e_profile_section_header { struct i40e_profile_info { u32 track_id; - struct i40e_ppp_version version; + struct i40e_ddp_version version; u8 op; -#define I40E_PPP_ADD_TRACKID 0x01 -#define I40E_PPP_REMOVE_TRACKID 0x02 +#define I40E_DDP_ADD_TRACKID 0x01 +#define I40E_DDP_REMOVE_TRACKID 0x02 u8 reserved[7]; - u8 name[I40E_PPP_NAME_SIZE]; + u8 name[I40E_DDP_NAME_SIZE]; }; #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 36cb8e068e85..e9309fb9084b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -81,12 +81,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) if (vf->link_forced) { pfe.event_data.link_event.link_status = vf->link_up; pfe.event_data.link_event.link_speed = - (vf->link_up ? I40E_LINK_SPEED_40GB : 0); + (vf->link_up ? 
VIRTCHNL_LINK_SPEED_40GB : 0); } else { pfe.event_data.link_event.link_status = ls->link_info & I40E_AQ_LINK_UP; pfe.event_data.link_event.link_speed = - (enum virtchnl_link_speed)ls->link_speed; + i40e_virtchnl_link_speed(ls->link_speed); } i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(pfe), NULL); @@ -2749,6 +2749,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, break; case VIRTCHNL_OP_GET_VF_RESOURCES: ret = i40e_vc_get_vf_resources_msg(vf, msg); + i40e_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: i40e_vc_reset_vf_msg(vf); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index 8b0d4b255dea..d1aab6b8bfb1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -837,10 +837,15 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, /* update the error if time out occurred */ if ((!cmd_completed) && (!details->async && !details->postpone)) { - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, - "AQTX: Writeback timeout.\n"); - status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; + if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: AQ Critical error.\n"); + status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR; + } else { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: Writeback timeout.\n"); + status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; + } } asq_send_command_error: @@ -901,7 +906,7 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, } /* set next_to_use to head */ - ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); + ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK; if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 06b04572c518..815de8d9c3fb 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -198,13 +198,14 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_add_mirror_rule = 0x0260, i40e_aqc_opc_delete_mirror_rule = 0x0261, - /* Pipeline Personalization Profile */ + /* Dynamic Device Personalization */ i40e_aqc_opc_write_personalization_profile = 0x0270, i40e_aqc_opc_get_personalization_profile_list = 0x0271, /* DCB commands */ i40e_aqc_opc_dcb_ignore_pfc = 0x0301, i40e_aqc_opc_dcb_updated = 0x0302, + i40e_aqc_opc_set_dcb_parameters = 0x0303, /* TX scheduler */ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, @@ -1562,7 +1563,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); -/* Pipeline Personalization Profile */ +/* Dynamic Device Personalization */ struct i40e_aqc_write_personalization_profile { u8 flags; u8 reserved[3]; @@ -1573,7 +1574,7 @@ struct i40e_aqc_write_personalization_profile { I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile); -struct i40e_aqc_write_ppp_resp { +struct i40e_aqc_write_ddp_resp { __le32 error_offset; __le32 error_info; __le32 addr_high; @@ -1582,8 +1583,8 @@ struct i40e_aqc_write_ppp_resp { struct i40e_aqc_get_applied_profiles { u8 flags; -#define I40E_AQC_GET_PPP_GET_CONF 0x1 -#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2 +#define I40E_AQC_GET_DDP_GET_CONF 0x1 +#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2 u8 rsv[3]; __le32 reserved; __le32 addr_high; @@ -2196,8 +2197,12 
@@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access); */ struct i40e_aqc_nvm_update { u8 command_flags; -#define I40E_AQ_NVM_LAST_CMD 0x01 -#define I40E_AQ_NVM_FLASH_ONLY 0x80 +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01 u8 module_pointer; __le16 length; __le32 offset; @@ -2457,6 +2462,17 @@ struct i40e_aqc_lldp_start { I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); +/* Set DCB (direct 0x0303) */ +struct i40e_aqc_set_dcb_parameters { + u8 command; +#define I40E_AQ_DCB_SET_AGENT 0x1 +#define I40E_DCB_VALID 0x1 + u8 valid_flags; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters); + /* Apply MIB changes (0x0A07) * uses the generic struc as it contains no data */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 7d70bf69b249..67bf5cebb76f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -284,6 +284,8 @@ const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err) return "I40E_NOT_SUPPORTED"; case I40E_ERR_FIRMWARE_API_VERSION: return "I40E_ERR_FIRMWARE_API_VERSION"; + case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR: + return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); @@ -1202,7 +1204,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw) } /** - * i40evf_aq_write_ppp - Write pipeline personalization profile (ppp) + * i40evf_aq_write_ddp - Write dynamic device personalization (ddp) * @hw: pointer to the hw struct * @buff: command buffer (size in bytes = buff_size) * @buff_size: buffer size in bytes @@ -1212,7 +1214,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw) * @cmd_details: pointer to command details structure or NULL **/ enum -i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff, +i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, u16 buff_size, u32 track_id, u32 *error_offset, u32 *error_info, struct i40e_asq_cmd_details *cmd_details) @@ -1221,7 +1223,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff, struct i40e_aqc_write_personalization_profile *cmd = (struct i40e_aqc_write_personalization_profile *) &desc.params.raw; - struct i40e_aqc_write_ppp_resp *resp; + struct i40e_aqc_write_ddp_resp *resp; i40e_status status; i40evf_fill_default_direct_cmd_desc(&desc, @@ -1237,7 +1239,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff, status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { - resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw; + resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; if (error_offset) *error_offset = le32_to_cpu(resp->error_offset); if (error_info) @@ -1248,16 +1250,16 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff, } /** - * i40evf_aq_get_ppp_list - Read pipeline personalization profile (ppp) + * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp) * @hw: pointer to the hw struct * @buff: command buffer (size in bytes = buff_size) * @buff_size: buffer size in bytes * @cmd_details: pointer to command details structure or NULL **/ enum -i40e_status_code i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff, +i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, 
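
The new direct command 0x0303 carries only two meaningful bytes, defined in the struct above. One plausible shape for the i40e_aq_set_dcb_parameters() sender declared in i40e_prototype.h, written against the driver's usual direct-command helpers (a sketch, not necessarily the exact implementation in this series):

i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
				       struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_dcb_parameters *cmd =
		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_dcb_parameters);

	if (dcb_enable) {
		/* allow the FW LLDP agent to write a default DCB config */
		cmd->valid_flags = I40E_DCB_VALID;
		cmd->command = I40E_AQ_DCB_SET_AGENT;
	}

	return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
}
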
void *buff, u16 buff_size, u8 flags, - struct i40e_asq_cmd_details *cmd_details) + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_applied_profiles *cmd = @@ -1330,11 +1332,6 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, u32 offset = 0, info = 0; u32 i; - if (!track_id) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0."); - return I40E_NOT_SUPPORTED; - } - dev_cnt = profile->device_table_count; for (i = 0; i < dev_cnt; i++) { @@ -1344,7 +1341,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, break; } if (i == dev_cnt) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP"); + i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP"); return I40E_ERR_DEVICE_NOT_SUPPORTED; } @@ -1363,7 +1360,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, sizeof(struct i40e_profile_section_header); /* Write profile */ - status = i40evf_aq_write_ppp(hw, (void *)sec, (u16)section_size, + status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size, track_id, &offset, &info, NULL); if (status) { i40e_debug(hw, I40E_DEBUG_PACKAGE, @@ -1405,10 +1402,10 @@ i40evf_add_pinfo_to_list(struct i40e_hw *hw, sec->section.offset); pinfo->track_id = track_id; pinfo->version = profile->version; - pinfo->op = I40E_PPP_ADD_TRACKID; - memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE); + pinfo->op = I40E_DDP_ADD_TRACKID; + memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); - status = i40evf_aq_write_ppp(hw, (void *)sec, sec->data_end, + status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end, track_id, &offset, &info, NULL); return status; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index b624b5994075..47c429931a57 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -131,13 +131,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, u32 time, u32 interval); -i40e_status i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff, +i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, u16 buff_size, u32 track_id, u32 *error_offset, u32 *error_info, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff, + struct i40e_asq_cmd_details * + cmd_details); +i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff, u16 buff_size, u8 flags, - struct i40e_asq_cmd_details *cmd_details); + struct i40e_asq_cmd_details * + cmd_details); struct i40e_generic_seg_header * i40evf_find_segment_in_package(u32 segment_type, struct i40e_package_header *pkg_header); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h index 7fa7a41915c1..5b222246e08b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_status.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h @@ -95,6 +95,7 @@ enum i40e_status_code { I40E_ERR_NOT_READY = -63, I40E_NOT_SUPPORTED = -64, I40E_ERR_FIRMWARE_API_VERSION = -65, + I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, }; #endif /* _I40E_STATUS_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 1ba29bb85b67..357d6051281f 100644 --- 
a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -148,6 +148,59 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) return 0; } +/** + * i40evf_detect_recover_hung - Function to detect and recover hung_queues + * @vsi: pointer to vsi struct with tx queues + * + * VSI has netdev and netdev has TX queues. This function is to check each of + * those TX queues if they are hung, trigger recovery by issuing SW interrupt. + **/ +void i40evf_detect_recover_hung(struct i40e_vsi *vsi) +{ + struct i40e_ring *tx_ring = NULL; + struct net_device *netdev; + unsigned int i; + int packets; + + if (!vsi) + return; + + if (test_bit(__I40E_VSI_DOWN, vsi->state)) + return; + + netdev = vsi->netdev; + if (!netdev) + return; + + if (!netif_carrier_ok(netdev)) + return; + + for (i = 0; i < vsi->back->num_active_queues; i++) { + tx_ring = &vsi->back->tx_rings[i]; + if (tx_ring && tx_ring->desc) { + /* If packet counter has not changed the queue is + * likely stalled, so force an interrupt for this + * queue. + * + * prev_pkt_ctr would be negative if there was no + * pending work. + */ + packets = tx_ring->stats.packets & INT_MAX; + if (tx_ring->tx_stats.prev_pkt_ctr == packets) { + i40evf_force_wb(vsi, tx_ring->q_vector); + continue; + } + + /* Memory barrier between read of packet count and call + * to i40evf_get_tx_pending() + */ + smp_rmb(); + tx_ring->tx_stats.prev_pkt_ctr = + i40evf_get_tx_pending(tx_ring, false) ? packets : -1; + } + } +} + #define WB_STRIDE 4 /** @@ -316,8 +369,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->v_idx + - vsi->base_vector - 1), val); + I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), val); q_vector->arm_wb_state = true; } @@ -336,7 +388,7 @@ void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) /* allow 00 to be written to the index */; wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1), + I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), val); } @@ -469,6 +521,7 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; + tx_ring->tx_stats.prev_pkt_ctr = -1; return 0; err: @@ -1444,12 +1497,9 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, struct i40e_hw *hw = &vsi->back->hw; bool rx = false, tx = false; u32 rxval, txval; - int vector; int idx = q_vector->v_idx; int rx_itr_setting, tx_itr_setting; - vector = (q_vector->v_idx + vsi->base_vector); - /* avoid dynamic calculation if in countdown mode OR if * all dynamic is disabled */ @@ -1498,12 +1548,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, */ rxval |= BIT(31); /* don't check _DOWN because interrupt isn't being enabled */ - wr32(hw, INTREG(vector - 1), rxval); + wr32(hw, INTREG(q_vector->reg_idx), rxval); } enable_int: if (!test_bit(__I40E_VSI_DOWN, vsi->state)) - wr32(hw, INTREG(vector - 1), txval); + wr32(hw, INTREG(q_vector->reg_idx), txval); if (q_vector->itr_countdown) q_vector->itr_countdown--; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 8d26c85d12e1..7798a6645c3f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -260,7 +260,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size) } /* Tx Descriptors needed, worst case */ -#define DESC_NEEDED (MAX_SKB_FRAGS 
+ 4) +#define DESC_NEEDED (MAX_SKB_FRAGS + 6) #define I40E_MIN_DESC_PENDING 4 #define I40E_TX_FLAGS_HW_VLAN BIT(1) @@ -313,6 +313,7 @@ struct i40e_tx_queue_stats { u64 tx_done_old; u64 tx_linearize; u64 tx_force_wb; + int prev_pkt_ctr; u64 tx_lost_interrupt; }; @@ -467,6 +468,7 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring); int i40evf_napi_poll(struct napi_struct *napi, int budget); void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw); +void i40evf_detect_recover_hung(struct i40e_vsi *vsi); int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40evf_chk_linearize(struct sk_buff *skb); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 213b773dfad6..54951c84a481 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -361,6 +361,7 @@ enum i40e_nvmupd_cmd { I40E_NVMUPD_STATUS, I40E_NVMUPD_EXEC_AQ, I40E_NVMUPD_GET_AQ_RESULT, + I40E_NVMUPD_GET_AQ_EVENT, }; enum i40e_nvmupd_state { @@ -380,15 +381,21 @@ enum i40e_nvmupd_state { #define I40E_NVM_MOD_PNT_MASK 0xFF -#define I40E_NVM_TRANS_SHIFT 8 -#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) -#define I40E_NVM_CON 0x0 -#define I40E_NVM_SNT 0x1 -#define I40E_NVM_LCB 0x2 -#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) -#define I40E_NVM_ERA 0x4 -#define I40E_NVM_CSUM 0x8 -#define I40E_NVM_EXEC 0xf +#define I40E_NVM_TRANS_SHIFT 8 +#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) +#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12 +#define I40E_NVM_PRESERVATION_FLAGS_MASK \ + (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT) +#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01 +#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02 +#define I40E_NVM_CON 0x0 +#define I40E_NVM_SNT 0x1 +#define I40E_NVM_LCB 0x2 +#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) +#define I40E_NVM_ERA 0x4 +#define I40E_NVM_CSUM 0x8 +#define I40E_NVM_AQE 0xe +#define I40E_NVM_EXEC 0xf #define I40E_NVM_ADAPT_SHIFT 16 #define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) @@ -561,6 +568,7 @@ struct i40e_hw { /* state of nvm update process */ enum i40e_nvmupd_state nvmupd_state; struct i40e_aq_desc nvm_wb_desc; + struct i40e_aq_desc nvm_aq_event_desc; struct i40e_virt_mem nvm_buff; bool nvm_release_on_done; u16 nvm_wait_opcode; @@ -1422,19 +1430,19 @@ enum i40e_reset_type { #define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \ I40E_FD_INSET_FLEX_WORD57_SHIFT) -/* Version format for PPP */ -struct i40e_ppp_version { +/* Version format for Dynamic Device Personalization(DDP) */ +struct i40e_ddp_version { u8 major; u8 minor; u8 update; u8 draft; }; -#define I40E_PPP_NAME_SIZE 32 +#define I40E_DDP_NAME_SIZE 32 /* Package header */ struct i40e_package_header { - struct i40e_ppp_version version; + struct i40e_ddp_version version; u32 segment_count; u32 segment_offset[1]; }; @@ -1446,16 +1454,16 @@ struct i40e_generic_seg_header { #define SEGMENT_TYPE_I40E 0x00000011 #define SEGMENT_TYPE_X722 0x00000012 u32 type; - struct i40e_ppp_version version; + struct i40e_ddp_version version; u32 size; - char name[I40E_PPP_NAME_SIZE]; + char name[I40E_DDP_NAME_SIZE]; }; struct i40e_metadata_segment { struct i40e_generic_seg_header header; - struct i40e_ppp_version version; + struct i40e_ddp_version version; u32 track_id; - char name[I40E_PPP_NAME_SIZE]; + char name[I40E_DDP_NAME_SIZE]; }; struct i40e_device_id_entry { @@ -1465,8 +1473,8 @@ struct 
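
Note that the two encodings of the preservation flags deliberately differ: the user-facing nvm_access config word (bits 13:12 above) uses SELECTED=0x01 and ALL=0x02, while the AQ command byte (I40E_AQ_NVM_PRESERVATION_FLAGS_* in i40e_adminq_cmd.h) uses SELECTED=0x03 and ALL=0x01 at shift 1. A plausible translation inside i40e_aq_update_nvm(), offered as a sketch consistent with both sets of defines rather than the exact patch text:

	/* Sketch: map the config-word preservation request onto the AQ
	 * command flags; assuming only X722 parts honor these bits.
	 */
	if (hw->mac.type == I40E_MAC_X722) {
		if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
			cmd->command_flags |=
				(I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
		else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
			cmd->command_flags |=
				(I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
	}
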
i40e_device_id_entry { struct i40e_profile_segment { struct i40e_generic_seg_header header; - struct i40e_ppp_version version; - char name[I40E_PPP_NAME_SIZE]; + struct i40e_ddp_version version; + char name[I40E_DDP_NAME_SIZE]; u32 device_table_count; struct i40e_device_id_entry device_table[1]; }; @@ -1493,11 +1501,11 @@ struct i40e_profile_section_header { struct i40e_profile_info { u32 track_id; - struct i40e_ppp_version version; + struct i40e_ddp_version version; u8 op; -#define I40E_PPP_ADD_TRACKID 0x01 -#define I40E_PPP_REMOVE_TRACKID 0x02 +#define I40E_DDP_ADD_TRACKID 0x01 +#define I40E_DDP_REMOVE_TRACKID 0x02 u8 reserved[7]; - u8 name[I40E_PPP_NAME_SIZE]; + u8 name[I40E_DDP_NAME_SIZE]; }; #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index de0af521d602..9690c1ea019e 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -114,14 +114,14 @@ struct i40e_q_vector { struct i40evf_adapter *adapter; struct i40e_vsi *vsi; struct napi_struct napi; - unsigned long reg_idx; struct i40e_ring_container rx; struct i40e_ring_container tx; u32 ring_mask; u8 num_ringpairs; /* total number of ring pairs in vector */ #define ITR_COUNTDOWN_START 100 u8 itr_countdown; /* when 0 or 1 update ITR */ - int v_idx; /* vector index in list */ + u16 v_idx; /* index in the vsi->q_vector array. */ + u16 reg_idx; /* register index of the interrupt */ char name[IFNAMSIZ + 15]; bool arm_wb_state; cpumask_t affinity_mask; @@ -187,6 +187,7 @@ enum i40evf_state_t { enum i40evf_critical_section_t { __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */ __I40EVF_IN_CLIENT_TASK, + __I40EVF_IN_REMOVE_TASK, /* device being removed */ }; /* board specific private data structure */ @@ -199,6 +200,9 @@ struct i40evf_adapter { wait_queue_head_t down_waitqueue; struct i40e_q_vector *q_vectors; struct list_head vlan_filter_list; + struct list_head mac_filter_list; + /* Lock to protect accesses to MAC and VLAN lists */ + spinlock_t mac_vlan_list_lock; char misc_vector_name[IFNAMSIZ + 9]; int num_active_queues; int num_req_queues; @@ -206,7 +210,6 @@ struct i40evf_adapter { /* TX */ struct i40e_ring *tx_rings; u32 tx_timeout_count; - struct list_head mac_filter_list; u32 tx_desc_count; /* RX */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index da006fa3fec1..e2d8aa19d205 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -512,31 +512,31 @@ static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter, struct ethtool_coalesce *ec, int queue) { + struct i40e_ring *rx_ring = &adapter->rx_rings[queue]; + struct i40e_ring *tx_ring = &adapter->tx_rings[queue]; struct i40e_vsi *vsi = &adapter->vsi; struct i40e_hw *hw = &adapter->hw; struct i40e_q_vector *q_vector; u16 vector; - adapter->rx_rings[queue].rx_itr_setting = ec->rx_coalesce_usecs; - adapter->tx_rings[queue].tx_itr_setting = ec->tx_coalesce_usecs; + rx_ring->rx_itr_setting = ec->rx_coalesce_usecs; + tx_ring->tx_itr_setting = ec->tx_coalesce_usecs; - if (ec->use_adaptive_rx_coalesce) - adapter->rx_rings[queue].rx_itr_setting |= I40E_ITR_DYNAMIC; - else - adapter->rx_rings[queue].rx_itr_setting &= ~I40E_ITR_DYNAMIC; + rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC; + if (!ec->use_adaptive_rx_coalesce) + rx_ring->rx_itr_setting ^= I40E_ITR_DYNAMIC; - if (ec->use_adaptive_tx_coalesce) - 
adapter->tx_rings[queue].tx_itr_setting |= I40E_ITR_DYNAMIC; - else - adapter->tx_rings[queue].tx_itr_setting &= ~I40E_ITR_DYNAMIC; + tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC; + if (!ec->use_adaptive_tx_coalesce) + tx_ring->tx_itr_setting ^= I40E_ITR_DYNAMIC; - q_vector = adapter->rx_rings[queue].q_vector; - q_vector->rx.itr = ITR_TO_REG(adapter->rx_rings[queue].rx_itr_setting); + q_vector = rx_ring->q_vector; + q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting); vector = vsi->base_vector + q_vector->v_idx; wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1), q_vector->rx.itr); - q_vector = adapter->tx_rings[queue].q_vector; - q_vector->tx.itr = ITR_TO_REG(adapter->tx_rings[queue].tx_itr_setting); + q_vector = tx_ring->q_vector; + q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting); vector = vsi->base_vector + q_vector->v_idx; wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1), q_vector->tx.itr); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 7b2a4eba92e2..16989ad2ca90 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -45,8 +45,8 @@ static const char i40evf_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 3 -#define DRV_VERSION_MINOR 0 -#define DRV_VERSION_BUILD 1 +#define DRV_VERSION_MINOR 2 +#define DRV_VERSION_BUILD 2 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ @@ -276,37 +276,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) if (mask & BIT(i - 1)) { wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK); - } - } -} - -/** - * i40evf_fire_sw_int - Generate SW interrupt for specified vectors - * @adapter: board private structure - * @mask: bitmap of vectors to trigger - **/ -static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) -{ - struct i40e_hw *hw = &adapter->hw; - int i; - u32 dyn_ctl; - - if (mask & 1) { - dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01); - dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK; - wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl); - } - for (i = 1; i < adapter->num_msix_vectors; i++) { - if (mask & BIT(i)) { - dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1)); - dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK; - wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl); + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK); } } } @@ -337,15 +307,10 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data) struct net_device *netdev = data; struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; - u32 val; /* handle non-queue interrupts, these reads clear the registers */ - val = rd32(hw, I40E_VFINT_ICR01); - val = rd32(hw, I40E_VFINT_ICR0_ENA1); - - val = rd32(hw, I40E_VFINT_DYN_CTL01) | - I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; - wr32(hw, I40E_VFINT_DYN_CTL01, val); + rd32(hw, I40E_VFINT_ICR01); + rd32(hw, I40E_VFINT_ICR0_ENA1); /* schedule work on the private workqueue */ schedule_work(&adapter->adminq_task); @@ -706,7 +671,8 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) * @adapter: board private structure * @vlan: vlan tag * - * Returns ptr to the filter object or NULL + * Returns ptr to the filter object or NULL. 
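
The rewritten i40evf_set_itr_per_queue() uses an unconditional OR followed by a conditional XOR: set the flag, then flip it back off when adaptive coalescing was not requested. Because the OR guarantees the bit is set, the XOR acts as a clear, so the pair is equivalent to the old if/else set-or-clear. Demonstrated standalone (ITR_DYNAMIC value is illustrative):

#include <assert.h>
#include <stdint.h>

#define ITR_DYNAMIC 0x8000u

static uint16_t apply_adaptive(uint16_t setting, int use_adaptive)
{
	setting |= ITR_DYNAMIC;		/* bit is now guaranteed set */
	if (!use_adaptive)
		setting ^= ITR_DYNAMIC;	/* XOR of a set bit == clear */
	return setting;
}

int main(void)
{
	assert(apply_adaptive(0x0010, 1) == (0x0010 | ITR_DYNAMIC));
	assert(apply_adaptive(0x0010 | ITR_DYNAMIC, 0) == 0x0010);
	return 0;
}
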
Must be called while holding the + * mac_vlan_list_lock. **/ static struct i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan) @@ -731,14 +697,8 @@ static struct i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) { struct i40evf_vlan_filter *f = NULL; - int count = 50; - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { - udelay(1); - if (--count == 0) - goto out; - } + spin_lock_bh(&adapter->mac_vlan_list_lock); f = i40evf_find_vlan(adapter, vlan); if (!f) { @@ -755,8 +715,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) } clearout: - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); -out: + spin_unlock_bh(&adapter->mac_vlan_list_lock); return f; } @@ -768,21 +727,16 @@ out: static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) { struct i40evf_vlan_filter *f; - int count = 50; - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { - udelay(1); - if (--count == 0) - return; - } + spin_lock_bh(&adapter->mac_vlan_list_lock); f = i40evf_find_vlan(adapter, vlan); if (f) { f->remove = true; adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; } - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + + spin_unlock_bh(&adapter->mac_vlan_list_lock); } /** @@ -824,7 +778,8 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, * @adapter: board private structure * @macaddr: the MAC address * - * Returns ptr to the filter object or NULL + * Returns ptr to the filter object or NULL. Must be called while holding the + * mac_vlan_list_lock. **/ static struct i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter, @@ -854,26 +809,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, u8 *macaddr) { struct i40evf_mac_filter *f; - int count = 50; if (!macaddr) return NULL; - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { - udelay(1); - if (--count == 0) - return NULL; - } + spin_lock_bh(&adapter->mac_vlan_list_lock); f = i40evf_find_filter(adapter, macaddr); if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (!f) { - clear_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section); - return NULL; - } + if (!f) + goto clearout; ether_addr_copy(f->macaddr, macaddr); @@ -884,7 +830,8 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, f->remove = false; } - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); +clearout: + spin_unlock_bh(&adapter->mac_vlan_list_lock); return f; } @@ -911,12 +858,16 @@ static int i40evf_set_mac(struct net_device *netdev, void *p) if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF) return -EPERM; + spin_lock_bh(&adapter->mac_vlan_list_lock); + f = i40evf_find_filter(adapter, hw->mac.addr); if (f) { f->remove = true; adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + f = i40evf_add_filter(adapter, addr->sa_data); if (f) { ether_addr_copy(hw->mac.addr, addr->sa_data); @@ -937,7 +888,6 @@ static void i40evf_set_rx_mode(struct net_device *netdev) struct netdev_hw_addr *uca; struct netdev_hw_addr *mca; struct netdev_hw_addr *ha; - int count = 50; /* add addr if not already in the filter list */ netdev_for_each_uc_addr(uca, netdev) { @@ -947,16 +897,8 @@ static void i40evf_set_rx_mode(struct net_device *netdev) i40evf_add_filter(adapter, mca->addr); } - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { - udelay(1); - 
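
The filter-list hunks replace an open-coded, best-effort bit lock (test_and_set_bit() spun with udelay() and a bail-out counter, which could silently fail to add a filter) with a plain spinlock taken in BH context, since the lists are also touched from the watchdog and adminq paths. The resulting pattern, sketched from the add-filter hunk:

/* Sketch of the list-mutation pattern after the conversion: every reader
 * and writer of mac_filter_list/vlan_filter_list brackets the access with
 * the new mac_vlan_list_lock instead of spinning on a bit lock.
 */
spin_lock_bh(&adapter->mac_vlan_list_lock);

f = i40evf_find_filter(adapter, macaddr);	/* lookup under the lock */
if (!f) {
	f = kzalloc(sizeof(*f), GFP_ATOMIC);	/* atomic: BH disabled */
	if (f) {
		ether_addr_copy(f->macaddr, macaddr);
		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
	}
}

spin_unlock_bh(&adapter->mac_vlan_list_lock);
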
if (--count == 0) { - dev_err(&adapter->pdev->dev, - "Failed to get lock in %s\n", __func__); - return; - } - } - /* remove filter if not in netdev list */ + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { netdev_for_each_mc_addr(mca, netdev) if (ether_addr_equal(mca->addr, f->macaddr)) @@ -995,7 +937,7 @@ bottom_of_search_loop: adapter->flags & I40EVF_FLAG_ALLMULTI_ON) adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI; - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + spin_unlock_bh(&adapter->mac_vlan_list_lock); } /** @@ -1058,6 +1000,8 @@ static void i40evf_configure(struct i40evf_adapter *adapter) /** * i40evf_up_complete - Finish the last steps of bringing up a connection * @adapter: board private structure + * + * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock. **/ static void i40evf_up_complete(struct i40evf_adapter *adapter) { @@ -1075,6 +1019,8 @@ static void i40evf_up_complete(struct i40evf_adapter *adapter) /** * i40e_down - Shutdown the connection processing * @adapter: board private structure + * + * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock. **/ void i40evf_down(struct i40evf_adapter *adapter) { @@ -1084,16 +1030,14 @@ void i40evf_down(struct i40evf_adapter *adapter) if (adapter->state <= __I40EVF_DOWN_PENDING) return; - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) - usleep_range(500, 1000); - netif_carrier_off(netdev); netif_tx_disable(netdev); adapter->link_up = false; i40evf_napi_disable_all(adapter); i40evf_irq_disable(adapter); + spin_lock_bh(&adapter->mac_vlan_list_lock); + /* remove all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { f->remove = true; @@ -1102,6 +1046,9 @@ void i40evf_down(struct i40evf_adapter *adapter) list_for_each_entry(f, &adapter->vlan_filter_list, list) { f->remove = true; } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && adapter->state != __I40EVF_RESETTING) { /* cancel any current operation */ @@ -1115,7 +1062,6 @@ void i40evf_down(struct i40evf_adapter *adapter) adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; } - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); } @@ -1441,6 +1387,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) q_vector->adapter = adapter; q_vector->vsi = &adapter->vsi; q_vector->v_idx = q_idx; + q_vector->reg_idx = q_idx; cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); netif_napi_add(adapter->netdev, &q_vector->napi, i40evf_napi_poll, NAPI_POLL_WEIGHT); @@ -1770,13 +1717,8 @@ static void i40evf_watchdog_task(struct work_struct *work) if (adapter->state == __I40EVF_RUNNING) i40evf_request_stats(adapter); watchdog_done: - if (adapter->state == __I40EVF_RUNNING) { - i40evf_irq_enable_queues(adapter, ~0); - i40evf_fire_sw_int(adapter, 0xFF); - } else { - i40evf_fire_sw_int(adapter, 0x1); - } - + if (adapter->state == __I40EVF_RUNNING) + i40evf_detect_recover_hung(&adapter->vsi); clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); restart_watchdog: if (adapter->state == __I40EVF_REMOVE) @@ -1796,7 +1738,11 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; - if (netif_running(adapter->netdev)) { + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we 
can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. + */ + if (adapter->state == __I40EVF_RUNNING) { set_bit(__I40E_VSI_DOWN, adapter->vsi.state); netif_carrier_off(adapter->netdev); netif_tx_disable(adapter->netdev); @@ -1808,6 +1754,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) i40evf_free_all_rx_resources(adapter); } + spin_lock_bh(&adapter->mac_vlan_list_lock); + /* Delete all of the filters, both MAC and VLAN. */ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { list_del(&f->list); @@ -1819,6 +1767,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) kfree(fv); } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + i40evf_free_misc_irq(adapter); i40evf_reset_interrupt_capability(adapter); i40evf_free_queues(adapter); @@ -1854,6 +1804,13 @@ static void i40evf_reset_task(struct work_struct *work) struct i40evf_mac_filter *f; u32 reg_val; int i = 0, err; + bool running; + + /* When device is being removed it doesn't make sense to run the reset + * task, just return in such a case. + */ + if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section)) + return; while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section)) @@ -1913,7 +1870,13 @@ static void i40evf_reset_task(struct work_struct *work) } continue_reset: - if (netif_running(netdev)) { + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. + */ + running = (adapter->state == __I40EVF_RUNNING); + + if (running) { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; @@ -1948,6 +1911,8 @@ continue_reset: adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG; adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; + spin_lock_bh(&adapter->mac_vlan_list_lock); + /* re-add all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { f->add = true; @@ -1956,15 +1921,19 @@ continue_reset: list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { vlf->add = true; } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); i40evf_misc_irq_enable(adapter); mod_timer(&adapter->watchdog_timer, jiffies + 2); - if (netif_running(adapter->netdev)) { + /* We were running when the reset started, so we need to restore some + * state here. 
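+	 * (Roughly, and only as a reader's sketch: re-allocate the Tx/Rx
+	 * resources and bring the interface back up, much as i40evf_open()
+	 * below would on a normal bring-up.)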
+ */ + if (running) { /* allocate transmit descriptors */ err = i40evf_setup_all_tx_resources(adapter); if (err) @@ -1993,9 +1962,13 @@ continue_reset: adapter->state = __I40EVF_DOWN; wake_up(&adapter->down_waitqueue); } + clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); return; reset_err: + clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); i40evf_close(netdev); } @@ -2239,8 +2212,14 @@ static int i40evf_open(struct net_device *netdev) return -EIO; } - if (adapter->state != __I40EVF_DOWN) - return -EBUSY; + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) + usleep_range(500, 1000); + + if (adapter->state != __I40EVF_DOWN) { + err = -EBUSY; + goto err_unlock; + } /* allocate transmit descriptors */ err = i40evf_setup_all_tx_resources(adapter); @@ -2264,6 +2243,8 @@ static int i40evf_open(struct net_device *netdev) i40evf_irq_enable(adapter, true); + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + return 0; err_req_irq: @@ -2273,6 +2254,8 @@ err_setup_rx: i40evf_free_all_rx_resources(adapter); err_setup_tx: i40evf_free_all_tx_resources(adapter); +err_unlock: + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); return err; } @@ -2296,6 +2279,9 @@ static int i40evf_close(struct net_device *netdev) if (adapter->state <= __I40EVF_DOWN_PENDING) return 0; + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) + usleep_range(500, 1000); set_bit(__I40E_VSI_DOWN, adapter->vsi.state); if (CLIENT_ENABLED(adapter)) @@ -2305,6 +2291,8 @@ static int i40evf_close(struct net_device *netdev) adapter->state = __I40EVF_DOWN_PENDING; i40evf_free_traffic_irqs(adapter); + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + /* We explicitly don't free resources here because the hardware is * still active and can DMA into memory. 
Resources are cleared in * i40evf_virtchnl_completion() after we get confirmation from the PF @@ -2357,13 +2345,19 @@ static int i40evf_set_features(struct net_device *netdev, { struct i40evf_adapter *adapter = netdev_priv(netdev); - if (!VLAN_ALLOWED(adapter)) + /* Don't allow changing VLAN_RX flag when VLAN is set for VF + * and return an error in this case + */ + if (VLAN_ALLOWED(adapter)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + adapter->aq_required |= + I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; + else + adapter->aq_required |= + I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; + } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { return -EINVAL; - - if (features & NETIF_F_HW_VLAN_CTAG_RX) - adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; - else - adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; + } return 0; } @@ -2943,6 +2937,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mutex_init(&hw->aq.asq_mutex); mutex_init(&hw->aq.arq_mutex); + spin_lock_init(&adapter->mac_vlan_list_lock); + INIT_LIST_HEAD(&adapter->mac_filter_list); INIT_LIST_HEAD(&adapter->vlan_filter_list); @@ -2985,6 +2981,10 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) netif_device_detach(netdev); + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) + usleep_range(500, 1000); + if (netif_running(netdev)) { rtnl_lock(); i40evf_down(adapter); @@ -2993,6 +2993,8 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) i40evf_free_misc_irq(adapter); i40evf_reset_interrupt_capability(adapter); + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + retval = pci_save_state(pdev); if (retval) return retval; @@ -3066,7 +3068,8 @@ static void i40evf_remove(struct pci_dev *pdev) struct i40evf_mac_filter *f, *ftmp; struct i40e_hw *hw = &adapter->hw; int err; - + /* Indicate we are in remove and not to run reset_task */ + set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section); cancel_delayed_work_sync(&adapter->init_task); cancel_work_sync(&adapter->reset_task); cancel_delayed_work_sync(&adapter->client_task); @@ -3101,8 +3104,6 @@ static void i40evf_remove(struct pci_dev *pdev) if (adapter->watchdog_timer.function) del_timer_sync(&adapter->watchdog_timer); - flush_scheduled_work(); - i40evf_free_rss(adapter); if (hw->aq.asq.count) @@ -3118,6 +3119,7 @@ static void i40evf_remove(struct pci_dev *pdev) i40evf_free_all_rx_resources(adapter); i40evf_free_queues(adapter); kfree(adapter->vf_res); + spin_lock_bh(&adapter->mac_vlan_list_lock); /* If we got removed before an up/down sequence, we've got a filter * hanging out there that we need to get rid of. 
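	 * Note that the walk below runs under mac_vlan_list_lock, taken
	 * just above and dropped once the list is emptied, so it cannot
	 * race with the other filter-list updates that take the same lock.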
*/ @@ -3130,6 +3132,8 @@ static void i40evf_remove(struct pci_dev *pdev) kfree(f); } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + free_netdev(netdev); pci_disable_pcie_error_reporting(pdev); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 46c8b8a3907c..50ce0d6c09ef 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -433,12 +433,16 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) adapter->current_op); return; } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry(f, &adapter->mac_filter_list, list) { if (f->add) count++; } if (!count) { adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR; @@ -456,8 +460,10 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) } veal = kzalloc(len, GFP_KERNEL); - if (!veal) + if (!veal) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); return; + } veal->vsi_id = adapter->vsi_res->vsi_id; veal->num_elements = count; @@ -472,6 +478,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) } if (!more) adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len); kfree(veal); @@ -498,12 +507,16 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) adapter->current_op); return; } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry(f, &adapter->mac_filter_list, list) { if (f->remove) count++; } if (!count) { adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR; @@ -520,8 +533,10 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) more = true; } veal = kzalloc(len, GFP_KERNEL); - if (!veal) + if (!veal) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); return; + } veal->vsi_id = adapter->vsi_res->vsi_id; veal->num_elements = count; @@ -537,6 +552,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) } if (!more) adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len); kfree(veal); @@ -564,12 +582,15 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) return; } + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry(f, &adapter->vlan_filter_list, list) { if (f->add) count++; } if (!count) { adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } adapter->current_op = VIRTCHNL_OP_ADD_VLAN; @@ -586,8 +607,10 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) more = true; } vvfl = kzalloc(len, GFP_KERNEL); - if (!vvfl) + if (!vvfl) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); return; + } vvfl->vsi_id = adapter->vsi_res->vsi_id; vvfl->num_elements = count; @@ -602,6 +625,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) } if (!more) adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); kfree(vvfl); } @@ -628,12 +654,15 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) return; } + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry(f, 
&adapter->vlan_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
@@ -650,8 +679,10 @@
			more = true;
	}
	vvfl = kzalloc(len, GFP_KERNEL);
-	if (!vvfl)
+	if (!vvfl) {
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
+	}
	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
@@ -667,6 +698,9 @@
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
}
@@ -705,8 +739,10 @@
	}
	if (!flags) {
-		adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
+		adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON |
+				    I40EVF_FLAG_ALLMULTI_ON);
+		adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC |
+					  I40EVF_FLAG_AQ_RELEASE_ALLMULTI);
		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
	}
@@ -965,23 +1001,34 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
	if (v_opcode == VIRTCHNL_OP_EVENT) {
		struct virtchnl_pf_event *vpe =
			(struct virtchnl_pf_event *)msg;
+		bool link_up = vpe->event_data.link_event.link_status;
		switch (vpe->event) {
		case VIRTCHNL_EVENT_LINK_CHANGE:
			adapter->link_speed =
				vpe->event_data.link_event.link_speed;
-			if (adapter->link_up !=
-			    vpe->event_data.link_event.link_status) {
-				adapter->link_up =
-					vpe->event_data.link_event.link_status;
-				if (adapter->link_up) {
-					netif_tx_start_all_queues(netdev);
-					netif_carrier_on(netdev);
-				} else {
-					netif_tx_stop_all_queues(netdev);
-					netif_carrier_off(netdev);
-				}
-				i40evf_print_link_message(adapter);
+
+			/* we've already got the right link status, bail */
+			if (adapter->link_up == link_up)
+				break;
+
+			/* If we get a link up message and start queues before
+			 * our queues are configured, it will trigger a TX
+			 * hang. In that case, just ignore the link status
+			 * message; we'll get another one after we enable
+			 * queues and are actually prepared to send traffic.
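+			 * (An illustrative ordering this guards against:
+			 * the PF reports link up while the VF is still
+			 * initializing, queues get started before they are
+			 * configured, and the first transmit hangs.)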
+ */ + if (link_up && adapter->state != __I40EVF_RUNNING) + break; + + adapter->link_up = link_up; + if (link_up) { + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + } else { + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); } + i40evf_print_link_message(adapter); break; case VIRTCHNL_EVENT_RESET_IMPENDING: dev_info(&adapter->pdev->dev, "PF reset warning received\n"); diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 92845692087a..1c6b8d9176a8 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -690,6 +690,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); +unsigned int igb_get_max_rss_queues(struct igb_adapter *); #ifdef CONFIG_IGB_HWMON void igb_sysfs_exit(struct igb_adapter *adapter); int igb_sysfs_init(struct igb_adapter *adapter); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index d06a8db514d4..606e6761758f 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -3338,37 +3338,7 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, static unsigned int igb_max_channels(struct igb_adapter *adapter) { - struct e1000_hw *hw = &adapter->hw; - unsigned int max_combined = 0; - - switch (hw->mac.type) { - case e1000_i211: - max_combined = IGB_MAX_RX_QUEUES_I211; - break; - case e1000_82575: - case e1000_i210: - max_combined = IGB_MAX_RX_QUEUES_82575; - break; - case e1000_i350: - if (!!adapter->vfs_allocated_count) { - max_combined = 1; - break; - } - /* fall through */ - case e1000_82576: - if (!!adapter->vfs_allocated_count) { - max_combined = 2; - break; - } - /* fall through */ - case e1000_82580: - case e1000_i354: - default: - max_combined = IGB_MAX_RX_QUEUES; - break; - } - - return max_combined; + return igb_get_max_rss_queues(adapter); } static void igb_get_channels(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index c208753ff5b7..b88fae785369 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1744,6 +1744,20 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, * value = idleSlope * 61034 * ----------------- (E6) * 1000000 + * + * NOTE: For i210, given the above, we can see that idleslope + * is represented in 16.38431 kbps units by the value at + * the TQAVCC register (1Gbps / 61034), which reduces + * the granularity for idleslope increments. + * For instance, if you want to configure a 2576kbps + * idleslope, the value to be written on the register + * would have to be 157.23. If rounded down, you end + * up with less bandwidth available than originally + * required (~2572 kbps). If rounded up, you end up + * with a higher bandwidth (~2589 kbps). Below the + * approach we take is to always round up the + * calculated value, so the resulting bandwidth might + * be slightly higher for some configurations. 
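+			 * Worked example, using the figures above: for the
+			 * 2576kbps idleslope,
+			 *   value = DIV_ROUND_UP(2576 * 61034, 1000000)
+			 *         = DIV_ROUND_UP(157223584, 1000000)
+			 *         = 158
+			 * and 158 * 16.38431kbps ~= 2588.7kbps, i.e. the
+			 * slightly higher resulting bandwidth noted above.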
*/ value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000); @@ -3200,8 +3214,6 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) /* if allocation failed then we do not support SR-IOV */ if (!adapter->vf_data) { adapter->vfs_allocated_count = 0; - dev_err(&pdev->dev, - "Unable to allocate memory for VF Data Storage\n"); err = -ENOMEM; goto out; } @@ -3373,10 +3385,10 @@ static void igb_probe_vfs(struct igb_adapter *adapter) #endif /* CONFIG_PCI_IOV */ } -static void igb_init_queue_configuration(struct igb_adapter *adapter) +unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - u32 max_rss_queues; + unsigned int max_rss_queues; /* Determine the maximum number of RSS queues supported. */ switch (hw->mac.type) { @@ -3407,6 +3419,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter) break; } + return max_rss_queues; +} + +static void igb_init_queue_configuration(struct igb_adapter *adapter) +{ + u32 max_rss_queues; + + max_rss_queues = igb_get_max_rss_queues(adapter); adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); igb_set_flag_queue_pairs(adapter, max_rss_queues); @@ -3676,7 +3696,7 @@ static int __igb_close(struct net_device *netdev, bool suspending) int igb_close(struct net_device *netdev) { - if (netif_device_present(netdev)) + if (netif_device_present(netdev) || netdev->dismantle) return __igb_close(netdev, false); return 0; } @@ -8718,7 +8738,8 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index) /* Indicate to hardware the Address is Valid. */ if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) { - rar_high |= E1000_RAH_AV; + if (is_valid_ether_addr(addr)) + rar_high |= E1000_RAH_AV; if (hw->mac.type == e1000_82575) rar_high |= E1000_RAH_POOL_1 * @@ -8756,17 +8777,36 @@ static int igb_set_vf_mac(struct igb_adapter *adapter, static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct igb_adapter *adapter = netdev_priv(netdev); - if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count)) + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + + /* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC + * flag and allows to overwrite the MAC via VF netdev. This + * is necessary to allow libvirt a way to restore the original + * MAC after unbinding vfio-pci and reloading igbvf after shutting + * down a VM. 
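+	 * (Hypothetical iproute2 usage, for illustration only:
+	 *   ip link set <pf> vf 0 mac 52:54:00:aa:bb:cc   set admin MAC
+	 *   ip link set <pf> vf 0 mac 00:00:00:00:00:00   revert it
+	 * after which the VF driver may again set its own MAC.)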
+ */ + if (is_zero_ether_addr(mac)) { + adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, + "remove administratively set MAC on VF %d\n", + vf); + } else if (is_valid_ether_addr(mac)) { + adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", + mac, vf); + dev_info(&adapter->pdev->dev, + "Reload the VF driver to make this change effective."); + /* Generate additional warning if PF is down */ + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF MAC address has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + } else { return -EINVAL; - adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; - dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); - dev_info(&adapter->pdev->dev, - "Reload the VF driver to make this change effective."); - if (test_bit(__IGB_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, - "The VF MAC address has been set, but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, - "Bring the PF device up before attempting to use the VF device.\n"); } return igb_set_vf_mac(adapter, vf, mac); } diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 841c2a083349..0746b19ec6d3 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -643,6 +643,10 @@ static void igb_ptp_tx_work(struct work_struct *work) adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); adapter->tx_hwtstamp_timeouts++; + /* Clear the tx valid bit in TSYNCTXCTL register to enable + * interrupt + */ + rd32(E1000_TXSTMPH); dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); return; } @@ -717,6 +721,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter) */ void igb_ptp_tx_hang(struct igb_adapter *adapter) { + struct e1000_hw *hw = &adapter->hw; bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + IGB_PTP_TX_TIMEOUT); @@ -736,6 +741,10 @@ void igb_ptp_tx_hang(struct igb_adapter *adapter) adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); adapter->tx_hwtstamp_timeouts++; + /* Clear the tx valid bit in TSYNCTXCTL register to enable + * interrupt + */ + rd32(E1000_TXSTMPH); dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); } } diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 35e6fa643c7e..8319465eb38d 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -42,3 +42,4 @@ ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o +ixgbe-$(CONFIG_XFRM_OFFLOAD) += ixgbe_ipsec.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 468c3555a629..c1e3a0039ea5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -52,7 +52,9 @@ #ifdef CONFIG_IXGBE_DCA #include <linux/dca.h> #endif +#include "ixgbe_ipsec.h" +#include <net/xdp.h> #include <net/busy_poll.h> /* common prefix used by pr_<> macros */ @@ -170,10 +172,11 @@ enum ixgbe_tx_flags { IXGBE_TX_FLAGS_CC = 0x08, IXGBE_TX_FLAGS_IPV4 = 0x10, IXGBE_TX_FLAGS_CSUM = 0x20, + 
IXGBE_TX_FLAGS_IPSEC = 0x40, /* software defined flags */ - IXGBE_TX_FLAGS_SW_VLAN = 0x40, - IXGBE_TX_FLAGS_FCOE = 0x80, + IXGBE_TX_FLAGS_SW_VLAN = 0x80, + IXGBE_TX_FLAGS_FCOE = 0x100, }; /* VLAN info */ @@ -332,7 +335,6 @@ struct ixgbe_ring { struct net_device *netdev; /* netdev ring belongs to */ struct bpf_prog *xdp_prog; struct device *dev; /* device for DMA mapping */ - struct ixgbe_fwd_adapter *l2_accel_priv; void *desc; /* descriptor ring memory */ union { struct ixgbe_tx_buffer *tx_buffer_info; @@ -371,6 +373,7 @@ struct ixgbe_ring { struct ixgbe_tx_queue_stats tx_stats; struct ixgbe_rx_queue_stats rx_stats; }; + struct xdp_rxq_info xdp_rxq; } ____cacheline_internodealigned_in_smp; enum ixgbe_ring_f_enum { @@ -395,8 +398,7 @@ enum ixgbe_ring_f_enum { #define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) #define IXGBE_MAX_L2A_QUEUES 4 #define IXGBE_BAD_L2A_QUEUE 3 -#define IXGBE_MAX_MACVLANS 31 -#define IXGBE_MAX_DCBMACVLANS 8 +#define IXGBE_MAX_MACVLANS 63 struct ixgbe_ring_feature { u16 limit; /* upper limit on feature indices */ @@ -629,15 +631,18 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_EEE_CAPABLE BIT(14) #define IXGBE_FLAG2_EEE_ENABLED BIT(15) #define IXGBE_FLAG2_RX_LEGACY BIT(16) +#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17) /* Tx fast path data */ int num_tx_queues; u16 tx_itr_setting; u16 tx_work_limit; + u64 tx_ipsec; /* Rx fast path data */ int num_rx_queues; u16 rx_itr_setting; + u64 rx_ipsec; /* Port number used to identify VXLAN traffic */ __be16 vxlan_port; @@ -674,6 +679,7 @@ struct ixgbe_adapter { struct ieee_ets *ixgbe_ieee_ets; struct ixgbe_dcb_config dcb_cfg; struct ixgbe_dcb_config temp_dcb_cfg; + u8 hw_tcs; u8 dcb_set_bitmap; u8 dcbx_cap; enum ixgbe_fc_mode last_lfc_mode; @@ -721,8 +727,7 @@ struct ixgbe_adapter { u16 bridge_mode; - u16 eeprom_verh; - u16 eeprom_verl; + char eeprom_id[NVM_VER_SIZE]; u16 eeprom_cap; u32 interrupt_event; @@ -766,7 +771,8 @@ struct ixgbe_adapter { #endif /*CONFIG_DEBUG_FS*/ u8 default_up; - unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ + /* Bitmask indicating in use pools */ + DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1); #define IXGBE_MAX_LINK_HANDLE 10 struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE]; @@ -780,6 +786,10 @@ struct ixgbe_adapter { #define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ u32 *rss_key; + +#ifdef CONFIG_XFRM + struct ixgbe_ipsec *ipsec; +#endif /* CONFIG_XFRM */ }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) @@ -1010,4 +1020,24 @@ void ixgbe_store_key(struct ixgbe_adapter *adapter); void ixgbe_store_reta(struct ixgbe_adapter *adapter); s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); +#ifdef CONFIG_XFRM_OFFLOAD +void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter); +void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter); +void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter); +void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); +int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, + struct ixgbe_ipsec_tx_data *itd); +#else +static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }; +static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }; +static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }; +static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct 
sk_buff *skb) { }; +static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + struct ixgbe_ipsec_tx_data *itd) { return 0; }; +#endif /* CONFIG_XFRM_OFFLOAD */ #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 8a32eb7d47b9..a0ebd9ecf243 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -431,6 +431,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) /** * ixgbe_start_mac_link_82598 - Configures MAC link settings * @hw: pointer to hardware structure + * @autoneg_wait_to_complete: true when waiting for completion is needed * * Configures link settings based on values in the ixgbe_hw struct. * Restarts the link. Performs autonegotiation if needed. @@ -1054,7 +1055,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface. * @hw: pointer to hardware structure * @byte_offset: byte offset at address 0xA2 - * @eeprom_data: value read + * @sff8472_data: value read * * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C **/ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index d602637ccc40..4dfc81dbee4b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -221,7 +221,7 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, /** * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write * @hw: pointer to hardware structure - * @reg_val: value to write to AUTOC + * @autoc: value to write to AUTOC * @locked: bool to indicate whether the SW/FW lock was already taken by * previous proc_autoc_read_82599. * @@ -1310,10 +1310,11 @@ do { \ /** * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash - * @stream: input bitstream to compute the hash on + * @input: input bitstream to compute the hash on + * @common: compressed common input dword * * This function is almost identical to the function above but contains - * several optomizations such as unwinding all of the loops, letting the + * several optimizations such as unwinding all of the loops, letting the * compiler work out all of the conditional ifs since the keys are static * defines, and computing two keys at once since the hashed dword stream * will be the same for both keys. @@ -1446,7 +1447,7 @@ do { \ /** * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash - * @atr_input: input bitstream to compute the hash on + * @input: input bitstream to compute the hash on * @input_mask: mask for the input bitstream * * This function serves two main purposes. 
First it applies the input_mask @@ -2078,6 +2079,7 @@ reset_pipeline_out: * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read + * @dev_addr: address to read from * @data: value read * * Performs byte read operation to SFP module's EEPROM over I2C interface at @@ -2131,6 +2133,7 @@ release_i2c_access: * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write + * @dev_addr: address to write to * @data: value to write * * Performs byte write operation to SFP module's EEPROM over I2C interface at diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 9bef255f6a18..61188f343955 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1613,6 +1613,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, /** * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM * @hw: pointer to hardware structure + * @count: number of bits to shift **/ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) { @@ -1667,7 +1668,7 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) /** * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. * @hw: pointer to hardware structure - * @eecd: EECD's current value + * @eec: EEC's current value **/ static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) { @@ -2037,7 +2038,7 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) /** * ixgbe_set_mta - Set bit-vector in multicast table * @hw: pointer to hardware structure - * @hash_value: Multicast address hash value + * @mc_addr: Multicast address * * Sets the bit-vector in the multicast table. **/ @@ -3086,6 +3087,8 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter + * @vlvf_bypass: true to find vlanid only, false returns first empty slot if + * vlanid not found * * return the VLVF index where this VLAN id should be placed * @@ -3476,7 +3479,7 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing * @hw: pointer to hardware structure * @enable: enable or disable switch for VLAN anti-spoofing - * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing * **/ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) @@ -4028,6 +4031,118 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) return 0; } +/** + * ixgbe_get_orom_version - Return option ROM from EEPROM + * + * @hw: pointer to hardware structure + * @nvm_ver: pointer to output structure + * + * if valid option ROM version, nvm_ver->or_valid set to true + * else nvm_ver->or_valid is false. + **/ +void ixgbe_get_orom_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver) +{ + u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl; + + nvm_ver->or_valid = false; + /* Option Rom may or may not be present. 
Start with pointer */
+	hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
+
+	/* make sure offset is valid */
+	if (offset == 0x0 || offset == NVM_INVALID_PTR)
+		return;
+
+	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
+	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
+
+	/* option rom exists and is valid */
+	if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
+	    eeprom_cfg_blkl == NVM_VER_INVALID ||
+	    eeprom_cfg_blkh == NVM_VER_INVALID)
+		return;
+
+	nvm_ver->or_valid = true;
+	nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
+	nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
+			    (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
+	nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
+}
+
+/**
+ * ixgbe_get_oem_prod_version - Return OEM product version from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * if valid OEM product version, nvm_ver->oem_valid is set to true
+ * else nvm_ver->oem_valid is false.
+ **/
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+				struct ixgbe_nvm_version *nvm_ver)
+{
+	u16 rel_num, prod_ver, mod_len, cap, offset;
+
+	nvm_ver->oem_valid = false;
+	hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
+
+	/* Return if offset to OEM Product Version block is invalid */
+	if (offset == 0x0 || offset == NVM_INVALID_PTR)
+		return;
+
+	/* Read product version block */
+	hw->eeprom.ops.read(hw, offset, &mod_len);
+	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
+
+	/* Return if OEM product version block is invalid */
+	if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
+	    (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
+		return;
+
+	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
+	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
+
+	/* Return if version is invalid */
+	if ((rel_num | prod_ver) == 0x0 ||
+	    rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
+		return;
+
+	nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
+	nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
+	nvm_ver->oem_release = rel_num;
+	nvm_ver->oem_valid = true;
+}
+
+/**
+ * ixgbe_get_etk_id - Return Etrack ID from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * word read errors will return 0xFFFF
+ **/
+void ixgbe_get_etk_id(struct ixgbe_hw *hw,
+		      struct ixgbe_nvm_version *nvm_ver)
+{
+	u16 etk_id_l, etk_id_h;
+
+	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
+		etk_id_l = NVM_VER_INVALID;
+	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
+		etk_id_h = NVM_VER_INVALID;
+
+	/* The word order for the version format is determined by high order
+	 * word bit 15.
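+	 * (Example, assuming NVM_ETK_SHIFT is 16: etk_id_h = 0x8123,
+	 * etk_id_l = 0x4567 has bit 15 set, so the Etrack ID reads
+	 * 0x81234567; with bit 15 clear, e.g. etk_id_h = 0x0123, the
+	 * words swap and the same reads yield 0x45670123.)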
+ */ + if ((etk_id_h & NVM_ETK_VALID) == 0) { + nvm_ver->etk_id = etk_id_h; + nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT); + } else { + nvm_ver->etk_id = etk_id_l; + nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT); + } +} + void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) { u32 rxctrl; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index a01409e2e06c..4d4c02366cb3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -139,6 +139,12 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT]; s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); +void ixgbe_get_etk_id(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver); +void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver); +void ixgbe_get_orom_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver); void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 072ef3b5fc61..aaea8282bfd2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -39,6 +39,10 @@ * are the smallest unit programmable into the underlying * hardware. The IEEE 802.1Qaz specification do not use bandwidth * groups so this is much simplified from the CEE case. + * @bw: bandwidth index by traffic class + * @refill: refill credits index by traffic class + * @max: max credits by traffic class + * @max_frame: maximum frame size */ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame) @@ -72,8 +76,10 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, /** * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits - * @ixgbe_dcb_config: Struct containing DCB settings. - * @direction: Configuring either Tx or Rx. + * @hw: pointer to hardware structure + * @dcb_config: Struct containing DCB settings + * @max_frame: Maximum frame size + * @direction: Configuring either Tx or Rx * * This function calculates the credits allocated to each traffic class. * It should be called only after the rules are checked by diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c index b79e93a5b699..f94c7e82a30b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -34,7 +34,9 @@ /** * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @prio_type: priority type indexed by traffic class * * Configure Rx Data Arbiter and credits for each traffic class. */ @@ -91,7 +93,10 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, /** * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. 
arbiter * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class * * Configure Tx Descriptor Arbiter and credits for each traffic class. */ @@ -137,7 +142,10 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, /** * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class * * Configure Tx Data Arbiter and credits for each traffic class. */ @@ -184,7 +192,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, /** * ixgbe_dcb_config_pfc_82598 - Config priority flow control * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure + * @pfc_en: enabled pfc bitmask * * Configure Priority Flow Control for each traffic class. */ @@ -269,7 +277,11 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) /** * ixgbe_dcb_hw_config_82598 - Config and enable DCB * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure + * @pfc_en: enabled pfc bitmask + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class * * Configure dcb settings and enable dcb mode. */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 1011d644978f..1eed6811e914 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -38,6 +38,7 @@ * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @prio_type: priority type indexed by traffic class + * @prio_tc: priority to tc assignments indexed by priority * * Configure Rx Packet Arbiter and credits for each traffic class. */ @@ -148,6 +149,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @prio_type: priority type indexed by traffic class + * @prio_tc: priority to tc assignments indexed by priority * * Configure Tx Packet Arbiter and credits for each traffic class. */ @@ -344,11 +346,12 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) /** * ixgbe_dcb_hw_config_82599 - Configure and enable DCB * @hw: pointer to hardware structure + * @pfc_en: enabled pfc bitmask * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @prio_type: priority type indexed by traffic class - * @pfc_en: enabled pfc bitmask + * @prio_tc: priority to tc assignments indexed by priority * * Configure dcb settings and enable dcb mode. 
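+ * (Illustrative: a prio_tc of {0, 0, 1, 1, 2, 2, 3, 3} would map the
+ * eight 802.1p priorities pairwise onto four traffic classes.)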
*/ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index 78c52375acc6..b33f3f87e4b1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -571,7 +571,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs) return -EINVAL; - if (max_tc != netdev_get_num_tc(dev)) { + if (max_tc != adapter->hw_tcs) { err = ixgbe_setup_tc(dev, max_tc); if (err) return err; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c index 5e2c1e35e517..ad54080488ee 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -249,7 +249,7 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) /** * ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries - * @pf: the pf that is stopping + * @adapter: the adapter that is exiting **/ void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 0aad1c2a3667..221f15803480 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -115,6 +115,8 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = { {"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)}, {"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)}, {"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)}, + {"tx_ipsec", IXGBE_STAT(tx_ipsec)}, + {"rx_ipsec", IXGBE_STAT(rx_ipsec)}, #ifdef IXGBE_FCOE {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, @@ -1014,16 +1016,13 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - u32 nvm_track_id; strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, ixgbe_driver_version, sizeof(drvinfo->version)); - nvm_track_id = (adapter->eeprom_verh << 16) | - adapter->eeprom_verl; - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", - nvm_track_id); + strlcpy(drvinfo->fw_version, adapter->eeprom_id, + sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); @@ -1156,6 +1155,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev, memcpy(&temp_ring[i], adapter->rx_ring[i], sizeof(struct ixgbe_ring)); + /* Clear copied XDP RX-queue info */ + memset(&temp_ring[i].xdp_rxq, 0, + sizeof(temp_ring[i].xdp_rxq)); + temp_ring[i].count = new_rx_count; err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]); if (err) { @@ -3082,26 +3085,9 @@ static int ixgbe_get_ts_info(struct net_device *dev, case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); - /* fallthrough */ + break; case ixgbe_mac_X540: case ixgbe_mac_82599EB: - info->so_timestamping = - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | - SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; - - if (adapter->ptp_clock) - info->phc_index = ptp_clock_index(adapter->ptp_clock); - else - info->phc_index = -1; - - info->tx_types = - BIT(HWTSTAMP_TX_OFF) | - BIT(HWTSTAMP_TX_ON); - info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | 
BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | @@ -3110,13 +3096,31 @@ static int ixgbe_get_ts_info(struct net_device *dev, default: return ethtool_op_get_ts_info(dev, info); } + + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + return 0; } static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) { unsigned int max_combined; - u8 tcs = netdev_get_num_tc(adapter->netdev); + u8 tcs = adapter->hw_tcs; if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { /* We only support one q_vector without MSI-X */ @@ -3173,7 +3177,7 @@ static void ixgbe_get_channels(struct net_device *dev, return; /* same thing goes for being DCB enabled */ - if (netdev_get_num_tc(dev) > 1) + if (adapter->hw_tcs > 1) return; /* if ATR is disabled we can exit */ @@ -3219,7 +3223,7 @@ static int ixgbe_set_channels(struct net_device *dev, #endif /* use setup TC to update any traffic class queue mapping */ - return ixgbe_setup_tc(dev, netdev_get_num_tc(dev)); + return ixgbe_setup_tc(dev, adapter->hw_tcs); } static int ixgbe_get_module_info(struct net_device *dev, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index a23c2b5411a0..7a09a40e4472 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -150,6 +150,7 @@ skip_ddpinv: * @xid: the exchange id requesting ddp * @sgl: the scatter-gather list for this request * @sgc: the number of scatter-gather items + * @target_mode: 1 to setup target mode, 0 to setup initiator mode * * Returns : 1 for success and 0 for no ddp */ @@ -1034,11 +1035,8 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, ixgbe_driver_name, ixgbe_driver_version); /* Firmware Version */ - snprintf(info->firmware_version, - sizeof(info->firmware_version), - "0x%08x", - (adapter->eeprom_verh << 16) | - adapter->eeprom_verl); + strlcpy(info->firmware_version, adapter->eeprom_id, + sizeof(info->firmware_version)); /* Model */ if (hw->mac.type == ixgbe_mac_82599EB) { @@ -1066,7 +1064,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, /** * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to - * @adapter - pointer to the device adapter structure + * @adapter: pointer to the device adapter structure * * Return : TC that FCoE is mapped to */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c new file mode 100644 index 000000000000..93eacddb6704 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -0,0 +1,941 @@ +/******************************************************************************* + * + * Intel 10 Gigabit PCI Express Linux driver + * Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "ixgbe.h" +#include <net/xfrm.h> +#include <crypto/aead.h> + +/** + * ixgbe_ipsec_set_tx_sa - set the Tx SA registers + * @hw: hw specific details + * @idx: register index to write + * @key: key byte array + * @salt: salt bytes + **/ +static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx, + u32 key[], u32 salt) +{ + u32 reg; + int i; + + for (i = 0; i < 4; i++) + IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i])); + IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt)); + IXGBE_WRITE_FLUSH(hw); + + reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX); + reg &= IXGBE_RXTXIDX_IPS_EN; + reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE; + IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_ipsec_set_rx_item - set an Rx table item + * @hw: hw specific details + * @idx: register index to write + * @tbl: table selector + * + * Trigger the device to store into a particular Rx table the + * data that has already been loaded into the input register + **/ +static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx, + enum ixgbe_ipsec_tbl_sel tbl) +{ + u32 reg; + + reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX); + reg &= IXGBE_RXTXIDX_IPS_EN; + reg |= tbl << IXGBE_RXIDX_TBL_SHIFT | + idx << IXGBE_RXTXIDX_IDX_SHIFT | + IXGBE_RXTXIDX_WRITE; + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info + * @hw: hw specific details + * @idx: register index to write + * @spi: security parameter index + * @key: key byte array + * @salt: salt bytes + * @mode: rx decrypt control bits + * @ip_idx: index into IP table for related IP address + **/ +static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi, + u32 key[], u32 salt, u32 mode, u32 ip_idx) +{ + int i; + + /* store the SPI (in bigendian) and IPidx */ + IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi)); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx); + IXGBE_WRITE_FLUSH(hw); + + ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl); + + /* store the key, salt, and mode */ + for (i = 0; i < 4; i++) + IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i])); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt)); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode); + IXGBE_WRITE_FLUSH(hw); + + ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl); +} + +/** + * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info + * @hw: hw specific details + * @idx: register index to write + * @addr: IP address byte array + **/ +static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[]) +{ + int i; + + /* store the ip address */ + for (i = 0; i < 4; i++) + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i])); + IXGBE_WRITE_FLUSH(hw); + + ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl); +} + +/** + * ixgbe_ipsec_clear_hw_tables - because some tables don't 
get cleared on reset + * @adapter: board private structure + **/ +static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct ixgbe_hw *hw = &adapter->hw; + u32 buf[4] = {0, 0, 0, 0}; + u16 idx; + + /* disable Rx and Tx SA lookup */ + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0); + + /* scrub the tables - split the loops for the max of the IP table */ + for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) { + ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0); + ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0); + ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf); + } + for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) { + ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0); + ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0); + } + + ipsec->num_rx_sa = 0; + ipsec->num_tx_sa = 0; +} + +/** + * ixgbe_ipsec_stop_data + * @adapter: board private structure + **/ +static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + bool link = adapter->link_up; + u32 t_rdy, r_rdy; + u32 limit; + u32 reg; + + /* halt data paths */ + reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + reg |= IXGBE_SECTXCTRL_TX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + reg |= IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg); + + IXGBE_WRITE_FLUSH(hw); + + /* If the tx fifo doesn't have link, but still has data, + * we can't clear the tx sec block. Set the MAC loopback + * before block clear + */ + if (!link) { + reg = IXGBE_READ_REG(hw, IXGBE_MACC); + reg |= IXGBE_MACC_FLU; + IXGBE_WRITE_REG(hw, IXGBE_MACC, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_HLREG0); + reg |= IXGBE_HLREG0_LPBK; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg); + + IXGBE_WRITE_FLUSH(hw); + mdelay(3); + } + + /* wait for the paths to empty */ + limit = 20; + do { + mdelay(10); + t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) & + IXGBE_SECTXSTAT_SECTX_RDY; + r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) & + IXGBE_SECRXSTAT_SECRX_RDY; + } while (!t_rdy && !r_rdy && limit--); + + /* undo loopback if we played with it earlier */ + if (!link) { + reg = IXGBE_READ_REG(hw, IXGBE_MACC); + reg &= ~IXGBE_MACC_FLU; + IXGBE_WRITE_REG(hw, IXGBE_MACC, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_HLREG0); + reg &= ~IXGBE_HLREG0_LPBK; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg); + + IXGBE_WRITE_FLUSH(hw); + } +} + +/** + * ixgbe_ipsec_stop_engine + * @adapter: board private structure + **/ +static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 reg; + + ixgbe_ipsec_stop_data(adapter); + + /* disable Rx and Tx SA lookup */ + IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0); + + /* disable the Rx and Tx engines and full packet store-n-forward */ + reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + reg |= IXGBE_SECTXCTRL_SECTX_DIS; + reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + reg |= IXGBE_SECRXCTRL_SECRX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg); + + /* restore the "tx security buffer almost full threshold" to 0x250 */ + IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250); + + /* Set minimum IFG between packets back to the default 0x1 */ + reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + reg = (reg & 0xfffffff0) | 0x1; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); + + /* final set for normal (no ipsec offload) processing */ + 
IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS); + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS); + + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_ipsec_start_engine + * @adapter: board private structure + * + * NOTE: this increases power consumption whether being used or not + **/ +static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 reg; + + ixgbe_ipsec_stop_data(adapter); + + /* Set minimum IFG between packets to 3 */ + reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + reg = (reg & 0xfffffff0) | 0x3; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); + + /* Set "tx security buffer almost full threshold" to 0x15 so that the + * almost full indication is generated only after buffer contains at + * least an entire jumbo packet. + */ + reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF); + reg = (reg & 0xfffffc00) | 0x15; + IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg); + + /* restart the data paths by clearing the DISABLE bits */ + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0); + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD); + + /* enable Rx and Tx SA lookup */ + IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN); + + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset + * @adapter: board private structure + **/ +void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct ixgbe_hw *hw = &adapter->hw; + int i; + + if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) + return; + + /* clean up and restart the engine */ + ixgbe_ipsec_stop_engine(adapter); + ixgbe_ipsec_clear_hw_tables(adapter); + ixgbe_ipsec_start_engine(adapter); + + /* reload the IP addrs */ + for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) { + struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i]; + + if (ipsa->used) + ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr); + } + + /* reload the Rx and Tx keys */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { + struct rx_sa *rsa = &ipsec->rx_tbl[i]; + struct tx_sa *tsa = &ipsec->tx_tbl[i]; + + if (rsa->used) + ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi, + rsa->key, rsa->salt, + rsa->mode, rsa->iptbl_ind); + + if (tsa->used) + ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt); + } +} + +/** + * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index + * @ipsec: pointer to ipsec struct + * @rxtable: true if we need to look in the Rx table + * + * Returns the first unused index in either the Rx or Tx SA table + **/ +static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable) +{ + u32 i; + + if (rxtable) { + if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT) + return -ENOSPC; + + /* search rx sa table */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { + if (!ipsec->rx_tbl[i].used) + return i; + } + } else { + if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT) + return -ENOSPC; + + /* search tx sa table */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { + if (!ipsec->tx_tbl[i].used) + return i; + } + } + + return -ENOSPC; +} + +/** + * ixgbe_ipsec_find_rx_state - find the state that matches + * @ipsec: pointer to ipsec struct + * @daddr: inbound address to match + * @proto: protocol to match + * @spi: SPI to match + * @ip4: true if using an ipv4 address + * + * Returns a pointer to the matching SA state information + **/ +static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec 
*ipsec, + __be32 *daddr, u8 proto, + __be32 spi, bool ip4) +{ + struct rx_sa *rsa; + struct xfrm_state *ret = NULL; + + rcu_read_lock(); + hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi) + if (spi == rsa->xs->id.spi && + ((ip4 && *daddr == rsa->xs->id.daddr.a4) || + (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6, + sizeof(rsa->xs->id.daddr.a6)))) && + proto == rsa->xs->id.proto) { + ret = rsa->xs; + xfrm_state_hold(ret); + break; + } + rcu_read_unlock(); + return ret; +} + +/** + * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol + * @xs: pointer to xfrm_state struct + * @mykey: pointer to key array to populate + * @mysalt: pointer to salt value to populate + * + * This copies the protocol keys and salt to our own data tables. The + * 82599 family only supports the one algorithm. + **/ +static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, + u32 *mykey, u32 *mysalt) +{ + struct net_device *dev = xs->xso.dev; + unsigned char *key_data; + char *alg_name = NULL; + const char aes_gcm_name[] = "rfc4106(gcm(aes))"; + int key_len; + + if (xs->aead) { + key_data = &xs->aead->alg_key[0]; + key_len = xs->aead->alg_key_len; + alg_name = xs->aead->alg_name; + } else { + netdev_err(dev, "Unsupported IPsec algorithm\n"); + return -EINVAL; + } + + if (strcmp(alg_name, aes_gcm_name)) { + netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n", + aes_gcm_name); + return -EINVAL; + } + + /* The key bytes come down in a big-endian array of bytes, so + * we don't need to do any byteswapping. + * A key_len of 160 bits is a 16 byte key plus a 4 byte salt. + */ + if (key_len == 160) { + *mysalt = ((u32 *)key_data)[4]; + } else if (key_len != 128) { + netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n"); + return -EINVAL; + } else { + netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n"); + *mysalt = 0; + } + memcpy(mykey, key_data, 16); + + return 0; +} + +/** + * ixgbe_ipsec_add_sa - program device with a security association + * @xs: pointer to transformer state struct + **/ +static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) +{ + struct net_device *dev = xs->xso.dev; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct ixgbe_hw *hw = &adapter->hw; + int checked, match, first; + u16 sa_idx; + int ret; + int i; + + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { + netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n", + xs->id.proto); + return -EINVAL; + } + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + struct rx_sa rsa; + + if (xs->calg) { + netdev_err(dev, "Compression offload not supported\n"); + return -EINVAL; + } + + /* find the first unused index */ + ret = ixgbe_ipsec_find_empty_idx(ipsec, true); + if (ret < 0) { + netdev_err(dev, "No space for SA in Rx table!\n"); + return ret; + } + sa_idx = (u16)ret; + + memset(&rsa, 0, sizeof(rsa)); + rsa.used = true; + rsa.xs = xs; + + if (rsa.xs->id.proto == IPPROTO_ESP) + rsa.decrypt = xs->ealg || xs->aead; + + /* get the key and salt */ + ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt); + if (ret) { + netdev_err(dev, "Failed to get key data for Rx SA table\n"); + return ret; + } + + /* get ip for rx sa table */ + if (xs->props.family == AF_INET6) + memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16); + else + memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4); + + /* The HW does not have a 1:1 mapping from keys to IP addrs, so + * check for a matching IP addr entry in the table.
If the addr + * already exists, use it; else find an unused slot and add the + * addr. If one does not exist and there are no unused table + * entries, fail the request. + */ + + /* Find an existing match or the first unused slot, and stop looking + * after we've checked all we know we have. + */ + checked = 0; + match = -1; + first = -1; + for (i = 0; + i < IXGBE_IPSEC_MAX_RX_IP_COUNT && + (checked < ipsec->num_rx_sa || first < 0); + i++) { + if (ipsec->ip_tbl[i].used) { + if (!memcmp(ipsec->ip_tbl[i].ipaddr, + rsa.ipaddr, sizeof(rsa.ipaddr))) { + match = i; + break; + } + checked++; + } else if (first < 0) { + first = i; /* track the first empty seen */ + } + } + + if (ipsec->num_rx_sa == 0) + first = 0; + + if (match >= 0) { + /* addrs are the same, we should use this one */ + rsa.iptbl_ind = match; + ipsec->ip_tbl[match].ref_cnt++; + + } else if (first >= 0) { + /* no matches, but here's an empty slot */ + rsa.iptbl_ind = first; + + memcpy(ipsec->ip_tbl[first].ipaddr, + rsa.ipaddr, sizeof(rsa.ipaddr)); + ipsec->ip_tbl[first].ref_cnt = 1; + ipsec->ip_tbl[first].used = true; + + ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr); + + } else { + /* no match and no empty slot */ + netdev_err(dev, "No space for SA in Rx IP SA table\n"); + memset(&rsa, 0, sizeof(rsa)); + return -ENOSPC; + } + + rsa.mode = IXGBE_RXMOD_VALID; + if (rsa.xs->id.proto == IPPROTO_ESP) + rsa.mode |= IXGBE_RXMOD_PROTO_ESP; + if (rsa.decrypt) + rsa.mode |= IXGBE_RXMOD_DECRYPT; + if (rsa.xs->props.family == AF_INET6) + rsa.mode |= IXGBE_RXMOD_IPV6; + + /* the preparations worked, so save the info */ + memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa)); + + ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key, + rsa.salt, rsa.mode, rsa.iptbl_ind); + xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX; + + ipsec->num_rx_sa++; + + /* hash the new entry for faster search in Rx path */ + hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist, + rsa.xs->id.spi); + } else { + struct tx_sa tsa; + + /* find the first unused index */ + ret = ixgbe_ipsec_find_empty_idx(ipsec, false); + if (ret < 0) { + netdev_err(dev, "No space for SA in Tx table\n"); + return ret; + } + sa_idx = (u16)ret; + + memset(&tsa, 0, sizeof(tsa)); + tsa.used = true; + tsa.xs = xs; + + if (xs->id.proto == IPPROTO_ESP) + tsa.encrypt = xs->ealg || xs->aead; + + ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt); + if (ret) { + netdev_err(dev, "Failed to get key data for Tx SA table\n"); + memset(&tsa, 0, sizeof(tsa)); + return ret; + } + + /* the preparations worked, so save the info */ + memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa)); + + ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt); + + xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX; + + ipsec->num_tx_sa++; + } + + /* enable the engine if not already warmed up */ + if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) { + ixgbe_ipsec_start_engine(adapter); + adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED; + } + + return 0; +} + +/** + * ixgbe_ipsec_del_sa - clear out this specific SA + * @xs: pointer to transformer state struct + **/ +static void ixgbe_ipsec_del_sa(struct xfrm_state *xs) +{ + struct net_device *dev = xs->xso.dev; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct ixgbe_hw *hw = &adapter->hw; + u32 zerobuf[4] = {0, 0, 0, 0}; + u16 sa_idx; + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + struct rx_sa *rsa; + u8 ipi; + + sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX; + rsa =
&ipsec->rx_tbl[sa_idx]; + + if (!rsa->used) { + netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n", + sa_idx, xs->xso.offload_handle); + return; + } + + ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0); + hash_del_rcu(&rsa->hlist); + + /* if the IP table entry is referenced by only this SA, + * i.e. ref_cnt is only 1, clear the IP table entry as well + */ + ipi = rsa->iptbl_ind; + if (ipsec->ip_tbl[ipi].ref_cnt > 0) { + ipsec->ip_tbl[ipi].ref_cnt--; + + if (!ipsec->ip_tbl[ipi].ref_cnt) { + memset(&ipsec->ip_tbl[ipi], 0, + sizeof(struct rx_ip_sa)); + ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf); + } + } + + memset(rsa, 0, sizeof(struct rx_sa)); + ipsec->num_rx_sa--; + } else { + sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; + + if (!ipsec->tx_tbl[sa_idx].used) { + netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n", + sa_idx, xs->xso.offload_handle); + return; + } + + ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0); + memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa)); + ipsec->num_tx_sa--; + } + + /* if there are no SAs left, stop the engine to save energy */ + if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) { + adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED; + ixgbe_ipsec_stop_engine(adapter); + } +} + +/** + * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload + * @skb: current data packet + * @xs: pointer to transformer state struct + **/ +static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) +{ + if (xs->props.family == AF_INET) { + /* Offload with IPv4 options is not supported yet */ + if (ip_hdr(skb)->ihl != 5) + return false; + } else { + /* Offload with IPv6 extension headers is not supported yet */ + if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) + return false; + } + + return true; +} + +/** + * ixgbe_ipsec_free - called by xfrm garbage collection + * @xs: pointer to transformer state struct + * + * We don't have any garbage to collect, so we shouldn't bother + * implementing this function, but the XFRM code doesn't check for + * existence before calling the API callback. + **/
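
The 160-bit case in ixgbe_ipsec_parse_proto_keys() above is plain pointer arithmetic over the AEAD key blob: an rfc4106(gcm(aes)) key of 160 bits is 16 key bytes followed by a 4-byte salt, while a 128-bit key carries no salt at all. A minimal userspace sketch of the same split, assuming that layout; split_key() is an illustrative name, not a driver function:

#include <stdint.h>
#include <string.h>

/* split an rfc4106(gcm(aes)) key blob into the 128-bit AES key and the
 * 32-bit salt, mirroring the key_len checks in the driver above */
static int split_key(const uint8_t *blob, unsigned int bits,
		     uint8_t key[16], uint32_t *salt)
{
	if (bits != 128 && bits != 160)
		return -1;		/* only AES-128-GCM fits the hw */

	memcpy(key, blob, 16);		/* key is always the first 16 bytes */
	if (bits == 160)
		memcpy(salt, blob + 16, 4);	/* salt is bytes 16..19 */
	else
		*salt = 0;		/* no salt supplied, as in the driver */
	return 0;
}

int main(void)
{
	uint8_t blob[20] = { [16] = 0xde, 0xad, 0xbe, 0xef };
	uint8_t key[16];
	uint32_t salt;

	return split_key(blob, 160, key, &salt);
}
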
+static void ixgbe_ipsec_free(struct xfrm_state *xs) +{ +} + +static const struct xfrmdev_ops ixgbe_xfrmdev_ops = { + .xdo_dev_state_add = ixgbe_ipsec_add_sa, + .xdo_dev_state_delete = ixgbe_ipsec_del_sa, + .xdo_dev_offload_ok = ixgbe_ipsec_offload_ok, + .xdo_dev_state_free = ixgbe_ipsec_free, +}; + +/** + * ixgbe_ipsec_tx - setup Tx flags for ipsec offload + * @tx_ring: outgoing context + * @first: current data packet + * @itd: ipsec Tx data for later use in building context descriptor + **/ +int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + struct ixgbe_ipsec_tx_data *itd) +{ + struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev); + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct xfrm_state *xs; + struct tx_sa *tsa; + + if (unlikely(!first->skb->sp->len)) { + netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n", + __func__, first->skb->sp->len); + return 0; + } + + xs = xfrm_input_state(first->skb); + if (unlikely(!xs)) { + netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n", + __func__, xs); + return 0; + } + + itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; + if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) { + netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n", + __func__, itd->sa_idx, xs->xso.offload_handle); + return 0; + } + + tsa = &ipsec->tx_tbl[itd->sa_idx]; + if (unlikely(!tsa->used)) { + netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n", + __func__, itd->sa_idx); + return 0; + } + + first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC; + + itd->flags = 0; + if (xs->id.proto == IPPROTO_ESP) { + itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP | + IXGBE_ADVTXD_TUCMD_L4T_TCP; + if (first->protocol == htons(ETH_P_IP)) + itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4; + itd->trailer_len = xs->props.trailer_len; + } + if (tsa->encrypt) + itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN; + + return 1; +} + +/** + * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor + * @rx_ring: receiving ring + * @rx_desc: receive data descriptor + * @skb: current data packet + * + * Determine whether an ipsec encapsulation was noticed, and if so set up + * the resulting status for later in the receive stack. + **/ +void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev); + __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + __le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH | + IXGBE_RXDADV_PKTTYPE_IPSEC_ESP); + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct xfrm_offload *xo = NULL; + struct xfrm_state *xs = NULL; + struct ipv6hdr *ip6 = NULL; + struct iphdr *ip4 = NULL; + void *daddr; + __be32 spi; + u8 *c_hdr; + u8 proto; + + /* Find the ip and crypto headers in the data. + * We can assume there is no vlan header in the way, because the + * hw won't recognize the IPsec packet and, in any case, the vlan + * device doesn't currently support xfrm offload.
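
ixgbe_ipsec_add_sa() hands the chosen table slot back to the stack through xs->xso.offload_handle, and ixgbe_ipsec_tx() and ixgbe_ipsec_del_sa() recover the slot by subtracting the base index again, as just seen. A compilable round trip of that encoding, reusing the constants from ixgbe_ipsec.h (the helper names are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IXGBE_IPSEC_MAX_SA_COUNT	1024
#define IXGBE_IPSEC_BASE_RX_INDEX	0
#define IXGBE_IPSEC_BASE_TX_INDEX	IXGBE_IPSEC_MAX_SA_COUNT

/* Rx SAs map to handles [0, 1023], Tx SAs to [1024, 2047] */
static unsigned long encode_handle(uint16_t sa_idx, bool is_rx)
{
	return sa_idx + (is_rx ? IXGBE_IPSEC_BASE_RX_INDEX
			       : IXGBE_IPSEC_BASE_TX_INDEX);
}

static int decode_tx_handle(unsigned long handle, uint16_t *sa_idx)
{
	unsigned long idx = handle - IXGBE_IPSEC_BASE_TX_INDEX;

	if (idx >= IXGBE_IPSEC_MAX_SA_COUNT)
		return -1;	/* not a valid Tx SA handle */
	*sa_idx = (uint16_t)idx;
	return 0;
}

int main(void)
{
	uint16_t idx;
	unsigned long h = encode_handle(5, false);

	if (!decode_tx_handle(h, &idx))
		printf("handle %lu -> tx sa_idx %u\n", h, idx);
	return 0;
}
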
+ */ + if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) { + ip4 = (struct iphdr *)(skb->data + ETH_HLEN); + daddr = &ip4->daddr; + c_hdr = (u8 *)ip4 + ip4->ihl * 4; + } else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) { + ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN); + daddr = &ip6->daddr; + c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr); + } else { + return; + } + + switch (pkt_info & ipsec_pkt_types) { + case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH): + spi = ((struct ip_auth_hdr *)c_hdr)->spi; + proto = IPPROTO_AH; + break; + case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP): + spi = ((struct ip_esp_hdr *)c_hdr)->spi; + proto = IPPROTO_ESP; + break; + default: + return; + } + + xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4); + if (unlikely(!xs)) + return; + + skb->sp = secpath_dup(skb->sp); + if (unlikely(!skb->sp)) + return; + + skb->sp->xvec[skb->sp->len++] = xs; + skb->sp->olen++; + xo = xfrm_offload(skb); + xo->flags = CRYPTO_DONE; + xo->status = CRYPTO_SUCCESS; + + adapter->rx_ipsec++; +} + +/** + * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation + * @adapter: board private structure + **/ +void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ipsec *ipsec; + size_t size; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + return; + + ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL); + if (!ipsec) + goto err1; + hash_init(ipsec->rx_sa_list); + + size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; + ipsec->rx_tbl = kzalloc(size, GFP_KERNEL); + if (!ipsec->rx_tbl) + goto err2; + + size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; + ipsec->tx_tbl = kzalloc(size, GFP_KERNEL); + if (!ipsec->tx_tbl) + goto err2; + + size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT; + ipsec->ip_tbl = kzalloc(size, GFP_KERNEL); + if (!ipsec->ip_tbl) + goto err2; + + ipsec->num_rx_sa = 0; + ipsec->num_tx_sa = 0; + + adapter->ipsec = ipsec; + ixgbe_ipsec_stop_engine(adapter); + ixgbe_ipsec_clear_hw_tables(adapter); + + adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops; + adapter->netdev->features |= NETIF_F_HW_ESP; + adapter->netdev->hw_enc_features |= NETIF_F_HW_ESP; + + return; + +err2: + kfree(ipsec->ip_tbl); + kfree(ipsec->rx_tbl); + kfree(ipsec->tx_tbl); +err1: + kfree(adapter->ipsec); + netdev_err(adapter->netdev, "Unable to allocate memory for SA tables"); +} + +/** + * ixgbe_stop_ipsec_offload - tear down the ipsec offload + * @adapter: board private structure + **/ +void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ipsec *ipsec = adapter->ipsec; + + adapter->ipsec = NULL; + if (ipsec) { + kfree(ipsec->ip_tbl); + kfree(ipsec->rx_tbl); + kfree(ipsec->tx_tbl); + kfree(ipsec); + } +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h new file mode 100644 index 000000000000..da3ce7849e85 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h @@ -0,0 +1,93 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program. If not, see <http://www.gnu.org/licenses/>. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_IPSEC_H_ +#define _IXGBE_IPSEC_H_ + +#define IXGBE_IPSEC_MAX_SA_COUNT 1024 +#define IXGBE_IPSEC_MAX_RX_IP_COUNT 128 +#define IXGBE_IPSEC_BASE_RX_INDEX 0 +#define IXGBE_IPSEC_BASE_TX_INDEX IXGBE_IPSEC_MAX_SA_COUNT + +#define IXGBE_RXTXIDX_IPS_EN 0x00000001 +#define IXGBE_RXIDX_TBL_SHIFT 1 +enum ixgbe_ipsec_tbl_sel { + ips_rx_ip_tbl = 0x01, + ips_rx_spi_tbl = 0x02, + ips_rx_key_tbl = 0x03, +}; + +#define IXGBE_RXTXIDX_IDX_SHIFT 3 +#define IXGBE_RXTXIDX_READ 0x40000000 +#define IXGBE_RXTXIDX_WRITE 0x80000000 + +#define IXGBE_RXMOD_VALID 0x00000001 +#define IXGBE_RXMOD_PROTO_ESP 0x00000004 +#define IXGBE_RXMOD_DECRYPT 0x00000008 +#define IXGBE_RXMOD_IPV6 0x00000010 + +struct rx_sa { + struct hlist_node hlist; + struct xfrm_state *xs; + __be32 ipaddr[4]; + u32 key[4]; + u32 salt; + u32 mode; + u8 iptbl_ind; + bool used; + bool decrypt; +}; + +struct rx_ip_sa { + __be32 ipaddr[4]; + u32 ref_cnt; + bool used; +}; + +struct tx_sa { + struct xfrm_state *xs; + u32 key[4]; + u32 salt; + bool encrypt; + bool used; +}; + +struct ixgbe_ipsec_tx_data { + u32 flags; + u16 trailer_len; + u16 sa_idx; +}; + +struct ixgbe_ipsec { + u16 num_rx_sa; + u16 num_tx_sa; + struct rx_ip_sa *ip_tbl; + struct rx_sa *rx_tbl; + struct tx_sa *tx_tbl; + DECLARE_HASHTABLE(rx_sa_list, 10); +}; +#endif /* _IXGBE_IPSEC_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 8e2a957aca18..4242f0213e46 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -46,8 +46,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) #endif /* IXGBE_FCOE */ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; int i; - u16 reg_idx; - u8 tcs = netdev_get_num_tc(adapter->netdev); + u16 reg_idx, pool; + u8 tcs = adapter->hw_tcs; /* verify we have DCB queueing enabled before proceeding */ if (tcs <= 1) @@ -58,12 +58,16 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) return false; /* start at VMDq register offset for SR-IOV enabled setups */ + pool = 0; reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); - for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { + for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) { /* If we are greater than indices move to next pool */ - if ((reg_idx & ~vmdq->mask) >= tcs) + if ((reg_idx & ~vmdq->mask) >= tcs) { + pool++; reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + } adapter->rx_ring[i]->reg_idx = reg_idx; + adapter->rx_ring[i]->netdev = pool ? 
NULL : adapter->netdev; } reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); @@ -92,6 +96,7 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) for (i = fcoe->offset; i < adapter->num_rx_queues; i++) { reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; adapter->rx_ring[i]->reg_idx = reg_idx; + adapter->rx_ring[i]->netdev = adapter->netdev; reg_idx++; } @@ -111,9 +116,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, unsigned int *tx, unsigned int *rx) { - struct net_device *dev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; - u8 num_tcs = netdev_get_num_tc(dev); + u8 num_tcs = adapter->hw_tcs; *tx = 0; *rx = 0; @@ -168,10 +172,9 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, **/ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) { - struct net_device *dev = adapter->netdev; + u8 num_tcs = adapter->hw_tcs; unsigned int tx_idx, rx_idx; int tc, offset, rss_i, i; - u8 num_tcs = netdev_get_num_tc(dev); /* verify we have DCB queueing enabled before proceeding */ if (num_tcs <= 1) @@ -184,6 +187,7 @@ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { adapter->tx_ring[offset + i]->reg_idx = tx_idx; adapter->rx_ring[offset + i]->reg_idx = rx_idx; + adapter->rx_ring[offset + i]->netdev = adapter->netdev; adapter->tx_ring[offset + i]->dcb_tc = tc; adapter->rx_ring[offset + i]->dcb_tc = tc; } @@ -208,14 +212,15 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) #endif /* IXGBE_FCOE */ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; + u16 reg_idx, pool; int i; - u16 reg_idx; /* only proceed if VMDq is enabled */ if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) return false; /* start at VMDq register offset for SR-IOV enabled setups */ + pool = 0; reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { #ifdef IXGBE_FCOE @@ -224,15 +229,20 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) break; #endif /* If we are greater than indices move to next pool */ - if ((reg_idx & ~vmdq->mask) >= rss->indices) + if ((reg_idx & ~vmdq->mask) >= rss->indices) { + pool++; reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + } adapter->rx_ring[i]->reg_idx = reg_idx; + adapter->rx_ring[i]->netdev = pool ? 
NULL : adapter->netdev; } #ifdef IXGBE_FCOE /* FCoE uses a linear block of queues so just assigning 1:1 */ - for (; i < adapter->num_rx_queues; i++, reg_idx++) + for (; i < adapter->num_rx_queues; i++, reg_idx++) { adapter->rx_ring[i]->reg_idx = reg_idx; + adapter->rx_ring[i]->netdev = adapter->netdev; + } #endif reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); @@ -269,8 +279,10 @@ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) { int i, reg_idx; - for (i = 0; i < adapter->num_rx_queues; i++) + for (i = 0; i < adapter->num_rx_queues; i++) { adapter->rx_ring[i]->reg_idx = i; + adapter->rx_ring[i]->netdev = adapter->netdev; + } for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++) adapter->tx_ring[i]->reg_idx = reg_idx; for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++) @@ -340,7 +352,7 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) #ifdef IXGBE_FCOE u16 fcoe_i = 0; #endif - u8 tcs = netdev_get_num_tc(adapter->netdev); + u8 tcs = adapter->hw_tcs; /* verify we have DCB queueing enabled before proceeding */ if (tcs <= 1) @@ -350,6 +362,9 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return false; + /* limit VMDq instances on the PF by number of Tx queues */ + vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs); + /* Add starting offset to total pool count */ vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; @@ -437,7 +452,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) int tcs; /* Map queue offset and counts onto allocated tx queues */ - tcs = netdev_get_num_tc(dev); + tcs = adapter->hw_tcs; /* verify we have DCB queueing enabled before proceeding */ if (tcs <= 1) @@ -512,12 +527,14 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) #ifdef IXGBE_FCOE u16 fcoe_i = 0; #endif - bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); /* only proceed if SR-IOV is enabled */ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return false; + /* limit l2fwd RSS based on total Tx queue limit */ + rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i); + /* Add starting offset to total pool count */ vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; @@ -525,7 +542,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); /* 64 pool mode with 2 queues per pool */ - if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) { + if (vmdq_i > 32) { vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; rss_m = IXGBE_RSS_2Q_MASK; rss_i = min_t(u16, rss_i, 2); @@ -602,6 +619,10 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) } #endif + /* populate TC0 for use by pool 0 */ + netdev_set_tc_queue(adapter->netdev, 0, + adapter->num_rx_queues_per_pool, 0); + return true; } @@ -701,7 +722,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; adapter->num_xdp_queues = 0; - adapter->num_rx_pools = adapter->num_rx_queues; + adapter->num_rx_pools = 1; adapter->num_rx_queues_per_pool = 1; #ifdef CONFIG_IXGBE_DCB @@ -834,7 +855,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int node = NUMA_NO_NODE; int cpu = -1; int ring_count, size; - u8 tcs = netdev_get_num_tc(adapter->netdev); + u8 tcs = adapter->hw_tcs; ring_count = txr_count + rxr_count + xdp_count; size = sizeof(struct ixgbe_q_vector) + @@ -917,11 +938,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* apply Tx 
specific ring traits */ ring->count = adapter->tx_ring_count; - if (adapter->num_rx_pools > 1) - ring->queue_index = - txr_idx % adapter->num_rx_queues_per_pool; - else - ring->queue_index = txr_idx; + ring->queue_index = txr_idx; /* assign ring to adapter */ adapter->tx_ring[txr_idx] = ring; @@ -991,11 +1008,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, #endif /* IXGBE_FCOE */ /* apply Rx specific ring traits */ ring->count = adapter->rx_ring_count; - if (adapter->num_rx_pools > 1) - ring->queue_index = - rxr_idx % adapter->num_rx_queues_per_pool; - else - ring->queue_index = rxr_idx; + ring->queue_index = rxr_idx; /* assign ring to adapter */ adapter->rx_ring[rxr_idx] = ring; @@ -1171,7 +1184,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) */ /* Disable DCB unless we only have a single traffic class */ - if (netdev_get_num_tc(adapter->netdev) > 1) { + if (adapter->hw_tcs > 1) { e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n"); netdev_reset_tc(adapter->netdev); @@ -1183,6 +1196,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) adapter->dcb_cfg.pfc_mode_enable = false; } + adapter->hw_tcs = 0; adapter->dcb_cfg.num_tcs.pg_tcs = 1; adapter->dcb_cfg.num_tcs.pfc_tcs = 1; @@ -1268,7 +1282,7 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) } void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, - u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) + u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx) { struct ixgbe_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; @@ -1282,7 +1296,7 @@ void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->fceof_saidx = cpu_to_le32(fceof_saidx); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 62a18914f00f..0da5aa2c8aba 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -192,6 +192,13 @@ static struct workqueue_struct *ixgbe_wq; static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); +static const struct net_device_ops ixgbe_netdev_ops; + +static bool netif_is_ixgbe(struct net_device *dev) +{ + return dev && (dev->netdev_ops == &ixgbe_netdev_ops); +} + static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, u32 reg, u16 *value) { @@ -1064,24 +1071,12 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) { - struct ixgbe_adapter *adapter; - struct ixgbe_hw *hw; - u32 head, tail; + unsigned int head, tail; - if (ring->l2_accel_priv) - adapter = ring->l2_accel_priv->real_adapter; - else - adapter = netdev_priv(ring->netdev); + head = ring->next_to_clean; + tail = ring->next_to_use; - hw = &adapter->hw; - head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); - tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); - - if (head != tail) - return (head < tail) ? - tail - head : (tail + ring->count - head); - - return 0; + return ((head <= tail) ? 
tail : tail + ring->count) - head; } static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) @@ -1133,6 +1128,9 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) /** * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate + * @netdev: network interface device structure + * @queue_index: Tx queue to set + * @maxrate: desired maximum transmit bitrate **/ static int ixgbe_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) @@ -1173,7 +1171,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - unsigned int total_bytes = 0, total_packets = 0; + unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0; unsigned int budget = q_vector->tx.work_limit; unsigned int i = tx_ring->next_to_clean; @@ -1204,6 +1202,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, /* update the statistics for this packet */ total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; + if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) + total_ipsec++; /* free the skb */ if (ring_is_xdp(tx_ring)) @@ -1266,6 +1266,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, u64_stats_update_end(&tx_ring->syncp); q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; + adapter->tx_ipsec += total_ipsec; if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { /* schedule immediate reset if we believe we hung */ @@ -1754,9 +1755,18 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } - skb_record_rx_queue(skb, rx_ring->queue_index); + if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP)) + ixgbe_ipsec_rx(rx_ring, rx_desc, skb); skb->protocol = eth_type_trans(skb, dev); + + /* record Rx queue, or update MACVLAN statistics */ + if (netif_is_ixgbe(dev)) + skb_record_rx_queue(skb, rx_ring->queue_index); + else + macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true, + (skb->pkt_type == PACKET_BROADCAST) || + (skb->pkt_type == PACKET_MULTICAST)); } static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, @@ -1921,10 +1931,13 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, if (IS_ERR(skb)) return true; - /* verify that the packet does not have any known errors */ - if (unlikely(ixgbe_test_staterr(rx_desc, - IXGBE_RXDADV_ERR_FRAME_ERR_MASK) && - !(netdev->features & NETIF_F_RXALL))) { + /* Verify netdev is present, and that packet does not have any + * errors that would be unacceptable to the netdev. + */ + if (!netdev || + (unlikely(ixgbe_test_staterr(rx_desc, + IXGBE_RXDADV_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL)))) { dev_kfree_skb_any(skb); return true; } @@ -2021,8 +2034,8 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add - * @rx_desc: descriptor containing length of buffer written by hardware * @skb: sk_buff to place the data into + * @size: size of data in rx_buffer * * This function will add the data contained in rx_buffer->page to the skb. 
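
The rewritten ixgbe_get_tx_pending() above drops two MMIO reads of TDH/TDT in favor of the ring's software indices; the only subtlety is unwrapping tail when it has lapped head. The arithmetic in isolation, with an arbitrary ring size for the demo:

#include <assert.h>

static unsigned int tx_pending(unsigned int head, unsigned int tail,
			       unsigned int count)
{
	/* when tail has wrapped around, shift it up by one full ring
	 * before subtracting so the difference stays non-negative */
	return ((head <= tail) ? tail : tail + count) - head;
}

int main(void)
{
	assert(tx_pending(2, 5, 8) == 3);	/* no wrap: 5 - 2 */
	assert(tx_pending(6, 1, 8) == 3);	/* wrapped: (1 + 8) - 6 */
	assert(tx_pending(4, 4, 8) == 0);	/* ring is empty */
	return 0;
}
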
* This is done either through a direct copy if the data in the buffer is @@ -2318,12 +2331,14 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); bool xdp_xmit = false; + struct xdp_buff xdp; + + xdp.rxq = &rx_ring->xdp_rxq; while (likely(total_rx_packets < budget)) { union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *rx_buffer; struct sk_buff *skb; - struct xdp_buff xdp; unsigned int size; /* return some buffers to hardware, one at a time is too slow */ @@ -2515,13 +2530,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); } -enum latency_range { - lowest_latency = 0, - low_latency = 1, - bulk_latency = 2, - latency_invalid = 255 -}; - /** * ixgbe_update_itr - update the dynamic ITR value based on statistics * @q_vector: structure containing interrupt and ring information @@ -2534,8 +2542,6 @@ enum latency_range { * based on theoretical maximum wire speed and thresholds were set based * on testing data as well as attempting to minimize response time * while increasing bulk throughput. - * this functionality is controlled by the InterruptThrottleRate module - * parameter (see ixgbe_param.c) **/ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, struct ixgbe_ring_container *ring_container) @@ -3020,6 +3026,8 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, /** * ixgbe_irq_enable - Enable default interrupt generation settings * @adapter: board private structure + * @queues: enable irqs for queues + * @flush: flush register write **/ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, bool flush) @@ -3475,6 +3483,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) /** * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) 
and MSI interrupts + * @adapter: board private structure * **/ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) @@ -3586,7 +3595,7 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 rttdcs, mtqc; - u8 tcs = netdev_get_num_tc(adapter->netdev); + u8 tcs = adapter->hw_tcs; if (hw->mac.type == ixgbe_mac_82598EB) return; @@ -3853,16 +3862,20 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); struct ixgbe_hw *hw = &adapter->hw; u32 vfreta = 0; - unsigned int pf_pool = adapter->num_vfs; /* Write redirection table to HW */ for (i = 0; i < reta_entries; i++) { + u16 pool = adapter->num_rx_pools; + vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; - if ((i & 3) == 3) { - IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), + if ((i & 3) != 3) + continue; + + while (pool--) + IXGBE_WRITE_REG(hw, + IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)), vfreta); - vfreta = 0; - } + vfreta = 0; } } @@ -3899,13 +3912,17 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; - unsigned int pf_pool = adapter->num_vfs; int i, j; /* Fill out hash function seeds */ - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), - *(adapter->rss_key + i)); + for (i = 0; i < 10; i++) { + u16 pool = adapter->num_rx_pools; + + while (pool--) + IXGBE_WRITE_REG(hw, + IXGBE_PFVFRSSRK(i, VMDQ_P(pool)), + *(adapter->rss_key + i)); + } /* Fill out the redirection table */ for (i = 0, j = 0; i < 64; i++, j++) { @@ -3933,7 +3950,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) if (adapter->ring_feature[RING_F_RSS].mask) mrqc = IXGBE_MRQC_RSSEN; } else { - u8 tcs = netdev_get_num_tc(adapter->netdev); + u8 tcs = adapter->hw_tcs; if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { if (tcs > 4) @@ -3971,7 +3988,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) if ((hw->mac.type >= ixgbe_mac_X550) && (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { - unsigned int pf_pool = adapter->num_vfs; + u16 pool = adapter->num_rx_pools; /* Enable VF RSS mode */ mrqc |= IXGBE_MRQC_MULTIPLE_RSS; @@ -3981,7 +3998,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) ixgbe_setup_vfreta(adapter); vfmrqc = IXGBE_MRQC_RSSEN; vfmrqc |= rss_field; - IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc); + + while (pool--) + IXGBE_WRITE_REG(hw, + IXGBE_PFVFMRQC(VMDQ_P(pool)), + vfmrqc); } else { ixgbe_setup_reta(adapter); mrqc |= rss_field; @@ -3991,8 +4012,8 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) /** * ixgbe_configure_rscctl - enable RSC for the indicated ring - * @adapter: address of board private structure - * @index: index of ring to set + * @adapter: address of board private structure + * @ring: structure containing ring specific data **/ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) @@ -4112,11 +4133,15 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, rxdctl &= ~0x3FFFFF; rxdctl |= 0x080420; #if (PAGE_SIZE < 8192) - } else { + /* RXDCTL.RLPML does not work on 82599 */ + } else if (hw->mac.type != ixgbe_mac_82599EB) { rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | IXGBE_RXDCTL_RLPML_EN); - /* Limit the maximum frame size so we don't overrun the skb */ + /* Limit the maximum frame size so we don't overrun the skb. 
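
ixgbe_store_vfreta() above packs four one-byte redirection entries into each 32-bit PFVFRETA register and, new in this patch, replays the finished register once per Rx pool. The packing on its own, as a standalone model where write_reg() stands in for the IXGBE_WRITE_REG() pool loop:

#include <stdint.h>
#include <stdio.h>

static void write_reg(unsigned int reg, uint32_t val)
{
	printf("PFVFRETA[%u] = 0x%08x\n", reg, val);
}

static void store_reta(const uint8_t *tbl, unsigned int entries)
{
	uint32_t vfreta = 0;
	unsigned int i;

	for (i = 0; i < entries; i++) {
		/* byte lane 0..3 inside the current register */
		vfreta |= (uint32_t)tbl[i] << ((i & 0x3) * 8);
		if ((i & 3) == 3) {	/* register full, flush it */
			write_reg(i >> 2, vfreta);
			vfreta = 0;
		}
	}
}

int main(void)
{
	const uint8_t tbl[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };

	store_reta(tbl, 8);
	return 0;
}
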
+ * This can happen in SRIOV mode when the MTU of the VF is + * higher than the MTU of the PF. + */ if (ring_uses_build_skb(ring) && !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB | @@ -4144,7 +4169,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int rss_i = adapter->ring_feature[RING_F_RSS].indices; - u16 pool; + u16 pool = adapter->num_rx_pools; /* PSRTYPE must be initialized in non 82598 adapters */ u32 psrtype = IXGBE_PSRTYPE_TCPHDR | @@ -4161,7 +4186,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) else if (rss_i > 1) psrtype |= 1u << 29; - for_each_set_bit(pool, &adapter->fwd_bitmask, 32) + while (pool--) IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); } @@ -4488,8 +4513,9 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; - if (ring->l2_accel_priv) + if (!netif_is_ixgbe(ring->netdev)) continue; + j = ring->reg_idx; vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); vlnctrl &= ~IXGBE_RXDCTL_VME; @@ -4525,8 +4551,9 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; - if (ring->l2_accel_priv) + if (!netif_is_ixgbe(ring->netdev)) continue; + j = ring->reg_idx; vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); vlnctrl |= IXGBE_RXDCTL_VME; @@ -4846,9 +4873,11 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, return -ENOMEM; } + /** * ixgbe_write_uc_addr_list - write unicast addresses to RAR table * @netdev: network interface device structure + * @vfn: pool to associate with unicast addresses * * Writes unicast address list to the RAR table. 
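
The VLAN-strip loops above no longer test ring->l2_accel_priv; they use the netif_is_ixgbe() helper added earlier in this patch, which identifies the PF by ops-pointer identity: a ring's netdev belongs to the driver exactly when its netdev_ops is the driver's own table, while a macvlan stacked on top carries different ops. The idiom in miniature, with invented types:

#include <stdbool.h>
#include <stddef.h>

struct mini_ops { int (*open)(void); };
struct mini_netdev { const struct mini_ops *ops; };

static const struct mini_ops pf_ops;	/* one instance per driver */

static bool is_pf_netdev(const struct mini_netdev *dev)
{
	/* no flag or back-pointer needed; pointer identity is enough */
	return dev && dev->ops == &pf_ops;
}

int main(void)
{
	struct mini_netdev pf = { &pf_ops }, mv = { NULL };

	return !(is_pf_netdev(&pf) && !is_pf_netdev(&mv));
}
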
* Returns: -ENOMEM on failure/insufficient address space @@ -5195,7 +5224,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int num_tc = netdev_get_num_tc(adapter->netdev); + int num_tc = adapter->hw_tcs; int i; if (!num_tc) @@ -5218,7 +5247,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int hdrm; - u8 tc = netdev_get_num_tc(adapter->netdev); + u8 tc = adapter->hw_tcs; if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) @@ -5277,29 +5306,6 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); } -static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) -{ - struct ixgbe_adapter *adapter = vadapter->real_adapter; - int rss_i = adapter->num_rx_queues_per_pool; - struct ixgbe_hw *hw = &adapter->hw; - u16 pool = vadapter->pool; - u32 psrtype = IXGBE_PSRTYPE_TCPHDR | - IXGBE_PSRTYPE_UDPHDR | - IXGBE_PSRTYPE_IPV4HDR | - IXGBE_PSRTYPE_L2HDR | - IXGBE_PSRTYPE_IPV6HDR; - - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - if (rss_i > 3) - psrtype |= 2u << 29; - else if (rss_i > 1) - psrtype |= 1u << 29; - - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); -} - /** * ixgbe_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from @@ -5352,96 +5358,45 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) rx_ring->next_to_use = 0; } -static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter, - struct ixgbe_ring *rx_ring) -{ - struct ixgbe_adapter *adapter = vadapter->real_adapter; - int index = rx_ring->queue_index + vadapter->rx_base_queue; - - /* shutdown specific queue receive and wait for dma to settle */ - ixgbe_disable_rx_queue(adapter, rx_ring); - usleep_range(10000, 20000); - ixgbe_irq_disable_queues(adapter, BIT_ULL(index)); - ixgbe_clean_rx_ring(rx_ring); - rx_ring->l2_accel_priv = NULL; -} - -static int ixgbe_fwd_ring_down(struct net_device *vdev, - struct ixgbe_fwd_adapter *accel) -{ - struct ixgbe_adapter *adapter = accel->real_adapter; - unsigned int rxbase = accel->rx_base_queue; - unsigned int txbase = accel->tx_base_queue; - int i; - - netif_tx_stop_all_queues(vdev); - - for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { - ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); - adapter->rx_ring[rxbase + i]->netdev = adapter->netdev; - } - - for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { - adapter->tx_ring[txbase + i]->l2_accel_priv = NULL; - adapter->tx_ring[txbase + i]->netdev = adapter->netdev; - } - - - return 0; -} - static int ixgbe_fwd_ring_up(struct net_device *vdev, struct ixgbe_fwd_adapter *accel) { struct ixgbe_adapter *adapter = accel->real_adapter; - unsigned int rxbase, txbase, queues; - int i, baseq, err = 0; + int i, baseq, err; - if (!test_bit(accel->pool, &adapter->fwd_bitmask)) + if (!test_bit(accel->pool, adapter->fwd_bitmask)) return 0; baseq = accel->pool * adapter->num_rx_queues_per_pool; - netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", + netdev_dbg(vdev, "pool %i:%i queues %i:%i\n", accel->pool, adapter->num_rx_pools, - baseq, baseq + adapter->num_rx_queues_per_pool, - adapter->fwd_bitmask); + baseq, baseq + adapter->num_rx_queues_per_pool); accel->netdev = vdev; - accel->rx_base_queue = rxbase = baseq; - accel->tx_base_queue = txbase = baseq; + 
accel->rx_base_queue = baseq; + accel->tx_base_queue = baseq; for (i = 0; i < adapter->num_rx_queues_per_pool; i++) - ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + adapter->rx_ring[baseq + i]->netdev = vdev; - for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { - adapter->rx_ring[rxbase + i]->netdev = vdev; - adapter->rx_ring[rxbase + i]->l2_accel_priv = accel; - ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); - } + /* Guarantee all rings are updated before we update the + * MAC address filter. + */ + wmb(); - for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { - adapter->tx_ring[txbase + i]->netdev = vdev; - adapter->tx_ring[txbase + i]->l2_accel_priv = accel; + /* ixgbe_add_mac_filter will return an index if it succeeds, so we + * need to only treat it as an error value if it is negative. + */ + err = ixgbe_add_mac_filter(adapter, vdev->dev_addr, + VMDQ_P(accel->pool)); + if (err >= 0) { + ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter); + return 0; } - queues = min_t(unsigned int, - adapter->num_rx_queues_per_pool, vdev->num_tx_queues); - err = netif_set_real_num_tx_queues(vdev, queues); - if (err) - goto fwd_queue_err; - - err = netif_set_real_num_rx_queues(vdev, queues); - if (err) - goto fwd_queue_err; - - if (is_valid_ether_addr(vdev->dev_addr)) - ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool); + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) + adapter->rx_ring[baseq + i]->netdev = NULL; - ixgbe_fwd_psrtype(accel); - ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter); - return err; -fwd_queue_err: - ixgbe_fwd_ring_down(vdev, accel); return err; } @@ -5480,6 +5435,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) ixgbe_set_rx_mode(adapter->netdev); ixgbe_restore_vlan(adapter); + ixgbe_ipsec_restore(adapter); switch (hw->mac.type) { case ixgbe_mac_82599EB: @@ -5917,21 +5873,6 @@ static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) spin_unlock(&adapter->fdir_perfect_lock); } -static int ixgbe_disable_macvlan(struct net_device *upper, void *data) -{ - if (netif_is_macvlan(upper)) { - struct macvlan_dev *vlan = netdev_priv(upper); - - if (vlan->fwd_priv) { - netif_tx_stop_all_queues(upper); - netif_carrier_off(upper); - netif_tx_disable(upper); - } - } - - return 0; -} - void ixgbe_down(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -5961,10 +5902,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) netif_carrier_off(netdev); netif_tx_disable(netdev); - /* disable any upper devices */ - netdev_walk_all_upper_dev_rcu(adapter->netdev, - ixgbe_disable_macvlan, NULL); - ixgbe_irq_disable(adapter); ixgbe_napi_disable_all(adapter); @@ -6119,6 +6056,7 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter) /** * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) * @adapter: board private structure to initialize + * @ii: pointer to ixgbe_info for device * * ixgbe_sw_init initializes the Adapter private data structure. 
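
The slimmed-down ixgbe_fwd_ring_up() above no longer reprograms rings; it publishes ring->netdev for the whole pool, forces those stores out with wmb(), and only then installs the MAC filter that starts steering packets into the pool, unwinding the ring assignments if the filter add fails. A compilable schematic of that publish-then-enable ordering, assuming invented mini_* types and a filter stub, with __sync_synchronize() standing in for wmb():

#include <errno.h>

struct mini_netdev { const char *addr; };
struct mini_ring { struct mini_netdev *netdev; };
struct mini_pool { struct mini_ring *ring[4]; int nrings; };

static int add_mac_filter(struct mini_pool *p, const char *addr)
{
	(void)p; (void)addr;
	return 0;		/* pretend a filter slot was granted */
}

static int ring_up(struct mini_pool *p, struct mini_netdev *vdev)
{
	int i;

	for (i = 0; i < p->nrings; i++)
		p->ring[i]->netdev = vdev;	/* publish rings first */

	__sync_synchronize();	/* rings visible before steering starts */

	if (add_mac_filter(p, vdev->addr) >= 0)
		return 0;

	for (i = 0; i < p->nrings; i++)	/* unwind on failure */
		p->ring[i]->netdev = NULL;
	return -ENOSPC;
}

int main(void)
{
	struct mini_ring r0 = { 0 }, r1 = { 0 };
	struct mini_netdev vdev = { "02:00:00:00:00:01" };
	struct mini_pool p = { { &r0, &r1 }, 2 };

	return ring_up(&p, &vdev);
}
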
* Fields are initialized based on PCI device information and @@ -6153,6 +6091,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); adapter->ring_feature[RING_F_FDIR].limit = fdir; adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; + adapter->ring_feature[RING_F_VMDQ].limit = 1; #ifdef CONFIG_IXGBE_DCA adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; #endif @@ -6302,7 +6241,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, } /* PF holds first pool slot */ - set_bit(0, &adapter->fwd_bitmask); + set_bit(0, adapter->fwd_bitmask); set_bit(__IXGBE_DOWN, &adapter->state); return 0; @@ -6402,6 +6341,7 @@ err_setup_tx: /** * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: pointer to ixgbe_adapter * @rx_ring: rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure @@ -6444,6 +6384,11 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; + /* XDP RX-queue info */ + if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, + rx_ring->queue_index) < 0) + goto err; + rx_ring->xdp_prog = adapter->xdp_prog; return 0; @@ -6541,6 +6486,7 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) ixgbe_clean_rx_ring(rx_ring); rx_ring->xdp_prog = NULL; + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; @@ -6646,20 +6592,12 @@ int ixgbe_open(struct net_device *netdev) goto err_req_irq; /* Notify the stack of the actual queue counts. */ - if (adapter->num_rx_pools > 1) - queues = adapter->num_rx_queues_per_pool; - else - queues = adapter->num_tx_queues; - + queues = adapter->num_tx_queues; err = netif_set_real_num_tx_queues(netdev, queues); if (err) goto err_set_queues; - if (adapter->num_rx_pools > 1 && - adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES) - queues = IXGBE_MAX_L2A_QUEUES; - else - queues = adapter->num_rx_queues; + queues = adapter->num_rx_queues; err = netif_set_real_num_rx_queues(netdev, queues); if (err) goto err_set_queues; @@ -6783,7 +6721,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; - u32 ctrl, fctrl; + u32 ctrl; u32 wufc = adapter->wol; #ifdef CONFIG_PM int retval = 0; @@ -6808,18 +6746,18 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) hw->mac.ops.stop_link_on_d3(hw); if (wufc) { + u32 fctrl; + ixgbe_set_rx_mode(netdev); /* enable the optics for 82599 SFP+ fiber as we can WoL */ if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); - /* turn on all-multi mode if wake on multicast is enabled */ - if (wufc & IXGBE_WUFC_MC) { - fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - fctrl |= IXGBE_FCTRL_MPE; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - } + /* enable the reception of multicast packets */ + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_MPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); ctrl |= IXGBE_CTRL_GIO_DIS; @@ -7216,7 +7154,6 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) /** * ixgbe_watchdog_update_link - update the link status * @adapter: pointer to the device adapter structure - * @link_speed: pointer to a u32 to store the link_speed **/ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) { @@ -7273,18 +7210,6 @@ static void 
ixgbe_update_default_up(struct ixgbe_adapter *adapter) #endif } -static int ixgbe_enable_macvlan(struct net_device *upper, void *data) -{ - if (netif_is_macvlan(upper)) { - struct macvlan_dev *vlan = netdev_priv(upper); - - if (vlan->fwd_priv) - netif_tx_wake_all_queues(upper); - } - - return 0; -} - /** * ixgbe_watchdog_link_is_up - update netif_carrier status and * print link up message @@ -7338,6 +7263,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) case IXGBE_LINK_SPEED_10GB_FULL: speed_str = "10 Gbps"; break; + case IXGBE_LINK_SPEED_5GB_FULL: + speed_str = "5 Gbps"; + break; case IXGBE_LINK_SPEED_2_5GB_FULL: speed_str = "2.5 Gbps"; break; @@ -7365,12 +7293,6 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) /* enable transmits */ netif_tx_wake_all_queues(adapter->netdev); - /* enable any upper devices */ - rtnl_lock(); - netdev_walk_all_upper_dev_rcu(adapter->netdev, - ixgbe_enable_macvlan, NULL); - rtnl_unlock(); - /* update the default user priority for VFs */ ixgbe_update_default_up(adapter); @@ -7655,6 +7577,7 @@ sfp_out: static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; + u32 cap_speed; u32 speed; bool autoneg = false; @@ -7667,16 +7590,14 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; - speed = hw->phy.autoneg_advertised; - if ((!speed) && (hw->mac.ops.get_link_capabilities)) { - hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); + hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg); - /* setup the highest link when no autoneg */ - if (!autoneg) { - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - speed = IXGBE_LINK_SPEED_10GB_FULL; - } - } + /* advertise highest capable link speed */ + if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL)) + speed = IXGBE_LINK_SPEED_10GB_FULL; + else + speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL); if (hw->mac.ops.setup_link) hw->mac.ops.setup_link(hw, speed, true); @@ -7688,7 +7609,7 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) /** * ixgbe_service_timer - Timer Call-back - * @data: pointer to adapter cast into an unsigned long + * @t: pointer to timer_list structure **/ static void ixgbe_service_timer(struct timer_list *t) { @@ -7888,10 +7809,12 @@ static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb) } static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *first) + struct ixgbe_tx_buffer *first, + struct ixgbe_ipsec_tx_data *itd) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; + u32 fceof_saidx = 0; u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { @@ -7932,7 +7855,12 @@ no_csum: vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0); + if (first->tx_flags & IXGBE_TX_FLAGS_IPSEC) { + fceof_saidx |= itd->sa_idx; + type_tucmd |= itd->flags | itd->trailer_len; + } + + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0); } #define IXGBE_SET_FLAG(_input, _flag, _result) \ @@ -7975,11 +7903,16 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, IXGBE_TX_FLAGS_CSUM, IXGBE_ADVTXD_POPTS_TXSM); - /* enble IPv4 checksum for TSO */ + /* enable IPv4 checksum for TSO */ olinfo_status |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_IPV4, IXGBE_ADVTXD_POPTS_IXSM); + 
/* enable IPsec */ + olinfo_status |= IXGBE_SET_FLAG(tx_flags, + IXGBE_TX_FLAGS_IPSEC, + IXGBE_ADVTXD_POPTS_IPSEC); + /* * Check Context must be set if Tx switch is enabled, which it * always is for case where virtual functions are running @@ -8332,14 +8265,19 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; -#ifdef IXGBE_FCOE struct ixgbe_adapter *adapter; - struct ixgbe_ring_feature *f; int txq; +#ifdef IXGBE_FCOE + struct ixgbe_ring_feature *f; #endif - if (fwd_adapter) - return skb->queue_mapping + fwd_adapter->tx_base_queue; + if (fwd_adapter) { + adapter = netdev_priv(dev); + txq = reciprocal_scale(skb_get_hash(skb), + adapter->num_rx_queues_per_pool); + + return txq + fwd_adapter->tx_base_queue; + } #ifdef IXGBE_FCOE @@ -8438,6 +8376,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, u32 tx_flags = 0; unsigned short f; u16 count = TXD_USE_COUNT(skb_headlen(skb)); + struct ixgbe_ipsec_tx_data ipsec_tx = { 0 }; __be16 protocol = skb->protocol; u8 hdr_len = 0; @@ -8542,11 +8481,16 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, } #endif /* IXGBE_FCOE */ + +#ifdef CONFIG_XFRM_OFFLOAD + if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx)) + goto out_drop; +#endif tso = ixgbe_tso(tx_ring, first, &hdr_len); if (tso < 0) goto out_drop; else if (!tso) - ixgbe_tx_csum(tx_ring, first); + ixgbe_tx_csum(tx_ring, first, &ipsec_tx); /* add the ATR filter if ATR is on */ if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) @@ -8671,7 +8615,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) /** * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding * netdev->dev_addrs - * @netdev: network interface device structure + * @dev: network interface device structure * * Returns non-zero on failure **/ @@ -8695,7 +8639,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev) /** * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding * netdev->dev_addrs - * @netdev: network interface device structure + * @dev: network interface device structure * * Returns non-zero on failure **/ @@ -8862,14 +8806,13 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) /** * ixgbe_setup_tc - configure net_device for multiple traffic classes * - * @netdev: net device to configure + * @dev: net device to configure * @tc: number of traffic classes to enable */ int ixgbe_setup_tc(struct net_device *dev, u8 tc) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; - bool pools; /* Hardware supports up to 8 traffic classes */ if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) @@ -8878,10 +8821,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS) return -EINVAL; - pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); - if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS) - return -EBUSY; - /* Hardware has to reinitialize queues and interrupts to * match packet buffer alignment. Unfortunately, the * hardware is not flexible enough to do this dynamically. 
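
For transmits entering through an offloaded macvlan, ixgbe_select_queue() above now spreads flows across the pool by mapping the skb hash into [0, num_rx_queues_per_pool) with reciprocal_scale(), which the kernel implements as a multiply-shift instead of a modulo. A demonstration with made-up numbers:

#include <stdint.h>
#include <stdio.h>

/* same formula as the kernel's reciprocal_scale() helper */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t tx_base_queue = 8, queues_per_pool = 4;
	uint32_t hash = 0xdeadbeef;	/* stand-in for skb_get_hash() */

	printf("txq = %u\n",
	       tx_base_queue + reciprocal_scale(hash, queues_per_pool));
	return 0;
}
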
@@ -8898,6 +8837,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) netdev_set_num_tc(dev, tc); ixgbe_set_prio_tc_map(adapter); + adapter->hw_tcs = tc; adapter->flags |= IXGBE_FLAG_DCB_ENABLED; if (adapter->hw.mac.type == ixgbe_mac_82598EB) { @@ -8907,10 +8847,19 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) } else { netdev_reset_tc(dev); + /* To support macvlan offload we have to use num_tc to + * restrict the queues that can be used by the device. + * By doing this we can avoid reporting a false number of + * queues. + */ + if (!tc && adapter->num_rx_pools > 1) + netdev_set_num_tc(dev, 1); + if (adapter->hw.mac.type == ixgbe_mac_82598EB) adapter->hw.fc.requested_mode = adapter->last_lfc_mode; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + adapter->hw_tcs = tc; adapter->temp_dcb_cfg.pfc_mode_enable = false; adapter->dcb_cfg.pfc_mode_enable = false; @@ -9044,6 +8993,7 @@ static int get_macvlan_queue(struct net_device *upper, void *_data) static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, u8 *queue, u64 *action) { + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; unsigned int num_vfs = adapter->num_vfs, vf; struct upper_walk_data data; struct net_device *upper; @@ -9052,11 +9002,7 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, for (vf = 0; vf < num_vfs; ++vf) { upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev); if (upper->ifindex == ifindex) { - if (adapter->num_rx_pools > 1) - *queue = vf * 2; - else - *queue = vf * adapter->num_rx_queues_per_pool; - + *queue = vf * __ALIGN_MASK(1, ~vmdq->mask); *action = vf + 1; *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; return 0; @@ -9101,9 +9047,11 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, /* Redirect to a VF or a offloaded macvlan */ if (is_tcf_mirred_egress_redirect(a)) { - int ifindex = tcf_mirred_ifindex(a); + struct net_device *dev = tcf_mirred_dev(a); - err = handle_redirect_action(adapter, ifindex, queue, + if (!dev) + return -EINVAL; + err = handle_redirect_action(adapter, dev->ifindex, queue, action); if (err == 0) return err; @@ -9362,9 +9310,6 @@ free_jump: static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter, struct tc_cls_u32_offload *cls_u32) { - if (cls_u32->common.chain_index) - return -EOPNOTSUPP; - switch (cls_u32->command) { case TC_CLSU32_NEW_KNODE: case TC_CLSU32_REPLACE_KNODE: @@ -9386,7 +9331,7 @@ static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data, { struct ixgbe_adapter *adapter = cb_priv; - if (!tc_can_offload(adapter->netdev)) + if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) return -EOPNOTSUPP; switch (type) { @@ -9444,7 +9389,7 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) struct net_device *netdev = adapter->netdev; rtnl_lock(); - ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev)); + ixgbe_setup_tc(netdev, adapter->hw_tcs); rtnl_unlock(); } @@ -9520,7 +9465,7 @@ static int ixgbe_set_features(struct net_device *netdev, /* We cannot enable ATR if SR-IOV is enabled */ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED || /* We cannot enable ATR if we have 2 or more tcs */ - (netdev_get_num_tc(netdev) > 1) || + (adapter->hw_tcs > 1) || /* We cannot enable ATR if RSS is disabled */ (adapter->ring_feature[RING_F_RSS].limit <= 1) || /* A sample rate of 0 indicates ATR disabled */ @@ -9695,8 +9640,8 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], /** * ixgbe_configure_bridge_mode - set various bridge modes - * @adapter - the private 
structure - * @mode - requested bridge mode + * @adapter: the private structure + * @mode: requested bridge mode * * Configure some settings require for various bridge modes. **/ @@ -9821,6 +9766,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) struct ixgbe_fwd_adapter *fwd_adapter = NULL; struct ixgbe_adapter *adapter = netdev_priv(pdev); int used_pools = adapter->num_vfs + adapter->num_rx_pools; + int tcs = adapter->hw_tcs ? : 1; unsigned int limit; int pool, err; @@ -9831,24 +9777,8 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) if (used_pools >= IXGBE_MAX_VF_FUNCTIONS) return ERR_PTR(-EINVAL); -#ifdef CONFIG_RPS - if (vdev->num_rx_queues != vdev->num_tx_queues) { - netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n", - vdev->name); - return ERR_PTR(-EINVAL); - } -#endif - /* Check for hardware restriction on number of rx/tx queues */ - if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES || - vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) { - netdev_info(pdev, - "%s: Supports RX/TX Queue counts 1,2, and 4\n", - pdev->name); - return ERR_PTR(-EINVAL); - } - if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && - adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) || + adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) || (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) return ERR_PTR(-EBUSY); @@ -9856,53 +9786,68 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) if (!fwd_adapter) return ERR_PTR(-ENOMEM); - pool = find_first_zero_bit(&adapter->fwd_bitmask, 32); - adapter->num_rx_pools++; - set_bit(pool, &adapter->fwd_bitmask); - limit = find_last_bit(&adapter->fwd_bitmask, 32); + pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); + set_bit(pool, adapter->fwd_bitmask); + limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1); /* Enable VMDq flag so device will be set in VM mode */ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED; adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; - adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues; - /* Force reinit of ring allocation with VMDQ enabled */ - err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); - if (err) - goto fwd_add_err; fwd_adapter->pool = pool; fwd_adapter->real_adapter = adapter; - if (netif_running(pdev)) { + /* Force reinit of ring allocation with VMDQ enabled */ + err = ixgbe_setup_tc(pdev, adapter->hw_tcs); + + if (!err && netif_running(pdev)) err = ixgbe_fwd_ring_up(vdev, fwd_adapter); - if (err) - goto fwd_add_err; - netif_tx_start_all_queues(vdev); - } - return fwd_adapter; -fwd_add_err: + if (!err) + return fwd_adapter; + /* unwind counter and free adapter struct */ netdev_info(pdev, "%s: dfwd hardware acceleration failed\n", vdev->name); - clear_bit(pool, &adapter->fwd_bitmask); - adapter->num_rx_pools--; + clear_bit(pool, adapter->fwd_bitmask); kfree(fwd_adapter); return ERR_PTR(err); } static void ixgbe_fwd_del(struct net_device *pdev, void *priv) { - struct ixgbe_fwd_adapter *fwd_adapter = priv; - struct ixgbe_adapter *adapter = fwd_adapter->real_adapter; - unsigned int limit; + struct ixgbe_fwd_adapter *accel = priv; + struct ixgbe_adapter *adapter = accel->real_adapter; + unsigned int rxbase = accel->rx_base_queue; + unsigned int limit, i; + + /* delete unicast filter associated with offloaded interface */ + ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr, + VMDQ_P(accel->pool)); + + /* disable ability to receive packets for this pool */ + 
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0); - clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask); - adapter->num_rx_pools--; + /* Allow remaining Rx packets to get flushed out of the + * Rx FIFO before we drop the netdev for the ring. + */ + usleep_range(10000, 20000); - limit = find_last_bit(&adapter->fwd_bitmask, 32); + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i]; + struct ixgbe_q_vector *qv = ring->q_vector; + + /* Make sure we aren't processing any packets and clear + * netdev to shut down the ring. + */ + if (netif_running(adapter->netdev)) + napi_synchronize(&qv->napi); + ring->netdev = NULL; + } + + clear_bit(accel->pool, adapter->fwd_bitmask); + limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools); adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; - ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter); /* go back to full RSS if we're done with our VMQs */ if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { @@ -9914,13 +9859,13 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) adapter->ring_feature[RING_F_RSS].limit = rss; } - ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); - netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", - fwd_adapter->pool, adapter->num_rx_pools, - fwd_adapter->rx_base_queue, - fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool, - adapter->fwd_bitmask); - kfree(fwd_adapter); + ixgbe_setup_tc(pdev, adapter->hw_tcs); + netdev_dbg(pdev, "pool %i:%i queues %i:%i\n", + accel->pool, adapter->num_rx_pools, + accel->rx_base_queue, + accel->rx_base_queue + + adapter->num_rx_queues_per_pool); + kfree(accel); } #define IXGBE_MAX_MAC_HDR_LEN 127 @@ -9954,6 +9899,12 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) features &= ~NETIF_F_TSO; +#ifdef CONFIG_XFRM_OFFLOAD + /* IPsec offload doesn't get along well with others *yet* */ + if (skb->sp) + features &= ~(NETIF_F_TSO | NETIF_F_HW_CSUM); +#endif + return features; } @@ -9987,7 +9938,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) /* If transitioning XDP modes reconfigure rings */ if (!!prog != !!old_prog) { - int err = ixgbe_setup_tc(dev, netdev_get_num_tc(dev)); + int err = ixgbe_setup_tc(dev, adapter->hw_tcs); if (err) { rcu_assign_pointer(adapter->xdp_prog, old_prog); @@ -10164,7 +10115,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) * ixgbe_wol_supported - Check whether device supports WoL * @adapter: the adapter private structure * @device_id: the device ID - * @subdev_id: the subsystem device ID + * @subdevice_id: the subsystem device ID * * This function is used by probe and ethtool to determine * which devices have WoL support @@ -10233,6 +10184,41 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, } /** + * ixgbe_set_fw_version - Set FW version + * @adapter: the adapter private structure + * + * This function is used by probe and ethtool to determine the FW version to + * format to display. The FW version is taken from the EEPROM/NVM. 
+ */ +static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_nvm_version nvm_ver; + + ixgbe_get_oem_prod_version(hw, &nvm_ver); + if (nvm_ver.oem_valid) { + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor, + nvm_ver.oem_release); + return; + } + + ixgbe_get_etk_id(hw, &nvm_ver); + ixgbe_get_orom_version(hw, &nvm_ver); + + if (nvm_ver.or_valid) { + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major, + nvm_ver.or_build, nvm_ver.or_patch); + return; + } + + /* Set ETrack ID format */ + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "0x%08x", nvm_ver.etk_id); +} + +/** * ixgbe_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in ixgbe_pci_tbl @@ -10508,6 +10494,7 @@ skip_sriov: NETIF_F_FCOE_MTU; } #endif /* IXGBE_FCOE */ + ixgbe_init_ipsec_offload(adapter); if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) netdev->hw_features |= NETIF_F_LRO; @@ -10568,8 +10555,7 @@ skip_sriov: device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); /* save off EEPROM version number */ - hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh); - hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl); + ixgbe_set_fw_version(adapter); /* pick up the PCI bus settings for reporting later */ if (ixgbe_pcie_from_parent(hw)) @@ -10744,6 +10730,7 @@ static void ixgbe_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); + ixgbe_stop_ipsec_offload(adapter); ixgbe_clear_interrupt_scheme(adapter); ixgbe_release_hw_control(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 654a402f0e9e..91bde90f9265 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -378,7 +378,7 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) /** * ixgbe_get_phy_type_from_id - Get the phy type - * @hw: pointer to hardware structure + * @phy_id: hardware phy id * **/ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) @@ -489,6 +489,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) * the SWFW lock * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read + * @device_type: 5 bit device type * @phy_data: Pointer to read data from PHY register **/ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, @@ -564,6 +565,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, * using the SWFW lock - this function is needed in most cases * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read + * @device_type: 5 bit device type * @phy_data: Pointer to read data from PHY register **/ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, @@ -763,6 +765,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities * @hw: pointer to hardware structure * @speed: new link speed + * @autoneg_wait_to_complete: unused **/ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, @@ -861,6 +864,8 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, /** * ixgbe_check_phy_link_tnx - Determine link and speed status * @hw: pointer to hardware structure + * @speed: link speed + * @link_up: status of link * * 
Reads the VS1 register to determine if link is up and the current speed for * the PHY. @@ -1667,7 +1672,7 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface * @hw: pointer to hardware structure * @byte_offset: byte offset at address 0xA2 - * @eeprom_data: value read + * @sff8472_data: value read * * Performs byte read operation to SFP module's SFF-8472 data over I2C **/ @@ -1714,6 +1719,7 @@ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read + * @dev_addr: device address * @data: value read * @lock: true if to take and release semaphore * @@ -1804,6 +1810,7 @@ fail: * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read + * @dev_addr: device address * @data: value read * * Performs byte read operation to SFP module's EEPROM over I2C interface at @@ -1820,6 +1827,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read + * @dev_addr: device address * @data: value read * * Performs byte read operation to SFP module's EEPROM over I2C interface at @@ -1836,6 +1844,7 @@ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write + * @dev_addr: device address * @data: value to write * @lock: true if to take and release semaphore * @@ -1904,6 +1913,7 @@ fail: * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write + * @dev_addr: device address * @data: value to write * * Performs byte write operation to SFP module's EEPROM over I2C interface at @@ -1920,6 +1930,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write + * @dev_addr: device address * @data: value to write * * Performs byte write operation to SFP module's EEPROM over I2C interface at diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index ae312c45696a..f6cc9166082a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -166,7 +166,7 @@ /** * ixgbe_ptp_setup_sdp_x540 - * @hw: the hardware private structure + * @adapter: private adapter structure * * this function enables or disables the clock out feature on SDP0 for * the X540 device. It will create a 1second periodic output that can @@ -299,7 +299,7 @@ static u64 ixgbe_ptp_read_82599(const struct cyclecounter *cc) * ixgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp * @adapter: private adapter structure * @hwtstamp: stack timestamp structure - * @systim: unsigned 64bit system time value + * @timestamp: unsigned 64bit system time value * * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value * which can be used by the stack's ptp functions. 
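For reference, the hwtstamp conversion documented above follows the common timecounter pattern: read the raw RX/TXSTMP register value, extend it to nanoseconds through the driver's timecounter, and store the result as a ktime in the skb's shared timestamp area. A minimal sketch under the assumption that the timecounter was initialized elsewhere with timecounter_init() against the device's cyclecounter:

#include <linux/ktime.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/timecounter.h>

/* Sketch only: tc must already be running; raw_tstamp is the value read
 * from the device's timestamp register.
 */
static void example_convert_to_hwtstamp(struct timecounter *tc,
					struct skb_shared_hwtstamps *hwts,
					u64 raw_tstamp)
{
	u64 ns = timecounter_cyc2time(tc, raw_tstamp);

	memset(hwts, 0, sizeof(*hwts));
	hwts->hwtstamp = ns_to_ktime(ns);
}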
@@ -1015,7 +1015,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, /** * ixgbe_ptp_set_ts_config - user entry point for timestamp mode * @adapter: pointer to adapter struct - * @ifreq: ioctl data + * @ifr: ioctl data * * Set hardware to requested mode. If unsupported, return an error with no * changes. Otherwise, store the mode for future reference. @@ -1338,7 +1338,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) /** * ixgbe_ptp_suspend - stop PTP work items - * @ adapter: pointer to adapter struct + * @adapter: pointer to adapter struct * * this function suspends PTP activity, and prevents more PTP work from being * generated, but does not destroy the PTP clock device. diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 112d24c6c9ce..27a70a52f3c9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -78,12 +78,9 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, struct ixgbe_hw *hw = &adapter->hw; int i; - adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; - /* Enable VMDq flag so device will be set in VM mode */ - adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED; - if (!adapter->ring_feature[RING_F_VMDQ].limit) - adapter->ring_feature[RING_F_VMDQ].limit = 1; + adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED | + IXGBE_FLAG_VMDQ_ENABLED; /* Allocate memory for per VF control structures */ adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), @@ -227,9 +224,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs) int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { unsigned int num_vfs = adapter->num_vfs, vf; - struct ixgbe_hw *hw = &adapter->hw; - u32 gpie; - u32 vmdctl; int rss; /* set num VFs to 0 to prevent access to vfinfo */ @@ -271,18 +265,6 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) pci_disable_sriov(adapter->pdev); #endif - /* turn off device IOV mode */ - IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0); - gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); - gpie &= ~IXGBE_GPIE_VTMODE_MASK; - IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); - - /* set default pool back to 0 */ - vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; - IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); - IXGBE_WRITE_FLUSH(hw); - /* Disable VMDq flag so device will be set in VM mode */ if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; @@ -305,10 +287,9 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) { #ifdef CONFIG_PCI_IOV struct ixgbe_adapter *adapter = pci_get_drvdata(dev); - int err = 0; - u8 num_tc; - int i; int pre_existing_vfs = pci_num_vf(dev); + int err = 0, num_rx_pools, i, limit; + u8 num_tc; if (pre_existing_vfs && pre_existing_vfs != num_vfs) err = ixgbe_disable_sriov(adapter); @@ -330,23 +311,15 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) * than we have available pools. The PCI bus driver already checks for * other values out of range. 
*/ - num_tc = netdev_get_num_tc(adapter->netdev); - - if (num_tc > 4) { - if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) { - e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC); - return -EPERM; - } - } else if ((num_tc > 1) && (num_tc <= 4)) { - if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) { - e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC); - return -EPERM; - } - } else { - if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) { - e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC); - return -EPERM; - } + num_tc = adapter->hw_tcs; + num_rx_pools = adapter->num_rx_pools; + limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC : + (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC; + + if (num_vfs > (limit - num_rx_pools)) { + e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n", + num_tc, num_rx_pools - 1, limit - num_rx_pools); + return -EPERM; } err = __ixgbe_enable_sriov(adapter, num_vfs); @@ -378,13 +351,15 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev) int err; #ifdef CONFIG_PCI_IOV u32 current_flags = adapter->flags; + int prev_num_vf = pci_num_vf(dev); #endif err = ixgbe_disable_sriov(adapter); /* Only reinit if no error and state changed */ #ifdef CONFIG_PCI_IOV - if (!err && current_flags != adapter->flags) + if (!err && (current_flags != adapter->flags || + prev_num_vf != pci_num_vf(dev))) ixgbe_sriov_reinit(adapter); #endif @@ -738,7 +713,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; - u8 num_tcs = netdev_get_num_tc(adapter->netdev); + u8 num_tcs = adapter->hw_tcs; /* remove VLAN filters belonging to this VF */ ixgbe_clear_vf_vlans(adapter, vf); @@ -946,7 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, { u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); - u8 tcs = netdev_get_num_tc(adapter->netdev); + u8 tcs = adapter->hw_tcs; if (adapter->vfinfo[vf].pf_vlan || tcs) { e_warn(drv, @@ -1034,7 +1009,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, struct net_device *dev = adapter->netdev; struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; unsigned int default_tc = 0; - u8 num_tcs = netdev_get_num_tc(dev); + u8 num_tcs = adapter->hw_tcs; /* verify the PF is supporting the correct APIs */ switch (adapter->vfinfo[vf].vf_api) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index ffa0ee5cd0f5..ca45359686d3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -235,6 +235,45 @@ struct ixgbe_thermal_sensor_data { struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS]; }; +#define NVM_OROM_OFFSET 0x17 +#define NVM_OROM_BLK_LOW 0x83 +#define NVM_OROM_BLK_HI 0x84 +#define NVM_OROM_PATCH_MASK 0xFF +#define NVM_OROM_SHIFT 8 + +#define NVM_VER_MASK 0x00FF /* version mask */ +#define NVM_VER_SHIFT 8 /* version bit shift */ +#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */ +#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */ +#define 
NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */ +#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */ +#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */ +#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */ +#define NVM_ETK_OFF_LOW 0x2D /* version low order word */ +#define NVM_ETK_OFF_HI 0x2E /* version high order word */ +#define NVM_ETK_SHIFT 16 /* high version word shift */ +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETK_VALID 0x8000 +#define NVM_INVALID_PTR 0xFFFF +#define NVM_VER_SIZE 32 /* version string size */ + +struct ixgbe_nvm_version { + u32 etk_id; + u8 nvm_major; + u16 nvm_minor; + u8 nvm_id; + + bool oem_valid; + u8 oem_major; + u8 oem_minor; + u16 oem_release; + + bool or_valid; + u8 or_major; + u16 or_build; + u8 or_patch; +}; + /* Interrupt Registers */ #define IXGBE_EICR 0x00800 #define IXGBE_EICS 0x00808 @@ -2321,11 +2360,6 @@ enum { #define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ #define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ -#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 -#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 -#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 -#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000 -#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 /* Multiple Transmit Queue Command Register */ #define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ #define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ @@ -2377,6 +2411,9 @@ enum { #define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ #define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ #define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_IPSEC_INV_PROTOCOL 0x08000000 /* overlap ERR_PE */ +#define IXGBE_RXDADV_ERR_IPSEC_INV_LENGTH 0x10000000 /* overlap ERR_OSE */ +#define IXGBE_RXDADV_ERR_IPSEC_AUTH_FAILED 0x18000000 #define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ #define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ #define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ @@ -2398,6 +2435,7 @@ enum { #define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. 
FCP_RSP */ #define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ #define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE 1588 Time Stamp */ +#define IXGBE_RXDADV_STAT_SECP 0x00020000 /* IPsec/MACsec pkt found */ /* PSRTYPE bit definitions */ #define IXGBE_PSRTYPE_TCPHDR 0x00000010 @@ -2464,13 +2502,6 @@ enum { #define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ #define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ -/* Security Processing bit Indication */ -#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000 -#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 -#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 -#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 -#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 - /* Masks to determine if packets should be dropped due to frame errors */ #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ IXGBE_RXD_ERR_CE | \ @@ -2484,6 +2515,8 @@ enum { IXGBE_RXDADV_ERR_LE | \ IXGBE_RXDADV_ERR_PE | \ IXGBE_RXDADV_ERR_OSE | \ + IXGBE_RXDADV_ERR_IPSEC_INV_PROTOCOL | \ + IXGBE_RXDADV_ERR_IPSEC_INV_LENGTH | \ IXGBE_RXDADV_ERR_USE) /* Multicast bit mask */ @@ -2862,7 +2895,7 @@ union ixgbe_adv_rx_desc { /* Context descriptors */ struct ixgbe_adv_tx_context_desc { __le32 vlan_macip_lens; - __le32 seqnum_seed; + __le32 fceof_saidx; __le32 type_tucmd_mlhl; __le32 mss_l4len_idx; }; @@ -2893,6 +2926,7 @@ struct ixgbe_adv_tx_context_desc { IXGBE_ADVTXD_POPTS_SHIFT) #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ #define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ #define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ #define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ @@ -2908,7 +2942,6 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ #define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ #define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ -#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ #define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ #define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ #define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index cb7da5f9c4da..f470d0204771 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -949,7 +949,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, u16 length, bufsz, i, start; u16 *local_buffer; - bufsz = sizeof(buf) / sizeof(buf[0]); + bufsz = ARRAY_SIZE(buf); /* Read a chunk at the pointer location */ if (!buffer) { @@ -1642,10 +1642,12 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) } /** - * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP. - * @hw: pointer to hardware structure + * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP. + * @hw: pointer to hardware structure + * @speed: the link speed to force + * @autoneg_wait_to_complete: unused * - * Configures the extern PHY and the integrated KR PHY for SFP support. + * Configures the extern PHY and the integrated KR PHY for SFP support. 
*/ static s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, @@ -1737,6 +1739,8 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) /** * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP * @hw: pointer to hardware structure + * @speed: link speed + * @autoneg_wait_to_complete: unused * * Configure the integrated PHY for native SFP support. */ @@ -1784,6 +1788,8 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed, /** * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP * @hw: pointer to hardware structure + * @speed: link speed + * @autoneg_wait_to_complete: unused * * Configure the integrated PHY for SFP support. */ @@ -1859,7 +1865,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg_wait_to_complete: true when waiting for completion is needed + * @autoneg_wait: true when waiting for completion is needed * * Setup internal/external PHY link speed based on link speed, then set * external PHY auto advertised link speed. @@ -1943,6 +1949,8 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, /** * ixgbe_setup_sgmii - Set up link for sgmii * @hw: pointer to hardware structure + * @speed: unused + * @autoneg_wait_to_complete: unused */ static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, @@ -2014,6 +2022,8 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, /** * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs * @hw: pointer to hardware structure + * @speed: the link speed to force + * @autoneg_wait: true when waiting for completion is needed */ static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait) @@ -3735,6 +3745,7 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) * ixgbe_read_phy_reg_x550a - Reads specified PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read + * @device_type: 5 bit device type * @phy_data: Pointer to read data from PHY register * * Reads a value from a specified PHY register using the SWFW lock and PHY diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index ff9d05f308ee..4400e49090b4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -75,6 +75,9 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = { IXGBEVF_STAT("tx_timeout_count", tx_timeout_count), IXGBEVF_NETDEV_STAT(multicast), IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error), + IXGBEVF_STAT("alloc_rx_page", alloc_rx_page), + IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed), + IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), }; #define IXGBEVF_QUEUE_STATS_LEN ( \ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 581f44bbd7b3..f6952425c87d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -62,7 +62,12 @@ struct ixgbevf_tx_buffer { struct ixgbevf_rx_buffer { dma_addr_t dma; struct page *page; - unsigned int page_offset; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; }; struct ixgbevf_stats { @@ 
-79,6 +84,7 @@ struct ixgbevf_tx_queue_stats { struct ixgbevf_rx_queue_stats { u64 alloc_rx_page_failed; u64 alloc_rx_buff_failed; + u64 alloc_rx_page; u64 csum_err; }; @@ -260,6 +266,9 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value) #define MIN_MSIX_Q_VECTORS 1 #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) +#define IXGBEVF_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + /* board specific private data structure */ struct ixgbevf_adapter { /* this field must be first, see ixgbevf_process_skb_fields */ @@ -287,8 +296,9 @@ struct ixgbevf_adapter { u64 hw_csum_rx_error; u64 hw_rx_no_dma_resources; int num_msix_vectors; - u32 alloc_rx_page_failed; - u32 alloc_rx_buff_failed; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 alloc_rx_page; struct msix_entry *msix_entries; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 1f4a69134ade..9b3d43d28106 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -206,28 +206,6 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, } } -static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, - struct ixgbevf_tx_buffer *tx_buffer) -{ - if (tx_buffer->skb) { - dev_kfree_skb_any(tx_buffer->skb); - if (dma_unmap_len(tx_buffer, len)) - dma_unmap_single(tx_ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - } else if (dma_unmap_len(tx_buffer, len)) { - dma_unmap_page(tx_ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - } - tx_buffer->next_to_watch = NULL; - tx_buffer->skb = NULL; - dma_unmap_len_set(tx_buffer, len, 0); - /* tx_buffer must be completely set up in the transmit path */ -} - static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring) { return ring->stats.packets; @@ -349,7 +327,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, DMA_TO_DEVICE); /* clear tx_buffer data */ - tx_buffer->skb = NULL; dma_unmap_len_set(tx_buffer, len, 0); /* unmap remaining buffers */ @@ -554,7 +531,6 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, * ixgbevf_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed * @rx_desc: Rx descriptor for current buffer - * @skb: current socket buffer containing buffer in progress * * This function updates next to clean. 
If the buffer is an EOP buffer * this function exits returning false, otherwise it will place the @@ -596,8 +572,8 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, } /* map page for use */ - dma = dma_map_page(rx_ring->dev, page, 0, - PAGE_SIZE, DMA_FROM_DEVICE); + dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE, + DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use @@ -605,13 +581,15 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, if (dma_mapping_error(rx_ring->dev, dma)) { __free_page(page); - rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_ring->rx_stats.alloc_rx_page_failed++; return false; } bi->dma = dma; bi->page = page; bi->page_offset = 0; + bi->pagecnt_bias = 1; + rx_ring->rx_stats.alloc_rx_page++; return true; } @@ -640,6 +618,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, if (!ixgbevf_alloc_mapped_page(rx_ring, bi)) break; + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + IXGBEVF_RX_BUFSZ, + DMA_FROM_DEVICE); + /* Refresh the desc even if pkt_addr didn't change * because each write-back erases this info. */ @@ -654,8 +638,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, i -= rx_ring->count; } - /* clear the hdr_addr for the next_to_use descriptor */ - rx_desc->read.hdr_addr = 0; + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; cleaned_count--; } while (cleaned_count); @@ -742,12 +726,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, new_buff->page = old_buff->page; new_buff->dma = old_buff->dma; new_buff->page_offset = old_buff->page_offset; - - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, - new_buff->page_offset, - IXGBEVF_RX_BUFSZ, - DMA_FROM_DEVICE); + new_buff->pagecnt_bias = old_buff->pagecnt_bias; } static inline bool ixgbevf_page_is_reserved(struct page *page) @@ -755,6 +734,45 @@ static inline bool ixgbevf_page_is_reserved(struct page *page) return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); } +static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer, + struct page *page, + const unsigned int truesize) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--; + + /* avoid re-using remote pages */ + if (unlikely(ixgbevf_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_ref_count(page) != pagecnt_bias)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ; + +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ)) + return false; + +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. 
+ */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + /** * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on @@ -772,12 +790,12 @@ static inline bool ixgbevf_page_is_reserved(struct page *page) **/ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *rx_buffer, + u16 size, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { struct page *page = rx_buffer->page; unsigned char *va = page_address(page) + rx_buffer->page_offset; - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = IXGBEVF_RX_BUFSZ; #else @@ -796,7 +814,6 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, return true; /* this page cannot be reused so discard it */ - put_page(page); return false; } @@ -816,32 +833,7 @@ add_tail_frag: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, (unsigned long)va & ~PAGE_MASK, size, truesize); - /* avoid re-using remote pages */ - if (unlikely(ixgbevf_page_is_reserved(page))) - return false; - -#if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) - return false; - - /* flip page offset to other buffer */ - rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ; - -#else - /* move offset up to the next cache line */ - rx_buffer->page_offset += truesize; - - if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ)) - return false; - -#endif - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. - */ - page_ref_inc(page); - - return true; + return ixgbevf_can_reuse_rx_page(rx_buffer, page, truesize); } static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring, @@ -850,11 +842,19 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring, { struct ixgbevf_rx_buffer *rx_buffer; struct page *page; + u16 size = le16_to_cpu(rx_desc->wb.upper.length); rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; page = rx_buffer->page; prefetchw(page); + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + if (likely(!skb)) { void *page_addr = page_address(page) + rx_buffer->page_offset; @@ -880,21 +880,18 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring, prefetchw(skb->data); } - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - IXGBEVF_RX_BUFSZ, - DMA_FROM_DEVICE); - /* pull page into skb */ - if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) { /* hand second half of page back to the ring */ ixgbevf_reuse_rx_page(rx_ring, rx_buffer); } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->dma, - PAGE_SIZE, DMA_FROM_DEVICE); + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE, + IXGBEVF_RX_DMA_ATTR); + __page_frag_cache_drain(page, rx_buffer->pagecnt_bias); } /* clear contents of buffer_info */ @@ -931,7 +928,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, rx_desc = 
IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); - if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) + if (!rx_desc->wb.upper.length) break; /* This memory barrier is needed to keep us from reading @@ -944,8 +941,10 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb); /* exit if we failed to retrieve a buffer */ - if (!skb) + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; break; + } cleaned_count++; @@ -1554,6 +1553,10 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, txdctl |= (1u << 8) | /* HTHRESH = 1 */ 32; /* PTHRESH = 32 */ + /* reinitialize tx_buffer_info */ + memset(ring->tx_buffer_info, 0, + sizeof(struct ixgbevf_tx_buffer) * ring->count); + clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state); IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl); @@ -1722,6 +1725,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; + union ixgbe_adv_rx_desc *rx_desc; u64 rdba = ring->dma; u32 rxdctl; u8 reg_idx = ring->reg_idx; @@ -1750,6 +1754,14 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0); ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx); + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct ixgbevf_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IXGBEVF_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + /* reset ntu and ntc to place SW in sync with hardware */ ring->next_to_clean = 0; ring->next_to_use = 0; @@ -1896,10 +1908,6 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) unsigned int flags = netdev->flags; int xcast_mode; - xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI : - (flags & (IFF_BROADCAST | IFF_MULTICAST)) ? - IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE; - /* request the most inclusive mode we need */ if (flags & IFF_PROMISC) xcast_mode = IXGBEVF_XCAST_MODE_PROMISC; @@ -2108,9 +2116,7 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter) **/ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) { - struct device *dev = rx_ring->dev; - unsigned long size; - unsigned int i; + u16 i = rx_ring->next_to_clean; /* Free Rx ring sk_buff */ if (rx_ring->skb) { @@ -2118,29 +2124,39 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) rx_ring->skb = NULL; } - /* ring already cleared, nothing to do */ - if (!rx_ring->rx_buffer_info) - return; - /* Free all the Rx ring pages */ - for (i = 0; i < rx_ring->count; i++) { + while (i != rx_ring->next_to_alloc) { struct ixgbevf_rx_buffer *rx_buffer; rx_buffer = &rx_ring->rx_buffer_info[i]; - if (rx_buffer->dma) - dma_unmap_page(dev, rx_buffer->dma, - PAGE_SIZE, DMA_FROM_DEVICE); - rx_buffer->dma = 0; - if (rx_buffer->page) - __free_page(rx_buffer->page); - rx_buffer->page = NULL; - } - size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; - memset(rx_ring->rx_buffer_info, 0, size); + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + IXGBEVF_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + rx_buffer->dma, + PAGE_SIZE, + DMA_FROM_DEVICE, + IXGBEVF_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); + i++; + if (i == rx_ring->count) + i = 0; + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; } /** @@ -2149,23 +2165,57 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) **/ static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring) { - struct ixgbevf_tx_buffer *tx_buffer_info; - unsigned long size; - unsigned int i; + u16 i = tx_ring->next_to_clean; + struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; - if (!tx_ring->tx_buffer_info) - return; + while (i != tx_ring->next_to_use) { + union ixgbe_adv_tx_desc *eop_desc, *tx_desc; - /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tx_ring->count; i++) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IXGBEVF_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } } - size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; - memset(tx_ring->tx_buffer_info, 0, size); + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; - memset(tx_ring->desc, 0, tx_ring->size); } /** @@ -2717,6 +2767,8 @@ out: void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 alloc_rx_page = 0, hw_csum_rx_error = 0; int i; if (test_bit(__IXGBEVF_DOWN, &adapter->state) || @@ -2737,15 +2789,23 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) adapter->stats.vfmprc); for (i = 0; i < adapter->num_rx_queues; i++) { - adapter->hw_csum_rx_error += - adapter->rx_ring[i]->hw_csum_rx_error; - adapter->rx_ring[i]->hw_csum_rx_error = 0; + struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; + + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; } + + adapter->hw_csum_rx_error = hw_csum_rx_error; + adapter->alloc_rx_page_failed = alloc_rx_page_failed; + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + adapter->alloc_rx_page = alloc_rx_page; } /** * 
ixgbevf_service_timer - Timer Call-back - * @data: pointer to adapter cast into an unsigned long + * @t: pointer to timer_list struct **/ static void ixgbevf_service_timer(struct timer_list *t) { @@ -2888,7 +2948,7 @@ static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter) /** * ixgbevf_watchdog_subtask - worker thread to bring link up - * @work: pointer to work_struct containing our data + * @adapter: board private structure **/ static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter) { @@ -2985,7 +3045,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) int size; size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; - tx_ring->tx_buffer_info = vzalloc(size); + tx_ring->tx_buffer_info = vmalloc(size); if (!tx_ring->tx_buffer_info) goto err; @@ -3045,7 +3105,7 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring) int size; size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; - rx_ring->rx_buffer_info = vzalloc(size); + rx_ring->rx_buffer_info = vmalloc(size); if (!rx_ring->rx_buffer_info) goto err; @@ -3487,34 +3547,37 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, const u8 hdr_len) { - dma_addr_t dma; struct sk_buff *skb = first->skb; struct ixgbevf_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned int data_len = skb->data_len; - unsigned int size = skb_headlen(skb); - unsigned int paylen = skb->len - hdr_len; + struct skb_frag_struct *frag; + dma_addr_t dma; + unsigned int data_len, size; u32 tx_flags = first->tx_flags; - __le32 cmd_type; + __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags); u16 i = tx_ring->next_to_use; tx_desc = IXGBEVF_TX_DESC(tx_ring, i); - ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen); - cmd_type = ixgbevf_tx_cmd_type(tx_flags); + ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); - if (dma_mapping_error(tx_ring->dev, dma)) - goto dma_error; - /* record length, and DMA address */ - dma_unmap_len_set(first, len, size); - dma_unmap_addr_set(first, dma, dma); + tx_buffer = first; - tx_desc->read.buffer_addr = cpu_to_le64(dma); + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); - for (;;) { while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); @@ -3525,12 +3588,12 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); i = 0; } + tx_desc->read.olinfo_status = 0; dma += IXGBE_MAX_DATA_PER_TXD; size -= IXGBE_MAX_DATA_PER_TXD; tx_desc->read.buffer_addr = cpu_to_le64(dma); - tx_desc->read.olinfo_status = 0; } if (likely(!data_len)) @@ -3544,23 +3607,15 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); i = 0; } + tx_desc->read.olinfo_status = 0; size = skb_frag_size(frag); data_len -= size; dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); - if (dma_mapping_error(tx_ring->dev, dma)) - goto dma_error; tx_buffer = &tx_ring->tx_buffer_info[i]; - dma_unmap_len_set(tx_buffer, len, size); - dma_unmap_addr_set(tx_buffer, dma, dma); - - 
tx_desc->read.buffer_addr = cpu_to_le64(dma); - tx_desc->read.olinfo_status = 0; - - frag++; } /* write last descriptor with RS and EOP bits */ @@ -3594,18 +3649,32 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, return; dma_error: dev_err(tx_ring->dev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; /* clear dma mappings for failed tx_buffer_info map */ - for (;;) { + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + if (i-- == 0) + i += tx_ring->count; tx_buffer = &tx_ring->tx_buffer_info[i]; - ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer); - if (tx_buffer == first) - break; - if (i == 0) - i = tx_ring->count; - i--; } + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + tx_ring->next_to_use = i; } @@ -4368,6 +4437,7 @@ static void __exit ixgbevf_exit_module(void) /** * ixgbevf_get_hw_dev_name - return device name string * used by hardware layer to print debugging information + * @hw: pointer to private hardware struct **/ char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) { diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 0c25006ce9af..38d3a327c1bc 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -146,6 +146,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) /** * Hyper-V variant; the VF/PF communication is through the PCI * config space. + * @hw: pointer to private hardware struct */ static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw) { @@ -285,7 +286,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) ether_addr_copy(msg_addr, addr); ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, - sizeof(msgbuf) / sizeof(u32)); + ARRAY_SIZE(msgbuf)); if (!ret_val) { msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -303,7 +304,7 @@ static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) /** * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents. - * @adapter: pointer to the port handle + * @hw: pointer to hardware structure * @reta: buffer to fill with RETA contents. * @num_rx_queues: Number of Rx queues configured for this port * @@ -455,8 +456,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, ether_addr_copy(msg_addr, addr); ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, - sizeof(msgbuf) / sizeof(u32)); - + ARRAY_SIZE(msgbuf)); msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; /* if nacked the address was rejected, use "perm_addr" */ @@ -536,6 +536,8 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, /** * Hyper-V variant - just a stub. + * @hw: unused + * @netdev: unused */ static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, struct net_device *netdev) @@ -571,7 +573,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) msgbuf[1] = xcast_mode; err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, - sizeof(msgbuf) / sizeof(u32)); + ARRAY_SIZE(msgbuf)); if (err) return err; @@ -584,6 +586,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) /** * Hyper-V variant - just a stub. 
+ * @hw: unused + * @xcast_mode: unused */ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) { @@ -609,7 +613,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, - sizeof(msgbuf) / sizeof(u32)); + ARRAY_SIZE(msgbuf)); if (err) goto mbx_err; @@ -626,6 +630,10 @@ mbx_err: /** * Hyper-V variant - just a stub. + * @hw: unused + * @vlan: unused + * @vind: unused + * @vlan_on: unused */ static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) @@ -655,7 +663,7 @@ static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw, * @hw: pointer to hardware structure * @speed: pointer to link speed * @link_up: true if link is up, false otherwise - * @autoneg_wait_to_complete: true when waiting for completion is needed + * @autoneg_wait_to_complete: unused * * Reads the links register to determine if link is up and the current speed **/ @@ -740,6 +748,10 @@ out: /** * Hyper-V variant; there is no mailbox communication. + * @hw: pointer to private hardware struct + * @speed: pointer to link speed + * @link_up: true if link is up, false otherwise + * @autoneg_wait_to_complete: unused */ static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, @@ -813,7 +825,7 @@ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) msgbuf[1] = max_size; ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, - sizeof(msgbuf) / sizeof(u32)); + ARRAY_SIZE(msgbuf)); if (ret_val) return ret_val; if ((msgbuf[0] & IXGBE_VF_SET_LPE) && @@ -859,8 +871,7 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) msg[1] = api; msg[2] = 0; - err = ixgbevf_write_msg_read_ack(hw, msg, msg, - sizeof(msg) / sizeof(u32)); + err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg)); if (!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -911,8 +922,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, msg[0] = IXGBE_VF_GET_QUEUE; msg[1] = msg[2] = msg[3] = msg[4] = 0; - err = ixgbevf_write_msg_read_ack(hw, msg, msg, - sizeof(msg) / sizeof(u32)); + err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg)); if (!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index da6fb825afea..ebe5c9148935 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -60,7 +60,7 @@ config MVNETA depends on ARCH_MVEBU || COMPILE_TEST depends on HAS_DMA select MVMDIO - select FIXED_PHY + select PHYLINK ---help--- This driver supports the network interface units in the Marvell ARMADA XP, ARMADA 370, ARMADA 38x and diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index a539263cd79c..25e9a551cc8c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -28,7 +28,7 @@ #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/phy.h> -#include <linux/phy_fixed.h> +#include <linux/phylink.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <net/hwbm.h> @@ -189,6 +189,7 @@ #define MVNETA_GMAC_CTRL_0 0x2c00 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc +#define MVNETA_GMAC0_PORT_1000BASE_X BIT(1) #define MVNETA_GMAC0_PORT_ENABLE BIT(0) #define MVNETA_GMAC_CTRL_2 0x2c08 #define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0) @@ -204,13 +205,19 @@ #define 
MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5) #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6) #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7) +#define MVNETA_GMAC_AN_COMPLETE BIT(11) +#define MVNETA_GMAC_SYNC_OK BIT(14) #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0) #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) #define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2) +#define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3) +#define MVNETA_GMAC_INBAND_RESTART_AN BIT(4) #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) #define MVNETA_GMAC_AN_SPEED_EN BIT(7) +#define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8) +#define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9) #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11) #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) @@ -237,6 +244,12 @@ #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2)) #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff +#define MVNETA_LPI_CTRL_0 0x2cc0 +#define MVNETA_LPI_CTRL_1 0x2cc4 +#define MVNETA_LPI_REQUEST_ENABLE BIT(0) +#define MVNETA_LPI_CTRL_2 0x2cc8 +#define MVNETA_LPI_STATUS 0x2ccc + #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff /* Descriptor ring Macros */ @@ -313,6 +326,11 @@ #define MVNETA_RX_GET_BM_POOL_ID(rxd) \ (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT) +enum { + ETHTOOL_STAT_EEE_WAKEUP, + ETHTOOL_MAX_STATS, +}; + struct mvneta_statistic { unsigned short offset; unsigned short type; @@ -321,6 +339,7 @@ struct mvneta_statistic { #define T_REG_32 32 #define T_REG_64 64 +#define T_SW 1 static const struct mvneta_statistic mvneta_statistics[] = { { 0x3000, T_REG_64, "good_octets_received", }, @@ -355,6 +374,7 @@ static const struct mvneta_statistic mvneta_statistics[] = { { 0x304c, T_REG_32, "broadcast_frames_sent", }, { 0x3054, T_REG_32, "fc_sent", }, { 0x300c, T_REG_32, "internal_mac_transmit_err", }, + { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", }, }; struct mvneta_pcpu_stats { @@ -407,20 +427,20 @@ struct mvneta_port { u16 tx_ring_size; u16 rx_ring_size; - struct mii_bus *mii_bus; phy_interface_t phy_interface; - struct device_node *phy_node; - unsigned int link; - unsigned int duplex; - unsigned int speed; + struct device_node *dn; unsigned int tx_csum_limit; - unsigned int use_inband_status:1; + struct phylink *phylink; struct mvneta_bm *bm_priv; struct mvneta_bm_pool *pool_long; struct mvneta_bm_pool *pool_short; int bm_win_id; + bool eee_enabled; + bool eee_active; + bool tx_lpi_enabled; + u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; u32 indir[MVNETA_RSS_LU_TABLE_SIZE]; @@ -1214,10 +1234,6 @@ static void mvneta_port_disable(struct mvneta_port *pp) val &= ~MVNETA_GMAC0_PORT_ENABLE; mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); - pp->link = 0; - pp->duplex = -1; - pp->speed = 0; - udelay(200); } @@ -1277,44 +1293,6 @@ static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); } -static void mvneta_set_autoneg(struct mvneta_port *pp, int enable) -{ - u32 val; - - if (enable) { - val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~(MVNETA_GMAC_FORCE_LINK_PASS | - MVNETA_GMAC_FORCE_LINK_DOWN | - MVNETA_GMAC_AN_FLOW_CTRL_EN); - val |= MVNETA_GMAC_INBAND_AN_ENABLE | - MVNETA_GMAC_AN_SPEED_EN | - MVNETA_GMAC_AN_DUPLEX_EN; - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); - - val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); - val |= MVNETA_GMAC_1MS_CLOCK_ENABLE; - mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); - - val = 
mvreg_read(pp, MVNETA_GMAC_CTRL_2); - val |= MVNETA_GMAC2_INBAND_AN_ENABLE; - mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); - } else { - val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE | - MVNETA_GMAC_AN_SPEED_EN | - MVNETA_GMAC_AN_DUPLEX_EN); - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); - - val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); - val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE; - mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); - - val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); - val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE; - mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); - } -} - static void mvneta_percpu_unmask_interrupt(void *arg) { struct mvneta_port *pp = arg; @@ -1467,7 +1445,6 @@ static void mvneta_defaults_set(struct mvneta_port *pp) val &= ~MVNETA_PHY_POLLING_ENABLE; mvreg_write(pp, MVNETA_UNIT_CONTROL, val); - mvneta_set_autoneg(pp, pp->use_inband_status); mvneta_set_ucast_table(pp, -1); mvneta_set_special_mcast_table(pp, -1); mvneta_set_other_mcast_table(pp, -1); @@ -2692,26 +2669,11 @@ static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static int mvneta_fixed_link_update(struct mvneta_port *pp, - struct phy_device *phy) +static void mvneta_link_change(struct mvneta_port *pp) { - struct fixed_phy_status status; - struct fixed_phy_status changed = {}; u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); - status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); - if (gmac_stat & MVNETA_GMAC_SPEED_1000) - status.speed = SPEED_1000; - else if (gmac_stat & MVNETA_GMAC_SPEED_100) - status.speed = SPEED_100; - else - status.speed = SPEED_10; - status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); - changed.link = 1; - changed.speed = 1; - changed.duplex = 1; - fixed_phy_update_state(phy, &status, &changed); - return 0; + phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP)); } /* NAPI handler @@ -2727,7 +2689,6 @@ static int mvneta_poll(struct napi_struct *napi, int budget) u32 cause_rx_tx; int rx_queue; struct mvneta_port *pp = netdev_priv(napi->dev); - struct net_device *ndev = pp->dev; struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); if (!netif_running(pp->dev)) { @@ -2741,12 +2702,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget) u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); - if (pp->use_inband_status && (cause_misc & - (MVNETA_CAUSE_PHY_STATUS_CHANGE | - MVNETA_CAUSE_LINK_CHANGE | - MVNETA_CAUSE_PSC_SYNC_CHANGE))) { - mvneta_fixed_link_update(pp, ndev->phydev); - } + + if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE)) + mvneta_link_change(pp); } /* Release Tx descriptors */ @@ -3060,7 +3019,6 @@ static int mvneta_setup_txqs(struct mvneta_port *pp) static void mvneta_start_dev(struct mvneta_port *pp) { int cpu; - struct net_device *ndev = pp->dev; mvneta_max_rx_size_set(pp, pp->pkt_size); mvneta_txq_max_tx_size_set(pp, pp->pkt_size); @@ -3085,19 +3043,17 @@ static void mvneta_start_dev(struct mvneta_port *pp) mvreg_write(pp, MVNETA_INTR_MISC_MASK, MVNETA_CAUSE_PHY_STATUS_CHANGE | - MVNETA_CAUSE_LINK_CHANGE | - MVNETA_CAUSE_PSC_SYNC_CHANGE); + MVNETA_CAUSE_LINK_CHANGE); - phy_start(ndev->phydev); + phylink_start(pp->phylink); netif_tx_start_all_queues(pp->dev); } static void mvneta_stop_dev(struct mvneta_port *pp) { unsigned int cpu; - struct net_device *ndev = pp->dev; - phy_stop(ndev->phydev); + phylink_stop(pp->phylink); if (!pp->neta_armada3700) { for_each_online_cpu(cpu) { @@ -3251,103 +3207,260 
@@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr) return 0; } -static void mvneta_adjust_link(struct net_device *ndev) +static void mvneta_validate(struct net_device *ndev, unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + /* We only support QSGMII, SGMII, 802.3z and RGMII modes */ + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_QSGMII && + state->interface != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_8023z(state->interface) && + !phy_interface_mode_is_rgmii(state->interface)) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + } + + /* Allow all the expected bits */ + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + + /* Asymmetric pause is unsupported */ + phylink_set(mask, Pause); + /* Half-duplex at speeds higher than 100Mbit is unsupported */ + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + + if (!phy_interface_mode_is_8023z(state->interface)) { + /* 10M and 100M are only supported in non-802.3z mode */ + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + } + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static int mvneta_mac_link_state(struct net_device *ndev, + struct phylink_link_state *state) { struct mvneta_port *pp = netdev_priv(ndev); - struct phy_device *phydev = ndev->phydev; - int status_change = 0; + u32 gmac_stat; + + gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); - if (phydev->link) { - if ((pp->speed != phydev->speed) || - (pp->duplex != phydev->duplex)) { - u32 val; + if (gmac_stat & MVNETA_GMAC_SPEED_1000) + state->speed = SPEED_1000; + else if (gmac_stat & MVNETA_GMAC_SPEED_100) + state->speed = SPEED_100; + else + state->speed = SPEED_10; - val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | - MVNETA_GMAC_CONFIG_GMII_SPEED | - MVNETA_GMAC_CONFIG_FULL_DUPLEX); + state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE); + state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); + state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); + + state->pause = 0; + if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE) + state->pause |= MLO_PAUSE_RX; + if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE) + state->pause |= MLO_PAUSE_TX; + + return 1; +} - if (phydev->duplex) - val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; +static void mvneta_mac_an_restart(struct net_device *ndev) +{ + struct mvneta_port *pp = netdev_priv(ndev); + u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); - if (phydev->speed == SPEED_1000) - val |= MVNETA_GMAC_CONFIG_GMII_SPEED; - else if (phydev->speed == SPEED_100) - val |= MVNETA_GMAC_CONFIG_MII_SPEED; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, + gmac_an | MVNETA_GMAC_INBAND_RESTART_AN); + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, + gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN); +} - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); +static void mvneta_mac_config(struct net_device *ndev, unsigned int mode, + const struct phylink_link_state *state) +{ + struct mvneta_port *pp = netdev_priv(ndev); + u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); + u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); + u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); + u32 
new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + + new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X; + new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE | + MVNETA_GMAC2_PORT_RESET); + new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE; + new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_INBAND_RESTART_AN | + MVNETA_GMAC_CONFIG_MII_SPEED | + MVNETA_GMAC_CONFIG_GMII_SPEED | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL | + MVNETA_GMAC_CONFIG_FLOW_CTRL | + MVNETA_GMAC_AN_FLOW_CTRL_EN | + MVNETA_GMAC_CONFIG_FULL_DUPLEX | + MVNETA_GMAC_AN_DUPLEX_EN); - pp->duplex = phydev->duplex; - pp->speed = phydev->speed; - } + /* Even though it might look weird, when we're configured in + * SGMII or QSGMII mode, the RGMII bit needs to be set. + */ + new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII; + + if (state->interface == PHY_INTERFACE_MODE_QSGMII || + state->interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_8023z(state->interface)) + new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE; + + if (phylink_test(state->advertising, Pause)) + new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; + if (state->pause & MLO_PAUSE_TXRX_MASK) + new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL; + + if (!phylink_autoneg_inband(mode)) { + /* Phy or fixed speed */ + if (state->duplex) + new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; + + if (state->speed == SPEED_1000) + new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED; + else if (state->speed == SPEED_100) + new_an |= MVNETA_GMAC_CONFIG_MII_SPEED; + } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { + /* SGMII mode receives the state from the PHY */ + new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE; + new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; + new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | + MVNETA_GMAC_FORCE_LINK_PASS)) | + MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN; + } else { + /* 802.3z negotiation - only 1000base-X */ + new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X; + new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; + new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | + MVNETA_GMAC_FORCE_LINK_PASS)) | + MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_CONFIG_GMII_SPEED | + /* The MAC only supports FD mode */ + MVNETA_GMAC_CONFIG_FULL_DUPLEX; + + if (state->pause & MLO_PAUSE_AN && state->an_enabled) + new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN; } - if (phydev->link != pp->link) { - if (!phydev->link) { - pp->duplex = -1; - pp->speed = 0; - } + /* Armada 370 documentation says we can only change the port mode + * and in-band enable when the link is down, so force it down + * while making these changes. 
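
The new mvneta_mac_config() above applies one read-modify-write discipline to all four GMAC registers: read once, clear every mode-dependent bit, OR in the new configuration, and (further down in the hunk) write back only the registers whose value actually changed. A minimal sketch of that discipline; the helper name is ours, the driver open-codes it per register:

    /* Illustrative helper only; mvreg_read()/mvreg_write() are the
     * driver's real accessors, mvneta_rmw() is hypothetical. */
    static void mvneta_rmw(struct mvneta_port *pp, u32 reg, u32 clear, u32 set)
    {
            u32 old = mvreg_read(pp, reg);
            u32 val = (old & ~clear) | set;

            /* Writing only on change avoids disturbing a running link
             * when an unrelated field is reconfigured. */
            if (val != old)
                    mvreg_write(pp, reg, val);
    }
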
We also do this for GMAC_CTRL2 */ + if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X || + (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE || + (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) { + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, + (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) | + MVNETA_GMAC_FORCE_LINK_DOWN); + } - pp->link = phydev->link; - status_change = 1; + if (new_ctrl0 != gmac_ctrl0) + mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); + if (new_ctrl2 != gmac_ctrl2) + mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); + if (new_clk != gmac_clk) + mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk); + if (new_an != gmac_an) + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an); + + if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) { + while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & + MVNETA_GMAC2_PORT_RESET) != 0) + continue; } +} - if (status_change) { - if (phydev->link) { - if (!pp->use_inband_status) { - u32 val = mvreg_read(pp, - MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; - val |= MVNETA_GMAC_FORCE_LINK_PASS; - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, - val); - } - mvneta_port_up(pp); - } else { - if (!pp->use_inband_status) { - u32 val = mvreg_read(pp, - MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~MVNETA_GMAC_FORCE_LINK_PASS; - val |= MVNETA_GMAC_FORCE_LINK_DOWN; - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, - val); - } - mvneta_port_down(pp); - } - phy_print_status(phydev); +static void mvneta_set_eee(struct mvneta_port *pp, bool enable) +{ + u32 lpi_ctl1; + + lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); + if (enable) + lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE; + else + lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE; + mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); +} + +static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode) +{ + struct mvneta_port *pp = netdev_priv(ndev); + u32 val; + + mvneta_port_down(pp); + + if (!phylink_autoneg_inband(mode)) { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~MVNETA_GMAC_FORCE_LINK_PASS; + val |= MVNETA_GMAC_FORCE_LINK_DOWN; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); } + + pp->eee_active = false; + mvneta_set_eee(pp, false); } -static int mvneta_mdio_probe(struct mvneta_port *pp) +static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode, + struct phy_device *phy) { - struct phy_device *phy_dev; - struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + struct mvneta_port *pp = netdev_priv(ndev); + u32 val; - phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0, - pp->phy_interface); - if (!phy_dev) { - netdev_err(pp->dev, "could not find the PHY\n"); - return -ENODEV; + if (!phylink_autoneg_inband(mode)) { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; + val |= MVNETA_GMAC_FORCE_LINK_PASS; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); } - phy_ethtool_get_wol(phy_dev, &wol); - device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); + mvneta_port_up(pp); - phy_dev->supported &= PHY_GBIT_FEATURES; - phy_dev->advertising = phy_dev->supported; + if (phy && pp->eee_enabled) { + pp->eee_active = phy_init_eee(phy, 0) >= 0; + mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled); + } +} - pp->link = 0; - pp->duplex = 0; - pp->speed = 0; +static const struct phylink_mac_ops mvneta_phylink_ops = { + .validate = mvneta_validate, + .mac_link_state = mvneta_mac_link_state, + .mac_an_restart = mvneta_mac_an_restart, + .mac_config = mvneta_mac_config, + .mac_link_down = mvneta_mac_link_down, + 
.mac_link_up = mvneta_mac_link_up, +}; - return 0; +static int mvneta_mdio_probe(struct mvneta_port *pp) +{ + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0); + + if (err) + netdev_err(pp->dev, "could not attach PHY: %d\n", err); + + phylink_ethtool_get_wol(pp->phylink, &wol); + device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); + + return err; } static void mvneta_mdio_remove(struct mvneta_port *pp) { - struct net_device *ndev = pp->dev; - - phy_disconnect(ndev->phydev); + phylink_disconnect_phy(pp->phylink); } /* Electing a CPU must be done in an atomic way: it should be done @@ -3455,8 +3568,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); mvreg_write(pp, MVNETA_INTR_MISC_MASK, MVNETA_CAUSE_PHY_STATUS_CHANGE | - MVNETA_CAUSE_LINK_CHANGE | - MVNETA_CAUSE_PSC_SYNC_CHANGE); + MVNETA_CAUSE_LINK_CHANGE); netif_tx_start_all_queues(pp->dev); spin_unlock(&pp->lock); return 0; @@ -3497,8 +3609,7 @@ static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node) on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); mvreg_write(pp, MVNETA_INTR_MISC_MASK, MVNETA_CAUSE_PHY_STATUS_CHANGE | - MVNETA_CAUSE_LINK_CHANGE | - MVNETA_CAUSE_PSC_SYNC_CHANGE); + MVNETA_CAUSE_LINK_CHANGE); netif_tx_start_all_queues(pp->dev); return 0; } @@ -3626,10 +3737,9 @@ static int mvneta_stop(struct net_device *dev) static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - if (!dev->phydev) - return -ENOTSUPP; + struct mvneta_port *pp = netdev_priv(dev); - return phy_mii_ioctl(dev->phydev, ifr, cmd); + return phylink_mii_ioctl(pp->phylink, ifr, cmd); } /* Ethtool methods */ @@ -3640,44 +3750,25 @@ mvneta_ethtool_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd) { struct mvneta_port *pp = netdev_priv(ndev); - struct phy_device *phydev = ndev->phydev; - - if (!phydev) - return -ENODEV; - - if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) { - u32 val; - - mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE); - - if (cmd->base.autoneg == AUTONEG_DISABLE) { - val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | - MVNETA_GMAC_CONFIG_GMII_SPEED | - MVNETA_GMAC_CONFIG_FULL_DUPLEX); - - if (phydev->duplex) - val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; - if (phydev->speed == SPEED_1000) - val |= MVNETA_GMAC_CONFIG_GMII_SPEED; - else if (phydev->speed == SPEED_100) - val |= MVNETA_GMAC_CONFIG_MII_SPEED; + return phylink_ethtool_ksettings_set(pp->phylink, cmd); +} - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); - } +/* Get link ksettings for ethtools */ +static int +mvneta_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct mvneta_port *pp = netdev_priv(ndev); - pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE); - netdev_info(pp->dev, "autoneg status set to %i\n", - pp->use_inband_status); + return phylink_ethtool_ksettings_get(pp->phylink, cmd); +} - if (netif_running(ndev)) { - mvneta_port_down(pp); - mvneta_port_up(pp); - } - } +static int mvneta_ethtool_nway_reset(struct net_device *dev) +{ + struct mvneta_port *pp = netdev_priv(dev); - return phy_ethtool_ksettings_set(ndev->phydev, cmd); + return phylink_ethtool_nway_reset(pp->phylink); } /* Set interrupt coalescing for ethtools */ @@ -3769,6 +3860,22 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev, return 0; } +static 
void mvneta_ethtool_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mvneta_port *pp = netdev_priv(dev); + + phylink_ethtool_get_pauseparam(pp->phylink, pause); +} + +static int mvneta_ethtool_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mvneta_port *pp = netdev_priv(dev); + + return phylink_ethtool_set_pauseparam(pp->phylink, pause); +} + static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, u8 *data) { @@ -3785,26 +3892,35 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp) { const struct mvneta_statistic *s; void __iomem *base = pp->base; - u32 high, low, val; - u64 val64; + u32 high, low; + u64 val; int i; for (i = 0, s = mvneta_statistics; s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); s++, i++) { + val = 0; + switch (s->type) { case T_REG_32: val = readl_relaxed(base + s->offset); - pp->ethtool_stats[i] += val; break; case T_REG_64: /* Docs say to read low 32-bit then high */ low = readl_relaxed(base + s->offset); high = readl_relaxed(base + s->offset + 4); - val64 = (u64)high << 32 | low; - pp->ethtool_stats[i] += val64; + val = (u64)high << 32 | low; + break; + case T_SW: + switch (s->offset) { + case ETHTOOL_STAT_EEE_WAKEUP: + val = phylink_get_eee_err(pp->phylink); + break; + } break; } + + pp->ethtool_stats[i] += val; } } @@ -3939,28 +4055,81 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, static void mvneta_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { - wol->supported = 0; - wol->wolopts = 0; + struct mvneta_port *pp = netdev_priv(dev); - if (dev->phydev) - phy_ethtool_get_wol(dev->phydev, wol); + phylink_ethtool_get_wol(pp->phylink, wol); } static int mvneta_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { + struct mvneta_port *pp = netdev_priv(dev); int ret; - if (!dev->phydev) - return -EOPNOTSUPP; - - ret = phy_ethtool_set_wol(dev->phydev, wol); + ret = phylink_ethtool_set_wol(pp->phylink, wol); if (!ret) device_set_wakeup_enable(&dev->dev, !!wol->wolopts); return ret; } +static int mvneta_ethtool_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct mvneta_port *pp = netdev_priv(dev); + + return phylink_ethtool_get_module_info(pp->phylink, modinfo); +} + +static int mvneta_ethtool_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, u8 *buf) +{ + struct mvneta_port *pp = netdev_priv(dev); + + return phylink_ethtool_get_module_eeprom(pp->phylink, ee, buf); +} + +static int mvneta_ethtool_get_eee(struct net_device *dev, + struct ethtool_eee *eee) +{ + struct mvneta_port *pp = netdev_priv(dev); + u32 lpi_ctl0; + + lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); + + eee->eee_enabled = pp->eee_enabled; + eee->eee_active = pp->eee_active; + eee->tx_lpi_enabled = pp->tx_lpi_enabled; + eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale; + + return phylink_ethtool_get_eee(pp->phylink, eee); +} + +static int mvneta_ethtool_set_eee(struct net_device *dev, + struct ethtool_eee *eee) +{ + struct mvneta_port *pp = netdev_priv(dev); + u32 lpi_ctl0; + + /* The Armada 37x documents do not give limits for this other than + * it being an 8-bit register. 
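
The EEE handlers in this region treat the LPI wake timer as an 8-bit field in bits 15:8 of MVNETA_LPI_CTRL_0: mvneta_ethtool_get_eee() above recovers it with a right shift by 8, and mvneta_ethtool_set_eee() just below masks out 0xff << 8 before inserting the new value, rejecting anything over 255. A sketch of that field handling; the macro names are ours, the bit layout matches the code:

    #define LPI_CTRL_0_TS_SHIFT     8
    #define LPI_CTRL_0_TS_MASK      (0xff << LPI_CTRL_0_TS_SHIFT)

    /* Returns a negative errno for out-of-range values, as the driver
     * does, rather than silently clamping. */
    static int lpi_ctl0_set_timer(u32 *lpi_ctl0, u32 timer)
    {
            if (timer > 255)        /* only 8 bits are available */
                    return -EINVAL;
            *lpi_ctl0 = (*lpi_ctl0 & ~LPI_CTRL_0_TS_MASK) |
                        (timer << LPI_CTRL_0_TS_SHIFT);
            return 0;
    }
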
*/ + if (eee->tx_lpi_enabled && + (eee->tx_lpi_timer < 0 || eee->tx_lpi_timer > 255)) + return -EINVAL; + + lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); + lpi_ctl0 &= ~(0xff << 8); + lpi_ctl0 |= eee->tx_lpi_timer << 8; + mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0); + + pp->eee_enabled = eee->eee_enabled; + pp->tx_lpi_enabled = eee->tx_lpi_enabled; + + mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled); + + return phylink_ethtool_set_eee(pp->phylink, eee); +} + static const struct net_device_ops mvneta_netdev_ops = { .ndo_open = mvneta_open, .ndo_stop = mvneta_stop, @@ -3974,13 +4143,15 @@ static const struct net_device_ops mvneta_netdev_ops = { }; static const struct ethtool_ops mvneta_eth_tool_ops = { - .nway_reset = phy_ethtool_nway_reset, + .nway_reset = mvneta_ethtool_nway_reset, .get_link = ethtool_op_get_link, .set_coalesce = mvneta_ethtool_set_coalesce, .get_coalesce = mvneta_ethtool_get_coalesce, .get_drvinfo = mvneta_ethtool_get_drvinfo, .get_ringparam = mvneta_ethtool_get_ringparam, .set_ringparam = mvneta_ethtool_set_ringparam, + .get_pauseparam = mvneta_ethtool_get_pauseparam, + .set_pauseparam = mvneta_ethtool_set_pauseparam, .get_strings = mvneta_ethtool_get_strings, .get_ethtool_stats = mvneta_ethtool_get_stats, .get_sset_count = mvneta_ethtool_get_sset_count, @@ -3988,10 +4159,14 @@ static const struct ethtool_ops mvneta_eth_tool_ops = { .get_rxnfc = mvneta_ethtool_get_rxnfc, .get_rxfh = mvneta_ethtool_get_rxfh, .set_rxfh = mvneta_ethtool_set_rxfh, - .get_link_ksettings = phy_ethtool_get_link_ksettings, + .get_link_ksettings = mvneta_ethtool_get_link_ksettings, .set_link_ksettings = mvneta_ethtool_set_link_ksettings, .get_wol = mvneta_ethtool_get_wol, .set_wol = mvneta_ethtool_set_wol, + .get_module_info = mvneta_ethtool_get_module_info, + .get_module_eeprom = mvneta_ethtool_get_module_eeprom, + .get_eee = mvneta_ethtool_get_eee, + .set_eee = mvneta_ethtool_set_eee, }; /* Initialize hw */ @@ -4091,42 +4266,16 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp, /* Power up the port */ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) { - u32 ctrl; - /* MAC Cause register should be cleared */ mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); - ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2); - - /* Even though it might look weird, when we're configured in - * SGMII or QSGMII mode, the RGMII bit needs to be set. 
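
Several hunks in this patch rely on phy_interface_mode_is_rgmii(), and the mvneta_port_power_up() rework just below uses it to collapse the old four-case RGMII switch into one test. For reference, the helper in include/linux/phy.h of this kernel generation depends on the four RGMII variants being consecutive values of the phy_interface_t enum (quoted from memory, so treat as approximate):

    static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
    {
            return mode >= PHY_INTERFACE_MODE_RGMII &&
                   mode <= PHY_INTERFACE_MODE_RGMII_TXID;
    }
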
- */ - switch(phy_mode) { - case PHY_INTERFACE_MODE_QSGMII: + if (phy_mode == PHY_INTERFACE_MODE_QSGMII) mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); - ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; - break; - case PHY_INTERFACE_MODE_SGMII: + else if (phy_mode == PHY_INTERFACE_MODE_SGMII || + phy_mode == PHY_INTERFACE_MODE_1000BASEX) mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); - ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; - break; - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - ctrl |= MVNETA_GMAC2_PORT_RGMII; - break; - default: + else if (!phy_interface_mode_is_rgmii(phy_mode)) return -EINVAL; - } - - /* Cancel Port Reset */ - ctrl &= ~MVNETA_GMAC2_PORT_RESET; - mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); - - while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & - MVNETA_GMAC2_PORT_RESET) != 0) - continue; return 0; } @@ -4136,14 +4285,13 @@ static int mvneta_probe(struct platform_device *pdev) { struct resource *res; struct device_node *dn = pdev->dev.of_node; - struct device_node *phy_node; struct device_node *bm_node; struct mvneta_port *pp; struct net_device *dev; + struct phylink *phylink; const char *dt_mac_addr; char hw_mac_addr[ETH_ALEN]; const char *mac_from; - const char *managed; int tx_csum_limit; int phy_mode; int err; @@ -4159,31 +4307,18 @@ static int mvneta_probe(struct platform_device *pdev) goto err_free_netdev; } - phy_node = of_parse_phandle(dn, "phy", 0); - if (!phy_node) { - if (!of_phy_is_fixed_link(dn)) { - dev_err(&pdev->dev, "no PHY specified\n"); - err = -ENODEV; - goto err_free_irq; - } - - err = of_phy_register_fixed_link(dn); - if (err < 0) { - dev_err(&pdev->dev, "cannot register fixed PHY\n"); - goto err_free_irq; - } - - /* In the case of a fixed PHY, the DT node associated - * to the PHY is the Ethernet MAC DT node. 
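
The fixed-link boilerplate being deleted around here is subsumed by phylink: the probe hunk that follows creates a phylink instance from the device's fwnode and the mvneta_phylink_ops table, and phylink then handles MDIO-attached PHYs, DT fixed-links and SGMII in-band status uniformly. The essential call sequence, condensed from this patch with error unwinding trimmed:

    /* At probe time: bind the MAC operations. */
    phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
                             &mvneta_phylink_ops);
    if (IS_ERR(phylink))
            return PTR_ERR(phylink);

    /* Later, from ndo_open() via mvneta_mdio_probe(): attach whatever
     * the firmware node describes (PHY, fixed-link or in-band). */
    err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
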
- */ - phy_node = of_node_get(dn); - } - phy_mode = of_get_phy_mode(dn); if (phy_mode < 0) { dev_err(&pdev->dev, "incorrect phy-mode\n"); err = -EINVAL; - goto err_put_phy_node; + goto err_free_irq; + } + + phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode, + &mvneta_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + goto err_free_irq; } dev->tx_queue_len = MVNETA_MAX_TXD; @@ -4194,12 +4329,9 @@ static int mvneta_probe(struct platform_device *pdev) pp = netdev_priv(dev); spin_lock_init(&pp->lock); - pp->phy_node = phy_node; + pp->phylink = phylink; pp->phy_interface = phy_mode; - - err = of_property_read_string(dn, "managed", &managed); - pp->use_inband_status = (err == 0 && - strcmp(managed, "in-band-status") == 0); + pp->dn = dn; pp->rxq_def = rxq_def; @@ -4221,7 +4353,7 @@ static int mvneta_probe(struct platform_device *pdev) pp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pp->clk)) { err = PTR_ERR(pp->clk); - goto err_put_phy_node; + goto err_free_phylink; } clk_prepare_enable(pp->clk); @@ -4358,14 +4490,6 @@ static int mvneta_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pp->dev); - if (pp->use_inband_status) { - struct phy_device *phy = of_phy_find_device(dn); - - mvneta_fixed_link_update(pp, phy); - - put_device(&phy->mdio.dev); - } - return 0; err_netdev: @@ -4382,10 +4506,9 @@ err_free_ports: err_clk: clk_disable_unprepare(pp->clk_bus); clk_disable_unprepare(pp->clk); -err_put_phy_node: - of_node_put(phy_node); - if (of_phy_is_fixed_link(dn)) - of_phy_deregister_fixed_link(dn); +err_free_phylink: + if (pp->phylink) + phylink_destroy(pp->phylink); err_free_irq: irq_dispose_mapping(dev->irq); err_free_netdev: @@ -4397,7 +4520,6 @@ err_free_netdev: static int mvneta_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); - struct device_node *dn = pdev->dev.of_node; struct mvneta_port *pp = netdev_priv(dev); unregister_netdev(dev); @@ -4405,10 +4527,8 @@ static int mvneta_remove(struct platform_device *pdev) clk_disable_unprepare(pp->clk); free_percpu(pp->ports); free_percpu(pp->stats); - if (of_phy_is_fixed_link(dn)) - of_phy_deregister_fixed_link(dn); irq_dispose_mapping(dev->irq); - of_node_put(pp->phy_node); + phylink_destroy(pp->phylink); free_netdev(dev); if (pp->bm_priv) { @@ -4426,8 +4546,10 @@ static int mvneta_suspend(struct device *device) struct net_device *dev = dev_get_drvdata(device); struct mvneta_port *pp = netdev_priv(dev); + rtnl_lock(); if (netif_running(dev)) mvneta_stop(dev); + rtnl_unlock(); netif_device_detach(dev); clk_disable_unprepare(pp->clk_bus); clk_disable_unprepare(pp->clk); @@ -4460,14 +4582,13 @@ static int mvneta_resume(struct device *device) return err; } - if (pp->use_inband_status) - mvneta_fixed_link_update(pp, dev->phydev); - netif_device_attach(dev); + rtnl_lock(); if (netif_running(dev)) { mvneta_open(dev); mvneta_set_rx_mode(dev); } + rtnl_unlock(); return 0; } diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 634b2f41cc9e..a1d7b88cf083 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -10,6 +10,7 @@ * warranty of any kind, whether express or implied. 
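
Note the rtnl_lock()/rtnl_unlock() pairs added to mvneta_suspend() and mvneta_resume() above: the stop and open paths now end in phylink_stop()/phylink_start(), which assert that the RTNL lock is held, and PM callbacks do not otherwise hold it. The resulting shape of the suspend side, abridged from the hunk:

    /* Abridged sketch; clock teardown and the resume mirror omitted. */
    static int mvneta_suspend_sketch(struct device *device)
    {
            struct net_device *dev = dev_get_drvdata(device);

            rtnl_lock();
            if (netif_running(dev))
                    mvneta_stop(dev);       /* reaches phylink_stop() */
            rtnl_unlock();

            netif_device_detach(dev);
            return 0;
    }
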
*/ +#include <linux/acpi.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -454,11 +455,11 @@ /* Various constants */ /* Coalescing */ -#define MVPP2_TXDONE_COAL_PKTS_THRESH 15 +#define MVPP2_TXDONE_COAL_PKTS_THRESH 64 #define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL #define MVPP2_TXDONE_COAL_USEC 1000 #define MVPP2_RX_COAL_PKTS 32 -#define MVPP2_RX_COAL_USEC 100 +#define MVPP2_RX_COAL_USEC 64 /* The two bytes Marvell header. Either contains a special value used * by Marvell switches when a specific hardware mode is enabled (not @@ -504,10 +505,12 @@ #define MVPP2_DEFAULT_RXQ 4 /* Max number of Rx descriptors */ -#define MVPP2_MAX_RXD 128 +#define MVPP2_MAX_RXD_MAX 1024 +#define MVPP2_MAX_RXD_DFLT 128 /* Max number of Tx descriptors */ -#define MVPP2_MAX_TXD 1024 +#define MVPP2_MAX_TXD_MAX 2048 +#define MVPP2_MAX_TXD_DFLT 1024 /* Amount of Tx descriptors that can be reserved at once by CPU */ #define MVPP2_CPU_DESC_CHUNK 64 @@ -863,7 +866,7 @@ struct mvpp2 { /* List of pointers to port structures */ int port_count; - struct mvpp2_port **port_list; + struct mvpp2_port *port_list[MVPP2_MAX_PORTS]; /* Aggregated TXQs */ struct mvpp2_tx_queue *aggr_txqs; @@ -930,6 +933,9 @@ struct mvpp2_port { struct mvpp2 *priv; + /* Firmware node associated to the port */ + struct fwnode_handle *fwnode; + /* Per-port registers' base address */ void __iomem *base; void __iomem *stats_base; @@ -5802,6 +5808,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, txq_pcpu->reserved_num = 0; txq_pcpu->txq_put_index = 0; txq_pcpu->txq_get_index = 0; + txq_pcpu->tso_headers = NULL; txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; @@ -5829,10 +5836,13 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); kfree(txq_pcpu->buffs); - dma_free_coherent(port->dev->dev.parent, - txq_pcpu->size * TSO_HEADER_SIZE, - txq_pcpu->tso_headers, - txq_pcpu->tso_headers_dma); + if (txq_pcpu->tso_headers) + dma_free_coherent(port->dev->dev.parent, + txq_pcpu->size * TSO_HEADER_SIZE, + txq_pcpu->tso_headers, + txq_pcpu->tso_headers_dma); + + txq_pcpu->tso_headers = NULL; } if (txq->descs) @@ -6832,13 +6842,13 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev, if (ring->rx_pending == 0 || ring->tx_pending == 0) return -EINVAL; - if (ring->rx_pending > MVPP2_MAX_RXD) - new_rx_pending = MVPP2_MAX_RXD; + if (ring->rx_pending > MVPP2_MAX_RXD_MAX) + new_rx_pending = MVPP2_MAX_RXD_MAX; else if (!IS_ALIGNED(ring->rx_pending, 16)) new_rx_pending = ALIGN(ring->rx_pending, 16); - if (ring->tx_pending > MVPP2_MAX_TXD) - new_tx_pending = MVPP2_MAX_TXD; + if (ring->tx_pending > MVPP2_MAX_TXD_MAX) + new_tx_pending = MVPP2_MAX_TXD_MAX; else if (!IS_ALIGNED(ring->tx_pending, 32)) new_tx_pending = ALIGN(ring->tx_pending, 32); @@ -7318,9 +7328,10 @@ static int mvpp2_ethtool_get_coalesce(struct net_device *dev, { struct mvpp2_port *port = netdev_priv(dev); - c->rx_coalesce_usecs = port->rxqs[0]->time_coal; - c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; - c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; + c->rx_coalesce_usecs = port->rxqs[0]->time_coal; + c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; + c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; + c->tx_coalesce_usecs = port->tx_time_coal; return 0; } @@ -7340,8 +7351,8 @@ static void mvpp2_ethtool_get_ringparam(struct net_device *dev, { struct mvpp2_port *port = netdev_priv(dev); - 
ring->rx_max_pending = MVPP2_MAX_RXD; - ring->tx_max_pending = MVPP2_MAX_TXD; + ring->rx_max_pending = MVPP2_MAX_RXD_MAX; + ring->tx_max_pending = MVPP2_MAX_TXD_MAX; ring->rx_pending = port->rx_ring_size; ring->tx_pending = port->tx_ring_size; } @@ -7492,7 +7503,10 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, strncpy(irqname, "rx-shared", sizeof(irqname)); } - v->irq = of_irq_get_byname(port_node, irqname); + if (port_node) + v->irq = of_irq_get_byname(port_node, irqname); + else + v->irq = fwnode_irq_get(port->fwnode, i); if (v->irq <= 0) { ret = -EINVAL; goto err; @@ -7704,17 +7718,16 @@ static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv, } static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, - struct device_node *port_node, + struct fwnode_handle *fwnode, char **mac_from) { struct mvpp2_port *port = netdev_priv(dev); char hw_mac_addr[ETH_ALEN] = {0}; - const char *dt_mac_addr; + char fw_mac_addr[ETH_ALEN]; - dt_mac_addr = of_get_mac_address(port_node); - if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) { - *mac_from = "device tree"; - ether_addr_copy(dev->dev_addr, dt_mac_addr); + if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) { + *mac_from = "firmware node"; + ether_addr_copy(dev->dev_addr, fw_mac_addr); return; } @@ -7733,13 +7746,14 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, /* Ports initialization */ static int mvpp2_port_probe(struct platform_device *pdev, - struct device_node *port_node, - struct mvpp2 *priv, int index) + struct fwnode_handle *port_fwnode, + struct mvpp2 *priv) { struct device_node *phy_node; - struct phy *comphy; + struct phy *comphy = NULL; struct mvpp2_port *port; struct mvpp2_port_pcpu *port_pcpu; + struct device_node *port_node = to_of_node(port_fwnode); struct net_device *dev; struct resource *res; char *mac_from = ""; @@ -7750,7 +7764,12 @@ static int mvpp2_port_probe(struct platform_device *pdev, int phy_mode; int err, i, cpu; - has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node); + if (port_node) { + has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node); + } else { + has_tx_irqs = true; + queue_mode = MVPP2_QDIST_MULTI_MODE; + } if (!has_tx_irqs) queue_mode = MVPP2_QDIST_SINGLE_MODE; @@ -7765,36 +7784,43 @@ static int mvpp2_port_probe(struct platform_device *pdev, if (!dev) return -ENOMEM; - phy_node = of_parse_phandle(port_node, "phy", 0); - phy_mode = of_get_phy_mode(port_node); + if (port_node) + phy_node = of_parse_phandle(port_node, "phy", 0); + else + phy_node = NULL; + + phy_mode = fwnode_get_phy_mode(port_fwnode); if (phy_mode < 0) { dev_err(&pdev->dev, "incorrect phy mode\n"); err = phy_mode; goto err_free_netdev; } - comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); - if (IS_ERR(comphy)) { - if (PTR_ERR(comphy) == -EPROBE_DEFER) { - err = -EPROBE_DEFER; - goto err_free_netdev; + if (port_node) { + comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); + if (IS_ERR(comphy)) { + if (PTR_ERR(comphy) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_free_netdev; + } + comphy = NULL; } - comphy = NULL; } - if (of_property_read_u32(port_node, "port-id", &id)) { + if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) { err = -EINVAL; dev_err(&pdev->dev, "missing port-id value\n"); goto err_free_netdev; } - dev->tx_queue_len = MVPP2_MAX_TXD; + dev->tx_queue_len = MVPP2_MAX_TXD_MAX; dev->watchdog_timeo = 5 * HZ; dev->netdev_ops = &mvpp2_netdev_ops; dev->ethtool_ops = &mvpp2_eth_tool_ops; port = netdev_priv(dev); port->dev = 
dev; + port->fwnode = port_fwnode; port->ntxqs = ntxqs; port->nrxqs = nrxqs; port->priv = priv; @@ -7804,7 +7830,10 @@ static int mvpp2_port_probe(struct platform_device *pdev, if (err) goto err_free_netdev; - port->link_irq = of_irq_get_byname(port_node, "link"); + if (port_node) + port->link_irq = of_irq_get_byname(port_node, "link"); + else + port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); if (port->link_irq == -EPROBE_DEFER) { err = -EPROBE_DEFER; goto err_deinit_qvecs; @@ -7813,7 +7842,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, /* the link irq is optional */ port->link_irq = 0; - if (of_property_read_bool(port_node, "marvell,loopback")) + if (fwnode_property_read_bool(port_fwnode, "marvell,loopback")) port->flags |= MVPP2_F_LOOPBACK; port->id = id; @@ -7838,8 +7867,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, MVPP21_MIB_COUNTERS_OFFSET + port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; } else { - if (of_property_read_u32(port_node, "gop-port-id", - &port->gop_id)) { + if (fwnode_property_read_u32(port_fwnode, "gop-port-id", + &port->gop_id)) { err = -EINVAL; dev_err(&pdev->dev, "missing gop-port-id value\n"); goto err_deinit_qvecs; @@ -7869,10 +7898,10 @@ static int mvpp2_port_probe(struct platform_device *pdev, mutex_init(&port->gather_stats_lock); INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); - mvpp2_port_copy_mac_addr(dev, priv, port_node, &mac_from); + mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from); - port->tx_ring_size = MVPP2_MAX_TXD; - port->rx_ring_size = MVPP2_MAX_RXD; + port->tx_ring_size = MVPP2_MAX_TXD_DFLT; + port->rx_ring_size = MVPP2_MAX_RXD_DFLT; SET_NETDEV_DEV(dev, &pdev->dev); err = mvpp2_port_init(port); @@ -7927,7 +7956,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, } netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); - priv->port_list[index] = port; + priv->port_list[priv->port_count++] = port; + return 0; err_free_port_pcpu: @@ -8186,8 +8216,9 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) static int mvpp2_probe(struct platform_device *pdev) { - struct device_node *dn = pdev->dev.of_node; - struct device_node *port_node; + const struct acpi_device_id *acpi_id; + struct fwnode_handle *fwnode = pdev->dev.fwnode; + struct fwnode_handle *port_fwnode; struct mvpp2 *priv; struct resource *res; void __iomem *base; @@ -8198,8 +8229,14 @@ static int mvpp2_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->hw_version = - (unsigned long)of_device_get_match_data(&pdev->dev); + if (has_acpi_companion(&pdev->dev)) { + acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table, + &pdev->dev); + priv->hw_version = (unsigned long)acpi_id->driver_data; + } else { + priv->hw_version = + (unsigned long)of_device_get_match_data(&pdev->dev); + } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); @@ -8213,10 +8250,23 @@ static int mvpp2_probe(struct platform_device *pdev) return PTR_ERR(priv->lms_base); } else { res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (has_acpi_companion(&pdev->dev)) { + /* In case the MDIO memory region is declared in + * the ACPI, it can already appear as 'in-use' + * in the OS. Because it is overlapped by second + * region of the network controller, make + * sure it is released, before requesting it again. + * The care is taken by mvpp2 driver to avoid + * concurrent access to this memory region. 
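
The mvpp2 conversion in this region consistently swaps of_*() accessors for fwnode_*() equivalents so a single probe path serves both DT and ACPI: fwnode_get_phy_mode() for of_get_phy_mode(), fwnode_property_read_u32() for of_property_read_u32(), fwnode_get_mac_address() for of_get_mac_address(), and fwnode_irq_get() where of_irq_get_byname() has no ACPI counterpart. Where a facility exists only in DT (the comphy, named IRQs), the code keeps an of_node-only branch. A condensed sketch of the pattern:

    /* port_node is non-NULL only when probing from DT; the fwnode
     * accessors work for both DT and ACPI. The index argument for the
     * ACPI branch follows the patch's fixed IRQ ordering. */
    static int mvpp2_port_link_irq_sketch(struct fwnode_handle *port_fwnode,
                                          int index)
    {
            struct device_node *port_node = to_of_node(port_fwnode);

            if (port_node)
                    return of_irq_get_byname(port_node, "link"); /* DT: by name */

            return fwnode_irq_get(port_fwnode, index);     /* ACPI: by position */
    }
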
+ */ + release_resource(res); + } priv->iface_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->iface_base)) return PTR_ERR(priv->iface_base); + } + if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) { priv->sysctrl_base = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "marvell,system-controller"); @@ -8242,32 +8292,34 @@ static int mvpp2_probe(struct platform_device *pdev) else priv->max_port_rxqs = 32; - priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk"); - if (IS_ERR(priv->pp_clk)) - return PTR_ERR(priv->pp_clk); - err = clk_prepare_enable(priv->pp_clk); - if (err < 0) - return err; - - priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk"); - if (IS_ERR(priv->gop_clk)) { - err = PTR_ERR(priv->gop_clk); - goto err_pp_clk; - } - err = clk_prepare_enable(priv->gop_clk); - if (err < 0) - goto err_pp_clk; + if (dev_of_node(&pdev->dev)) { + priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk"); + if (IS_ERR(priv->pp_clk)) + return PTR_ERR(priv->pp_clk); + err = clk_prepare_enable(priv->pp_clk); + if (err < 0) + return err; - if (priv->hw_version == MVPP22) { - priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk"); - if (IS_ERR(priv->mg_clk)) { - err = PTR_ERR(priv->mg_clk); - goto err_gop_clk; + priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk"); + if (IS_ERR(priv->gop_clk)) { + err = PTR_ERR(priv->gop_clk); + goto err_pp_clk; } - - err = clk_prepare_enable(priv->mg_clk); + err = clk_prepare_enable(priv->gop_clk); if (err < 0) - goto err_gop_clk; + goto err_pp_clk; + + if (priv->hw_version == MVPP22) { + priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk"); + if (IS_ERR(priv->mg_clk)) { + err = PTR_ERR(priv->mg_clk); + goto err_gop_clk; + } + + err = clk_prepare_enable(priv->mg_clk); + if (err < 0) + goto err_gop_clk; + } priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); if (IS_ERR(priv->axi_clk)) { @@ -8280,10 +8332,14 @@ static int mvpp2_probe(struct platform_device *pdev) if (err < 0) goto err_gop_clk; } - } - /* Get system's tclk rate */ - priv->tclk = clk_get_rate(priv->pp_clk); + /* Get system's tclk rate */ + priv->tclk = clk_get_rate(priv->pp_clk); + } else if (device_property_read_u32(&pdev->dev, "clock-frequency", + &priv->tclk)) { + dev_err(&pdev->dev, "missing clock-frequency value\n"); + return -EINVAL; + } if (priv->hw_version == MVPP22) { err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); @@ -8306,30 +8362,19 @@ static int mvpp2_probe(struct platform_device *pdev) goto err_mg_clk; } - priv->port_count = of_get_available_child_count(dn); + /* Initialize ports */ + fwnode_for_each_available_child_node(fwnode, port_fwnode) { + err = mvpp2_port_probe(pdev, port_fwnode, priv); + if (err < 0) + goto err_port_probe; + } + if (priv->port_count == 0) { dev_err(&pdev->dev, "no ports enabled\n"); err = -ENODEV; goto err_mg_clk; } - priv->port_list = devm_kcalloc(&pdev->dev, priv->port_count, - sizeof(*priv->port_list), - GFP_KERNEL); - if (!priv->port_list) { - err = -ENOMEM; - goto err_mg_clk; - } - - /* Initialize ports */ - i = 0; - for_each_available_child_of_node(dn, port_node) { - err = mvpp2_port_probe(pdev, port_node, priv, i); - if (err < 0) - goto err_port_probe; - i++; - } - /* Statistics must be gathered regularly because some of them (like * packets counters) are 32-bit registers and could overflow quite * quickly. 
For instance, a 10Gb link used at full bandwidth with the @@ -8350,7 +8395,7 @@ static int mvpp2_probe(struct platform_device *pdev) err_port_probe: i = 0; - for_each_available_child_of_node(dn, port_node) { + fwnode_for_each_available_child_node(fwnode, port_fwnode) { if (priv->port_list[i]) mvpp2_port_remove(priv->port_list[i]); i++; @@ -8369,14 +8414,14 @@ err_pp_clk: static int mvpp2_remove(struct platform_device *pdev) { struct mvpp2 *priv = platform_get_drvdata(pdev); - struct device_node *dn = pdev->dev.of_node; - struct device_node *port_node; + struct fwnode_handle *fwnode = pdev->dev.fwnode; + struct fwnode_handle *port_fwnode; int i = 0; flush_workqueue(priv->stats_queue); destroy_workqueue(priv->stats_queue); - for_each_available_child_of_node(dn, port_node) { + fwnode_for_each_available_child_node(fwnode, port_fwnode) { if (priv->port_list[i]) { mutex_destroy(&priv->port_list[i]->gather_stats_lock); mvpp2_port_remove(priv->port_list[i]); @@ -8399,6 +8444,9 @@ static int mvpp2_remove(struct platform_device *pdev) aggr_txq->descs_dma); } + if (is_acpi_node(port_fwnode)) + return 0; + clk_disable_unprepare(priv->axi_clk); clk_disable_unprepare(priv->mg_clk); clk_disable_unprepare(priv->pp_clk); @@ -8420,12 +8468,19 @@ static const struct of_device_id mvpp2_match[] = { }; MODULE_DEVICE_TABLE(of, mvpp2_match); +static const struct acpi_device_id mvpp2_acpi_match[] = { + { "MRVL0110", MVPP22 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match); + static struct platform_driver mvpp2_driver = { .probe = mvpp2_probe, .remove = mvpp2_remove, .driver = { .name = MVPP2_DRIVER_NAME, .of_match_table = mvpp2_match, + .acpi_match_table = ACPI_PTR(mvpp2_acpi_match), }, }; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 9efe1771423c..9fe85300e7b6 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4287,7 +4287,7 @@ static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy) dev_err(&hw->pdev->dev, "VPD cycle timed out\n"); return -ETIMEDOUT; } - mdelay(1); + msleep(1); } return 0; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index fc67e35b253e..29826dd15204 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1952,14 +1952,16 @@ static int mtk_hw_init(struct mtk_eth *eth) } regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); - /* Set GE2 driving and slew rate */ - regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); + if (eth->pctl) { + /* Set GE2 driving and slew rate */ + regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); - /* set GE2 TDSEL */ - regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); + /* set GE2 TDSEL */ + regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); - /* set GE2 TUNE */ - regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); + /* set GE2 TUNE */ + regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); + } /* Set linkdown as the default for each GMAC. 
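
The one-line sky2 change above (mdelay(1) becomes msleep(1)) matters because sky2_vpd_wait() polls in process context: msleep() lets the scheduler run other work during the wait, whereas mdelay() busy-spins the CPU for the full duration. A generic polling skeleton showing the same trade-off; the predicate is a stand-in, not the driver's VPD test:

    /* Hypothetical example; needs <linux/delay.h> and <linux/jiffies.h>. */
    static int poll_until_done_sketch(bool (*done)(void))
    {
            unsigned long deadline = jiffies + msecs_to_jiffies(100);

            while (!done()) {
                    if (time_after(jiffies, deadline))
                            return -ETIMEDOUT;
                    msleep(1);      /* sleeps; mdelay(1) would spin */
            }
            return 0;
    }
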
Its own MCR would be set * up with the more appropriate value when mtk_phy_link_adjust call is @@ -2538,11 +2540,13 @@ static int mtk_probe(struct platform_device *pdev) } } - eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, - "mediatek,pctl"); - if (IS_ERR(eth->pctl)) { - dev_err(&pdev->dev, "no pctl regmap found\n"); - return PTR_ERR(eth->pctl); + if (eth->soc->required_pctl) { + eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,pctl"); + if (IS_ERR(eth->pctl)) { + dev_err(&pdev->dev, "no pctl regmap found\n"); + return PTR_ERR(eth->pctl); + } } for (i = 0; i < 3; i++) { @@ -2668,17 +2672,20 @@ static int mtk_remove(struct platform_device *pdev) static const struct mtk_soc_data mt2701_data = { .caps = MTK_GMAC1_TRGMII, - .required_clks = MT7623_CLKS_BITMAP + .required_clks = MT7623_CLKS_BITMAP, + .required_pctl = true, }; static const struct mtk_soc_data mt7622_data = { .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW, - .required_clks = MT7622_CLKS_BITMAP + .required_clks = MT7622_CLKS_BITMAP, + .required_pctl = false, }; static const struct mtk_soc_data mt7623_data = { .caps = MTK_GMAC1_TRGMII, - .required_clks = MT7623_CLKS_BITMAP + .required_clks = MT7623_CLKS_BITMAP, + .required_pctl = true, }; const struct of_device_id of_mtk_match[] = { diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index a3af4660de81..672b8c353c47 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -573,10 +573,13 @@ struct mtk_rx_ring { * @caps Flags shown the extra capability for the SoC * @required_clks Flags shown the bitmap for required clocks on * the target SoC + * @required_pctl A bool value to show whether the SoC requires + * the extra setup for those pins used by GMAC. 
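
The required_pctl flag documented below travels in the per-SoC mtk_soc_data and reaches the driver through the OF match table, which is how MT7622 (which lacks the pctl block) skips the syscon lookup while MT2701/MT7623 keep it. As we read the driver, the binding looks roughly like this; see mtk_eth_soc.c for the authoritative table:

    const struct of_device_id of_mtk_match[] = {
            { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
            { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
            { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
            {},
    };
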
*/ struct mtk_soc_data { u32 caps; u32 required_clks; + bool required_pctl; }; /* currently no SoC has more than 2 macs */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index 5f41dc92aa68..1a0c3bf86ead 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -310,6 +310,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets) } switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_VENDOR: case IEEE_8021QAZ_TSA_STRICT: break; case IEEE_8021QAZ_TSA_ETS: @@ -347,6 +348,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv, /* higher TC means higher priority => lower pg */ for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) { switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_VENDOR: + pg[i] = MLX4_EN_TC_VENDOR; + tc_tx_bw[i] = MLX4_EN_BW_MAX; + break; case IEEE_8021QAZ_TSA_STRICT: pg[i] = num_strict++; tc_tx_bw[i] = MLX4_EN_BW_MAX; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index bf1f04164885..ebc1f566a4d9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1094,12 +1094,21 @@ static int mlx4_en_set_ringparam(struct net_device *dev, if (param->rx_jumbo_pending || param->rx_mini_pending) return -EINVAL; + if (param->rx_pending < MLX4_EN_MIN_RX_SIZE) { + en_warn(priv, "%s: rx_pending (%d) < min (%d)\n", + __func__, param->rx_pending, + MLX4_EN_MIN_RX_SIZE); + return -EINVAL; + } + if (param->tx_pending < MLX4_EN_MIN_TX_SIZE) { + en_warn(priv, "%s: tx_pending (%d) < min (%lu)\n", + __func__, param->tx_pending, + MLX4_EN_MIN_TX_SIZE); + return -EINVAL; + } + rx_size = roundup_pow_of_two(param->rx_pending); - rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE); - rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE); tx_size = roundup_pow_of_two(param->tx_pending); - tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE); - tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE); if (rx_size == (priv->port_up ? 
priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) && diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 99051a294fa6..8fc51bc29003 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2172,8 +2172,9 @@ static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], prof->rx_ring_size, priv->stride, - node)) + node, i)) goto err; + } #ifdef CONFIG_RFS_ACCEL @@ -3336,6 +3337,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->msg_enable = MLX4_EN_MSG_LEVEL; #ifdef CONFIG_MLX4_EN_DCB if (!mlx4_is_slave(priv->mdev->dev)) { + u8 prio; + + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) { + priv->ets.prio_tc[prio] = prio; + priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR; + } + priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; priv->flags |= MLX4_EN_DCB_ENABLED; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 85e28efcda33..b4d144e67514 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -262,7 +262,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev) int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, - u32 size, u16 stride, int node) + u32 size, u16 stride, int node, int queue_index) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_rx_ring *ring; @@ -286,6 +286,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, ring->log_stride = ffs(ring->stride) - 1; ring->buf_size = ring->size * ring->stride + TXBB_SIZE; + if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0) + goto err_ring; + tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * sizeof(struct mlx4_en_rx_alloc)); ring->rx_info = vzalloc_node(tmp, node); @@ -293,7 +296,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, ring->rx_info = vzalloc(tmp); if (!ring->rx_info) { err = -ENOMEM; - goto err_ring; + goto err_xdp_info; } } @@ -317,6 +320,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, err_info: vfree(ring->rx_info); ring->rx_info = NULL; +err_xdp_info: + xdp_rxq_info_unreg(&ring->xdp_rxq); err_ring: kfree(ring); *pring = NULL; @@ -440,6 +445,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, lockdep_is_held(&mdev->state_lock)); if (old_prog) bpf_prog_put(old_prog); + xdp_rxq_info_unreg(&ring->xdp_rxq); mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE); vfree(ring->rx_info); ring->rx_info = NULL; @@ -617,6 +623,10 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, return 0; } #endif + +/* We reach this function only after checking that any of + * the (IPv4 | IPv6) bits are set in cqe->status. 
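
The mlx4 hunks above register an xdp_rxq_info per RX ring, which the XDP core now requires so that xdp_buff.rxq identifies the receiving queue to BPF programs. The lifecycle is strictly paired: register at ring creation before any later allocation can fail, unregister on both the creation error path and at ring teardown. Condensed from the changes:

    /* From mlx4_en_create_rx_ring(): register first, so nothing that can
     * fail later is left depending on an unregistered rxq info. */
    if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
            goto err_ring;
    /* ...later allocation failures unwind through: */
    /*   err_xdp_info: xdp_rxq_info_unreg(&ring->xdp_rxq);              */
    /*   err_ring:     kfree(ring);                                      */

    /* And in the NAPI poll loop, once per invocation: */
    xdp.rxq = &ring->xdp_rxq;
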
+ */ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, netdev_features_t dev_features) { @@ -632,13 +642,11 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, hdr += sizeof(struct vlan_hdr); } - if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) - return get_fixed_ipv4_csum(hw_checksum, skb, hdr); #if IS_ENABLED(CONFIG_IPV6) if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) return get_fixed_ipv6_csum(hw_checksum, skb, hdr); #endif - return 0; + return get_fixed_ipv4_csum(hw_checksum, skb, hdr); } int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) @@ -650,6 +658,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud int cq_ring = cq->ring; bool doorbell_pending; struct mlx4_cqe *cqe; + struct xdp_buff xdp; int polled = 0; int index; @@ -664,6 +673,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud /* Protect accesses to: ring->xdp_prog, priv->mac_hash list */ rcu_read_lock(); xdp_prog = rcu_dereference(ring->xdp_prog); + xdp.rxq = &ring->xdp_rxq; doorbell_pending = 0; /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx @@ -748,7 +758,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud * read bytes but not past the end of the frag. */ if (xdp_prog) { - struct xdp_buff xdp; dma_addr_t dma; void *orig_data; u32 act; @@ -814,33 +823,33 @@ xdp_drop_no_cnt: if (likely(dev->features & NETIF_F_RXCSUM)) { if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | MLX4_CQE_STATUS_UDP)) { - if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && - cqe->checksum == cpu_to_be16(0xffff)) { - bool l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && - (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); - - ip_summed = CHECKSUM_UNNECESSARY; - hash_type = PKT_HASH_TYPE_L4; - if (l2_tunnel) - skb->csum_level = 1; - ring->csum_ok++; - } else { + bool l2_tunnel; + + if (!((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && + cqe->checksum == cpu_to_be16(0xffff))) goto csum_none; - } + + l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && + (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); + ip_summed = CHECKSUM_UNNECESSARY; + hash_type = PKT_HASH_TYPE_L4; + if (l2_tunnel) + skb->csum_level = 1; + ring->csum_ok++; } else { - if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP && - (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | - MLX4_CQE_STATUS_IPV6))) { - if (check_csum(cqe, skb, va, dev->features)) { - goto csum_none; - } else { - ip_summed = CHECKSUM_COMPLETE; - hash_type = PKT_HASH_TYPE_L3; - ring->csum_complete++; - } - } else { + if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP && + (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | +#if IS_ENABLED(CONFIG_IPV6) + MLX4_CQE_STATUS_IPV6)))) +#else + 0)))) +#endif goto csum_none; - } + if (check_csum(cqe, skb, va, dev->features)) + goto csum_none; + ip_summed = CHECKSUM_COMPLETE; + hash_type = PKT_HASH_TYPE_L3; + ring->csum_complete++; } } else { csum_none: diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 2b72677eccd4..f470ae37d937 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -46,6 +46,7 @@ #endif #include <linux/cpu_rmap.h> #include <linux/ptp_clock_kernel.h> +#include <net/xdp.h> #include <linux/mlx4/device.h> #include <linux/mlx4/qp.h> @@ -356,6 +357,7 @@ struct mlx4_en_rx_ring { unsigned long dropped; int 
hwtstamp_rx_filter; cpumask_var_t affinity_mask; + struct xdp_rxq_info xdp_rxq; }; struct mlx4_en_cq { @@ -479,6 +481,7 @@ struct mlx4_en_frag_info { #define MLX4_EN_BW_MIN 1 #define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */ +#define MLX4_EN_TC_VENDOR 0 #define MLX4_EN_TC_ETS 7 enum dcb_pfc_type { @@ -719,7 +722,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev); void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv); int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, - u32 size, u16 stride, int node); + u32 size, u16 stride, int node, int queue_index); void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride); diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index c7c0764991c9..2e84f10f59ba 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -1103,30 +1103,16 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable); void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u32 *lkey, u32 *rkey) { - struct mlx4_cmd_mailbox *mailbox; - int err; - if (!fmr->maps) return; - fmr->maps = 0; + /* To unmap: it is sufficient to take back ownership from HW */ + *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW; - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) { - err = PTR_ERR(mailbox); - pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err); - return; - } + /* Make sure MPT status is visible */ + wmb(); - err = mlx4_HW2SW_MPT(dev, NULL, - key_to_hw_index(fmr->mr.key) & - (dev->caps.num_mpts - 1)); - mlx4_free_cmd_mailbox(dev, mailbox); - if (err) { - pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err); - return; - } - fmr->mr.enabled = MLX4_MPT_EN_SW; + fmr->maps = 0; } EXPORT_SYMBOL_GPL(mlx4_fmr_unmap); @@ -1136,6 +1122,22 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) if (fmr->maps) return -EBUSY; + if (fmr->mr.enabled == MLX4_MPT_EN_HW) { + /* In case of FMR was enabled and unmapped + * make sure to give ownership of MPT back to HW + * so HW2SW_MPT command will success. 
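
The mlx4 FMR rework here replaces the HW2SW_MPT firmware command in the unmap path with a direct flip of the MPT ownership byte, and mlx4_fmr_free() (continuing below) hands ownership back to the device before the final mlx4_mr_free(). Every status transition is fenced with wmb() so the hardware never observes half-updated MPT fields. The ordering discipline, isolated from the patch:

    *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
    wmb();                  /* SW ownership visible before field updates */
    fmr->mpt->length = 0;
    fmr->mpt->start  = 0;
    wmb();                  /* field updates visible before returning to HW */
    *(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
    wmb();                  /* HW ownership visible before the HW2SW command */
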
+ */ + *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW; + /* Make sure MPT status is visible before changing MPT fields */ + wmb(); + fmr->mpt->length = 0; + fmr->mpt->start = 0; + /* Make sure the MPT data updates are visible before giving it back to HW */ + wmb(); + *(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW; + /* Make sure MPT status is visible */ + wmb(); + } ret = mlx4_mr_free(dev, &fmr->mr); if (ret) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 19b21b40ab07..c805769d92a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -14,7 +14,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ fpga/ipsec.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ - en_tx.o en_rx.o en_rx_am.o en_txrx.o en_stats.o vxlan.o \ + en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \ en_arfs.o en_fs_ethtool.o en_selftest.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index c2d89bfa1a70..4c9360b25532 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -44,8 +44,11 @@ #include <linux/mlx5/port.h> #include <linux/mlx5/vport.h> #include <linux/mlx5/transobj.h> +#include <linux/mlx5/fs.h> #include <linux/rhashtable.h> #include <net/switchdev.h> +#include <net/xdp.h> +#include <linux/net_dim.h> #include "wq.h" #include "mlx5_core.h" #include "en_stats.h" @@ -226,12 +229,6 @@ enum mlx5e_priv_flag { #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ #endif -struct mlx5e_cq_moder { - u16 usec; - u16 pkts; - u8 cq_period_mode; -}; - struct mlx5e_params { u8 log_sq_size; u8 rq_wq_type; @@ -242,8 +239,8 @@ struct mlx5e_params { u16 num_channels; u8 num_tc; bool rx_cqe_compress_def; - struct mlx5e_cq_moder rx_cq_moderation; - struct mlx5e_cq_moder tx_cq_moderation; + struct net_dim_cq_moder rx_cq_moderation; + struct net_dim_cq_moder tx_cq_moderation; bool lro_en; u32 lro_wqe_sz; u16 tx_max_inline; @@ -253,7 +250,7 @@ struct mlx5e_params { u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; bool vlan_strip_disable; bool scatter_fcs_en; - bool rx_am_enabled; + bool rx_dim_enabled; u32 lro_timeout; u32 pflags; struct bpf_prog *xdp_prog; @@ -472,32 +469,6 @@ struct mlx5e_mpw_info { u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE]; }; -struct mlx5e_rx_am_stats { - int ppms; /* packets per msec */ - int bpms; /* bytes per msec */ - int epms; /* events per msec */ -}; - -struct mlx5e_rx_am_sample { - ktime_t time; - u32 pkt_ctr; - u32 byte_ctr; - u16 event_ctr; -}; - -struct mlx5e_rx_am { /* Adaptive Moderation */ - u8 state; - struct mlx5e_rx_am_stats prev_stats; - struct mlx5e_rx_am_sample start_sample; - struct work_struct work; - u8 profile_ix; - u8 mode; - u8 tune_state; - u8 steps_right; - u8 steps_left; - u8 tired; -}; - /* a single cache unit can serve one napi call (for non-striding rq) * or a MPWQE (for striding rq).
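 *
 * (The mlx5e_cq_moder struct removed above did not disappear: it moved to
 * include/linux/net_dim.h as the generic moderation profile used by the
 * new library, roughly:
 *
 *	struct net_dim_cq_moder {
 *		u16 usec;
 *		u16 pkts;
 *		u8 cq_period_mode;
 *	};
 *
 * which is why mlx5e_params above now carries net_dim_cq_moder fields.)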
*/ @@ -558,7 +529,7 @@ struct mlx5e_rq { unsigned long state; int ix; - struct mlx5e_rx_am am; /* Adaptive Moderation */ + struct net_dim dim; /* Dynamic Interrupt Moderation */ /* XDP */ struct bpf_prog *xdp_prog; @@ -571,6 +542,9 @@ struct mlx5e_rq { u32 rqn; struct mlx5_core_dev *mdev; struct mlx5_core_mkey umr_mkey; + + /* XDP read-mostly */ + struct xdp_rxq_info xdp_rxq; } ____cacheline_aligned_in_smp; struct mlx5e_channel { @@ -587,6 +561,7 @@ struct mlx5e_channel { /* data path - accessed per napi poll */ struct irq_desc *irq_desc; + struct mlx5e_ch_stats stats; /* control */ struct mlx5e_priv *priv; @@ -655,6 +630,7 @@ struct mlx5e_tc_table { struct rhashtable ht; DECLARE_HASHTABLE(mod_hdr_tbl, 8); + DECLARE_HASHTABLE(hairpin_tbl, 8); }; struct mlx5e_vlan_table { @@ -722,6 +698,11 @@ enum { MLX5E_ARFS_FT_LEVEL }; +enum { + MLX5E_TC_FT_LEVEL = 0, + MLX5E_TC_TTC_FT_LEVEL, +}; + struct mlx5e_ethtool_table { struct mlx5_flow_table *ft; int num_rules; @@ -860,11 +841,7 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); -void mlx5e_rx_am(struct mlx5e_rq *rq); -void mlx5e_rx_am_work(struct work_struct *work); -struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode); - -void mlx5e_update_stats(struct mlx5e_priv *priv, bool full); +void mlx5e_update_stats(struct mlx5e_priv *priv); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); @@ -1054,11 +1031,26 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv); void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv); void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); -int mlx5e_create_ttc_table(struct mlx5e_priv *priv); -void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv); +struct ttc_params { + struct mlx5_flow_table_attr ft_attr; + u32 any_tt_tirn; + u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; + struct mlx5e_ttc_table *inner_ttc; +}; + +void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params); +void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params); +void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params); -int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv); -void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv); +int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, + struct mlx5e_ttc_table *ttc); +void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv, + struct mlx5e_ttc_table *ttc); + +int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, + struct mlx5e_ttc_table *ttc); +void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv, + struct mlx5e_ttc_table *ttc); int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc, u32 underlay_qpn, u32 *tisn); @@ -1071,6 +1063,8 @@ int mlx5e_open(struct net_device *netdev); void mlx5e_update_stats_work(struct work_struct *work); u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); +int mlx5e_bits_invert(unsigned long a, int size); + /* ethtool helpers */ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, struct ethtool_drvinfo *drvinfo); @@ -1110,4 +1104,5 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, u16 max_channels); u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev); +void mlx5e_rx_dim_work(struct work_struct *work); #endif /* __MLX5_EN_H__ */ diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c new file mode 100644 index 000000000000..602851ab5b14 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/net_dim.h> +#include "en.h" + +void mlx5e_rx_dim_work(struct work_struct *work) +{ + struct net_dim *dim = container_of(work, struct net_dim, + work); + struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim); + struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode, + dim->profile_ix); + + mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq, + cur_profile.usec, cur_profile.pkts); + + dim->state = NET_DIM_START_MEASURE; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index ea5fff2c3143..cc8048f68f11 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -207,7 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, return; mutex_lock(&priv->state_lock); - mlx5e_update_stats(priv, true); + mlx5e_update_stats(priv); mutex_unlock(&priv->state_lock); for (i = 0; i < mlx5e_num_stats_grps; i++) @@ -295,7 +295,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, struct mlx5e_channels new_channels = {}; u32 rx_pending_wqes; u32 min_rq_size; - u32 max_rq_size; u8 log_rq_size; u8 log_sq_size; u32 num_mtts; @@ -314,8 +313,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, 1 << mlx5_min_log_rq_size(rq_wq_type)); - max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, - 1 << mlx5_max_log_rq_size(rq_wq_type)); rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type, param->rx_pending); @@ -325,12 +322,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, min_rq_size); return -EINVAL; } - if (param->rx_pending > max_rq_size) { - netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n", - __func__, param->rx_pending, - max_rq_size); - return -EINVAL; - } num_mtts = 
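/* The new en_dim.c above is only the "apply" half of dynamic interrupt
 * moderation; sampling and the profile-selection state machine live in the
 * generic net_dim library. A sketch of the producer side, paraphrasing the
 * driver's napi poll (exact placement assumed, not shown in this diff):
 */
if (test_bit(MLX5E_RQ_STATE_AM, &c->rq.state)) {
	struct net_dim_sample dim_sample;

	/* snapshot this ring's event/packet/byte counters */
	net_dim_sample(c->rq.cq.event_ctr, c->rq.stats.packets,
		       c->rq.stats.bytes, &dim_sample);
	/* run the DIM state machine; schedules rq->dim.work on a decision */
	net_dim(&c->rq.dim, dim_sample);
}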
MLX5E_REQUIRED_MTTS(rx_pending_wqes); if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && @@ -346,12 +337,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE); return -EINVAL; } - if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) { - netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n", - __func__, param->tx_pending, - 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE); - return -EINVAL; - } log_rq_size = order_base_2(rx_pending_wqes); log_sq_size = order_base_2(param->tx_pending); @@ -479,7 +464,7 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts; coal->tx_coalesce_usecs = priv->channels.params.tx_cq_moderation.usec; coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts; - coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled; + coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled; return 0; } @@ -533,7 +518,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames; new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs; new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames; - new_channels.params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce; + new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { priv->channels.params = new_channels.params; @@ -541,7 +526,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, } /* we are opened */ - reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled; + reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; if (!reset) { mlx5e_set_priv_channels_coalesce(priv, coal); priv->channels.params = new_channels.params; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index def513484845..f64dda2bed31 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -806,25 +806,25 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, return err ? 
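/* With this, ethtool's standard adaptive-rx knob ("ethtool -C ethN
 * adaptive-rx on") maps directly onto rx_dim_enabled via the get/set
 * coalesce hooks above. The restart rule in set_coalesce is worth noting:
 * static usec/pkts values are pushed to the live channels, and a full
 * channel reopen is needed only when the adaptive flag itself flips
 * (condensed from the function above, not additional code):
 */
reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
if (!reset) {
	mlx5e_set_priv_channels_coalesce(priv, coal);	/* apply in place */
	priv->channels.params = new_channels.params;
}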
ERR_PTR(err) : rule; } -static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv) +static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv, + struct ttc_params *params, + struct mlx5e_ttc_table *ttc) { struct mlx5_flow_destination dest = {}; - struct mlx5e_ttc_table *ttc; struct mlx5_flow_handle **rules; struct mlx5_flow_table *ft; int tt; int err; - ttc = &priv->fs.ttc; ft = ttc->ft.t; rules = ttc->rules; dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; for (tt = 0; tt < MLX5E_NUM_TT; tt++) { if (tt == MLX5E_TT_ANY) - dest.tir_num = priv->direct_tir[0].tirn; + dest.tir_num = params->any_tt_tirn; else - dest.tir_num = priv->indir_tir[tt].tirn; + dest.tir_num = params->indir_tirn[tt]; rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, ttc_rules[tt].etype, ttc_rules[tt].proto); @@ -832,12 +832,12 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv) goto del_rules; } - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) return 0; rules = ttc->tunnel_rules; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = priv->fs.inner_ttc.ft.t; + dest.ft = params->inner_ttc->ft.t; for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, ttc_tunnel_rules[tt].etype, @@ -977,25 +977,25 @@ mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv, return err ? ERR_PTR(err) : rule; } -static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv) +static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv, + struct ttc_params *params, + struct mlx5e_ttc_table *ttc) { struct mlx5_flow_destination dest = {}; struct mlx5_flow_handle **rules; - struct mlx5e_ttc_table *ttc; struct mlx5_flow_table *ft; int err; int tt; - ttc = &priv->fs.inner_ttc; ft = ttc->ft.t; rules = ttc->rules; dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; for (tt = 0; tt < MLX5E_NUM_TT; tt++) { if (tt == MLX5E_TT_ANY) - dest.tir_num = priv->direct_tir[0].tirn; + dest.tir_num = params->any_tt_tirn; else - dest.tir_num = priv->inner_indir_tir[tt].tirn; + dest.tir_num = params->indir_tirn[tt]; rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest, ttc_rules[tt].etype, @@ -1075,21 +1075,42 @@ err: return err; } -int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv) +void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, + struct ttc_params *ttc_params) +{ + ttc_params->any_tt_tirn = priv->direct_tir[0].tirn; + ttc_params->inner_ttc = &priv->fs.inner_ttc; +} + +void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params) +{ + struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr; + + ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE; + ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL; + ft_attr->prio = MLX5E_NIC_PRIO; +} + +void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params) + +{ + struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr; + + ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE; + ft_attr->level = MLX5E_TTC_FT_LEVEL; + ft_attr->prio = MLX5E_NIC_PRIO; +} + +int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, + struct mlx5e_ttc_table *ttc) { - struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc; - struct mlx5_flow_table_attr ft_attr = {}; struct mlx5e_flow_table *ft = &ttc->ft; int err; if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) return 0; - ft_attr.max_fte = MLX5E_INNER_TTC_TABLE_SIZE; - ft_attr.level = MLX5E_INNER_TTC_FT_LEVEL; - ft_attr.prio = MLX5E_NIC_PRIO; - - ft->t = mlx5_create_flow_table(priv->fs.ns, 
&ft_attr); + ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); ft->t = NULL; @@ -1100,7 +1121,7 @@ int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv) if (err) goto err; - err = mlx5e_generate_inner_ttc_table_rules(priv); + err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc); if (err) goto err; @@ -1111,10 +1132,9 @@ err: return err; } -void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv) +void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv, + struct mlx5e_ttc_table *ttc) { - struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc; - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) return; @@ -1122,27 +1142,21 @@ void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv) mlx5e_cleanup_ttc_rules(ttc); mlx5e_destroy_flow_table(&ttc->ft); } -void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv) +void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv, + struct mlx5e_ttc_table *ttc) { - struct mlx5e_ttc_table *ttc = &priv->fs.ttc; - mlx5e_cleanup_ttc_rules(ttc); mlx5e_destroy_flow_table(&ttc->ft); } -int mlx5e_create_ttc_table(struct mlx5e_priv *priv) +int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, + struct mlx5e_ttc_table *ttc) { bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version); - struct mlx5e_ttc_table *ttc = &priv->fs.ttc; - struct mlx5_flow_table_attr ft_attr = {}; struct mlx5e_flow_table *ft = &ttc->ft; int err; - ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE; - ft_attr.level = MLX5E_TTC_FT_LEVEL; - ft_attr.prio = MLX5E_NIC_PRIO; - - ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); + ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); ft->t = NULL; @@ -1153,7 +1167,7 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv) if (err) goto err; - err = mlx5e_generate_ttc_table_rules(priv); + err = mlx5e_generate_ttc_table_rules(priv, params, ttc); if (err) goto err; @@ -1474,7 +1488,8 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv) int mlx5e_create_flow_steering(struct mlx5e_priv *priv) { - int err; + struct ttc_params ttc_params = {}; + int tt, err; priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); @@ -1489,14 +1504,23 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) priv->netdev->hw_features &= ~NETIF_F_NTUPLE; } - err = mlx5e_create_inner_ttc_table(priv); + mlx5e_set_ttc_basic_params(priv, &ttc_params); + mlx5e_set_inner_ttc_ft_params(&ttc_params); + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) + ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn; + + err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc); if (err) { netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n", err); goto err_destroy_arfs_tables; } - err = mlx5e_create_ttc_table(priv); + mlx5e_set_ttc_ft_params(&ttc_params); + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) + ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn; + + err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc); if (err) { netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", err); @@ -1524,9 +1548,9 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) err_destroy_l2_table: mlx5e_destroy_l2_table(priv); err_destroy_ttc_table: - mlx5e_destroy_ttc_table(priv); + mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); err_destroy_inner_ttc_table: - mlx5e_destroy_inner_ttc_table(priv); + mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); err_destroy_arfs_tables:
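/* The point of threading ttc_params/mlx5e_ttc_table through these functions
 * is that consumers other than the NIC profile can now build private TTC
 * tables (the new MLX5E_TC_TTC_FT_LEVEL in en.h is for the hairpin code in
 * en_tc.c). A hypothetical extra consumer, illustrating the call order
 * (my_ttc and my_tirn are placeholders, not driver symbols):
 */
struct mlx5e_ttc_table my_ttc = {};
struct ttc_params ttc_params = {};
int tt, err;

mlx5e_set_ttc_basic_params(priv, &ttc_params);
ttc_params.ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
ttc_params.ft_attr.level = MLX5E_TC_TTC_FT_LEVEL;	/* private FT level */
ttc_params.ft_attr.prio = MLX5E_NIC_PRIO;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
	ttc_params.indir_tirn[tt] = my_tirn[tt];
err = mlx5e_create_ttc_table(priv, &ttc_params, &my_ttc);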
mlx5e_arfs_destroy_tables(priv); @@ -1537,8 +1561,8 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv) { mlx5e_destroy_vlan_table(priv); mlx5e_destroy_l2_table(priv); - mlx5e_destroy_ttc_table(priv); - mlx5e_destroy_inner_ttc_table(priv); + mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); + mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); mlx5e_arfs_destroy_tables(priv); mlx5e_ethtool_cleanup_steering(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index d8aefeed124d..47bab842c5ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -173,182 +173,23 @@ unlock: rtnl_unlock(); } -static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) +void mlx5e_update_stats(struct mlx5e_priv *priv) { - struct mlx5e_sw_stats temp, *s = &temp; - struct mlx5e_rq_stats *rq_stats; - struct mlx5e_sq_stats *sq_stats; - int i, j; - - memset(s, 0, sizeof(*s)); - for (i = 0; i < priv->channels.num; i++) { - struct mlx5e_channel *c = priv->channels.c[i]; - - rq_stats = &c->rq.stats; - - s->rx_packets += rq_stats->packets; - s->rx_bytes += rq_stats->bytes; - s->rx_lro_packets += rq_stats->lro_packets; - s->rx_lro_bytes += rq_stats->lro_bytes; - s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; - s->rx_csum_none += rq_stats->csum_none; - s->rx_csum_complete += rq_stats->csum_complete; - s->rx_csum_unnecessary += rq_stats->csum_unnecessary; - s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; - s->rx_xdp_drop += rq_stats->xdp_drop; - s->rx_xdp_tx += rq_stats->xdp_tx; - s->rx_xdp_tx_full += rq_stats->xdp_tx_full; - s->rx_wqe_err += rq_stats->wqe_err; - s->rx_mpwqe_filler += rq_stats->mpwqe_filler; - s->rx_buff_alloc_err += rq_stats->buff_alloc_err; - s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; - s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; - s->rx_page_reuse += rq_stats->page_reuse; - s->rx_cache_reuse += rq_stats->cache_reuse; - s->rx_cache_full += rq_stats->cache_full; - s->rx_cache_empty += rq_stats->cache_empty; - s->rx_cache_busy += rq_stats->cache_busy; - s->rx_cache_waive += rq_stats->cache_waive; - - for (j = 0; j < priv->channels.params.num_tc; j++) { - sq_stats = &c->sq[j].stats; - - s->tx_packets += sq_stats->packets; - s->tx_bytes += sq_stats->bytes; - s->tx_tso_packets += sq_stats->tso_packets; - s->tx_tso_bytes += sq_stats->tso_bytes; - s->tx_tso_inner_packets += sq_stats->tso_inner_packets; - s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; - s->tx_added_vlan_packets += sq_stats->added_vlan_packets; - s->tx_queue_stopped += sq_stats->stopped; - s->tx_queue_wake += sq_stats->wake; - s->tx_queue_dropped += sq_stats->dropped; - s->tx_xmit_more += sq_stats->xmit_more; - s->tx_csum_partial_inner += sq_stats->csum_partial_inner; - s->tx_csum_none += sq_stats->csum_none; - s->tx_csum_partial += sq_stats->csum_partial; - } - } - - s->link_down_events_phy = MLX5_GET(ppcnt_reg, - priv->stats.pport.phy_counters, - counter_set.phys_layer_cntrs.link_down_events); - memcpy(&priv->stats.sw, s, sizeof(*s)); -} - -static void mlx5e_update_vport_counters(struct mlx5e_priv *priv) -{ - int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); - u32 *out = (u32 *)priv->stats.vport.query_vport_out; - u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0}; - struct mlx5_core_dev *mdev = priv->mdev; - - MLX5_SET(query_vport_counter_in, in, opcode, - MLX5_CMD_OP_QUERY_VPORT_COUNTER); - MLX5_SET(query_vport_counter_in, 
in, op_mod, 0); - MLX5_SET(query_vport_counter_in, in, other_vport, 0); - - mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); -} - -static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full) -{ - struct mlx5e_pport_stats *pstats = &priv->stats.pport; - struct mlx5_core_dev *mdev = priv->mdev; - u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0}; - int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); - int prio; - void *out; - - MLX5_SET(ppcnt_reg, in, local_port, 1); - - out = pstats->IEEE_802_3_counters; - MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); - mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); - - if (!full) - return; - - out = pstats->RFC_2863_counters; - MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP); - mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); - - out = pstats->RFC_2819_counters; - MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); - mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); - - out = pstats->phy_counters; - MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); - mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); - - if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) { - out = pstats->phy_statistical_counters; - MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); - mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); - } - - if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) { - out = pstats->eth_ext_counters; - MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP); - mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); - } - - MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); - for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { - out = pstats->per_prio_counters[prio]; - MLX5_SET(ppcnt_reg, in, prio_tc, prio); - mlx5_core_access_reg(mdev, in, sz, out, sz, - MLX5_REG_PPCNT, 0, 0); - } -} - -static void mlx5e_update_q_counter(struct mlx5e_priv *priv) -{ - struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt; - u32 out[MLX5_ST_SZ_DW(query_q_counter_out)]; - int err; - - if (!priv->q_counter) - return; - - err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out)); - if (err) - return; - - qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer); -} - -static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv) -{ - struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie; - struct mlx5_core_dev *mdev = priv->mdev; - u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0}; - int sz = MLX5_ST_SZ_BYTES(mpcnt_reg); - void *out; - - if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group)) - return; - - out = pcie_stats->pcie_perf_counters; - MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP); - mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); -} + int i; -void mlx5e_update_stats(struct mlx5e_priv *priv, bool full) -{ - if (full) { - mlx5e_update_pcie_counters(priv); - mlx5e_ipsec_update_stats(priv); - } - mlx5e_update_pport_counters(priv, full); - mlx5e_update_vport_counters(priv); - mlx5e_update_q_counter(priv); - mlx5e_update_sw_counters(priv); + for (i = mlx5e_num_stats_grps - 1; i >= 0; i--) + if (mlx5e_stats_grps[i].update_stats) + mlx5e_stats_grps[i].update_stats(priv); } static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv) { - mlx5e_update_stats(priv, false); + int i; + + for (i = mlx5e_num_stats_grps - 1; i >= 0; i--) + if (mlx5e_stats_grps[i].update_stats_mask & + MLX5E_NDO_UPDATE_STATS) + 
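/* mlx5e_update_stats() now just walks a table of stat-group descriptors;
 * each group brings its own update callback plus a mask that says whether
 * ndo_get_stats64 needs it refreshed, replacing the old hard-coded "full"
 * parameter. The descriptor is, roughly (from en_stats.h in this series):
 */
struct mlx5e_stats_grp {
	u16 update_stats_mask;
	int (*get_num_stats)(struct mlx5e_priv *priv);
	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
	void (*update_stats)(struct mlx5e_priv *priv);
};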
mlx5e_stats_grps[i].update_stats(priv); } void mlx5e_update_stats_work(struct work_struct *work) @@ -582,6 +423,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; } + err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix); + if (err < 0) + goto err_rq_wq_destroy; + rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; rq->buff.headroom = params->rq_headroom; @@ -674,8 +519,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, wqe->data.lkey = rq->mkey_be; } - INIT_WORK(&rq->am.work, mlx5e_rx_am_work); - rq->am.mode = params->rx_cq_moderation.cq_period_mode; + INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work); + + switch (params->rx_cq_moderation.cq_period_mode) { + case MLX5_CQ_PERIOD_MODE_START_FROM_CQE: + rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE; + break; + case MLX5_CQ_PERIOD_MODE_START_FROM_EQE: + default: + rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; + } + rq->page_cache.head = 0; rq->page_cache.tail = 0; @@ -687,6 +541,7 @@ err_destroy_umr_mkey: err_rq_wq_destroy: if (rq->xdp_prog) bpf_prog_put(rq->xdp_prog); + xdp_rxq_info_unreg(&rq->xdp_rxq); mlx5_wq_destroy(&rq->wq_ctrl); return err; @@ -699,6 +554,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) if (rq->xdp_prog) bpf_prog_put(rq->xdp_prog); + xdp_rxq_info_unreg(&rq->xdp_rxq); + switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: mlx5e_rq_free_mpwqe_info(rq); @@ -919,7 +776,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (err) goto err_destroy_rq; - if (params->rx_am_enabled) + if (params->rx_dim_enabled) c->rq.state |= BIT(MLX5E_RQ_STATE_AM); return 0; @@ -952,7 +809,7 @@ static void mlx5e_deactivate_rq(struct mlx5e_rq *rq) static void mlx5e_close_rq(struct mlx5e_rq *rq) { - cancel_work_sync(&rq->am.work); + cancel_work_sync(&rq->dim.work); mlx5e_destroy_rq(rq); mlx5e_free_rx_descs(rq); mlx5e_free_rq(rq); @@ -1565,7 +1422,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq) } static int mlx5e_open_cq(struct mlx5e_channel *c, - struct mlx5e_cq_moder moder, + struct net_dim_cq_moder moder, struct mlx5e_cq_param *param, struct mlx5e_cq *cq) { @@ -1747,7 +1604,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel **cp) { - struct mlx5e_cq_moder icocq_moder = {0, 0}; + struct net_dim_cq_moder icocq_moder = {0, 0}; struct net_device *netdev = priv->netdev; int cpu = mlx5e_get_cpu(priv, ix); struct mlx5e_channel *c; @@ -1999,7 +1856,7 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, mlx5e_build_common_cq_param(priv, param); - param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; } static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, @@ -2203,7 +2060,7 @@ static int mlx5e_rx_hash_fn(int hfunc) MLX5_RX_HASH_FN_INVERTED_XOR8; } -static int mlx5e_bits_invert(unsigned long a, int size) +int mlx5e_bits_invert(unsigned long a, int size) { int inv = 0; int i; @@ -2765,6 +2622,9 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev, if (err) return err; + /* Mark as unused given "Drop-RQ" packets never reach XDP */ + xdp_rxq_info_unused(&rq->xdp_rxq); + rq->mdev = mdev; return 0; @@ -3084,9 +2944,6 @@ out: static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *cls_flower) { - if (cls_flower->common.chain_index) - return -EOPNOTSUPP; - switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: return mlx5e_configure_flower(priv, cls_flower); @@ 
-3104,7 +2961,7 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, { struct mlx5e_priv *priv = cb_priv; - if (!tc_can_offload(priv->netdev)) + if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) return -EOPNOTSUPP; switch (type) { @@ -3738,26 +3595,62 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb, return features; } +static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev, + struct mlx5e_txqsq *sq) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + int irqn_not_used, eqn; + struct mlx5_eq *eq; + u32 eqe_count; + + if (mlx5_vector2eqn(mdev, sq->cq.mcq.vector, &eqn, &irqn_not_used)) + return false; + + eq = mlx5_eqn2eq(mdev, eqn); + if (IS_ERR(eq)) + return false; + + netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", + eqn, eq->cons_index, eq->irqn); + + eqe_count = mlx5_eq_poll_irq_disabled(eq); + if (!eqe_count) + return false; + + netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn); + sq->channel->stats.eq_rearm++; + return true; +} + static void mlx5e_tx_timeout(struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); - bool sched_work = false; + bool reopen_channels = false; int i; netdev_err(dev, "TX timeout detected\n"); for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) { + struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i); struct mlx5e_txqsq *sq = priv->txq2sq[i]; - if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i))) + if (!netif_xmit_stopped(dev_queue)) continue; - sched_work = true; - clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); - netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n", - i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc); + netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n", + i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, + jiffies_to_usecs(jiffies - dev_queue->trans_start)); + + /* If we recover a lost interrupt, most likely TX timeout will + * be resolved, skip reopening channels + */ + if (!mlx5e_tx_timeout_eq_recover(dev, sq)) { + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); + reopen_channels = true; + } } - if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state)) + if (reopen_channels && test_bit(MLX5E_STATE_OPENED, &priv->state)) schedule_work(&priv->tx_timeout_work); } @@ -4044,9 +3937,18 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) params->rx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; - if (params->rx_am_enabled) - params->rx_cq_moderation = - mlx5e_am_get_def_profile(cq_period_mode); + if (params->rx_dim_enabled) { + switch (cq_period_mode) { + case MLX5_CQ_PERIOD_MODE_START_FROM_CQE: + params->rx_cq_moderation = + net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE); + break; + case MLX5_CQ_PERIOD_MODE_START_FROM_EQE: + default: + params->rx_cq_moderation = + net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE); + } + } MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, params->rx_cq_moderation.cq_period_mode == @@ -4108,7 +4010,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 
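/* mlx5e_tx_timeout_eq_recover() above treats a TX timeout as a possibly
 * lost completion interrupt: it runs the SQ's event queue by hand and only
 * lets the channels be reopened when nothing was recovered. The polling
 * helper it calls is added elsewhere in this series; roughly (paraphrased):
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq)
{
	u32 count_eqe;

	disable_irq(eq->irqn);
	count_eqe = eq->cons_index;
	mlx5_eq_int(eq->irqn, eq);	/* invoke the EQ handler synchronously */
	count_eqe = eq->cons_index - count_eqe;
	enable_irq(eq->irqn);

	return count_eqe;		/* EQEs processed; 0 means nothing was lost */
}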
MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; - params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode); mlx5e_set_tx_cq_mode_params(params, cq_period_mode); @@ -4315,9 +4217,6 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { mlx5e_ipsec_cleanup(priv); mlx5e_vxlan_cleanup(priv); - - if (priv->channels.params.xdp_prog) - bpf_prog_put(priv->channels.params.xdp_prog); } static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 3409d86eb06b..363d8dcb7f17 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -190,6 +190,63 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) return 0; } +static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_rep_sq *rep_sq, *tmp; + struct mlx5e_rep_priv *rpriv; + + if (esw->mode != SRIOV_OFFLOADS) + return; + + rpriv = mlx5e_rep_to_rep_priv(rep); + list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) { + mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule); + list_del(&rep_sq->list); + kfree(rep_sq); + } +} + +static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, + u16 *sqns_array, int sqns_num) +{ + struct mlx5_flow_handle *flow_rule; + struct mlx5e_rep_priv *rpriv; + struct mlx5e_rep_sq *rep_sq; + int err; + int i; + + if (esw->mode != SRIOV_OFFLOADS) + return 0; + + rpriv = mlx5e_rep_to_rep_priv(rep); + for (i = 0; i < sqns_num; i++) { + rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL); + if (!rep_sq) { + err = -ENOMEM; + goto out_err; + } + + /* Add re-inject rule to the PF/representor sqs */ + flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, + rep->vport, + sqns_array[i]); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + kfree(rep_sq); + goto out_err; + } + rep_sq->send_to_vport_rule = flow_rule; + list_add(&rep_sq->list, &rpriv->vport_sqs_list); + } + return 0; + +out_err: + mlx5e_sqs2vport_stop(esw, rep); + return err; +} + int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; @@ -210,7 +267,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) sqs[num_sqs++] = c->sq[tc].sqn; } - err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs); + err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs); kfree(sqs); out: @@ -225,7 +282,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; - mlx5_eswitch_sqs2vport_stop(esw, rep); + mlx5e_sqs2vport_stop(esw, rep); } static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv) @@ -238,7 +295,7 @@ static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv) #endif unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); - struct net_device *netdev = rpriv->rep->netdev; + struct net_device *netdev = rpriv->netdev; struct mlx5e_priv *priv = netdev_priv(netdev); rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval); @@ -259,7 +316,7 @@ static void mlx5e_rep_neigh_stats_work(struct work_struct *work) { struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv, neigh_update.neigh_stats_work.work); - 
struct net_device *netdev = rpriv->rep->netdev; + struct net_device *netdev = rpriv->netdev; struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_neigh_hash_entry *nhe; @@ -355,7 +412,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb, struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv, neigh_update.netevent_nb); struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; - struct net_device *netdev = rpriv->rep->netdev; + struct net_device *netdev = rpriv->netdev; struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_neigh_hash_entry *nhe = NULL; struct mlx5e_neigh m_neigh = {}; @@ -483,7 +540,7 @@ out_err: static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv) { struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; - struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev); + struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); unregister_netevent_notifier(&neigh_update->netevent_nb); @@ -662,9 +719,6 @@ static int mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *cls_flower) { - if (cls_flower->common.chain_index) - return -EOPNOTSUPP; - switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: return mlx5e_configure_flower(priv, cls_flower); @@ -682,7 +736,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, { struct mlx5e_priv *priv = cb_priv; - if (!tc_can_offload(priv->netdev)) + if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) return -EOPNOTSUPP; switch (type) { @@ -827,7 +881,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; - params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode); params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); @@ -906,7 +960,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) err = PTR_ERR(flow_rule); goto err_destroy_direct_tirs; } - rep->vport_rx_rule = flow_rule; + rpriv->vport_rx_rule = flow_rule; err = mlx5e_tc_init(priv); if (err) @@ -915,7 +969,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) return 0; err_del_flow_rule: - mlx5_del_flow_rules(rep->vport_rx_rule); + mlx5_del_flow_rules(rpriv->vport_rx_rule); err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); err_destroy_direct_rqts: @@ -926,10 +980,9 @@ err_destroy_direct_rqts: static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5_eswitch_rep *rep = rpriv->rep; mlx5e_tc_cleanup(priv); - mlx5_del_flow_rules(rep->vport_rx_rule); + mlx5_del_flow_rules(rpriv->vport_rx_rule); mlx5e_destroy_direct_tirs(priv); mlx5e_destroy_direct_rqts(priv); } @@ -969,10 +1022,10 @@ static const struct mlx5e_profile mlx5e_rep_profile = { /* e-Switch vport representors */ static int -mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) +mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) { - struct mlx5e_priv *priv = netdev_priv(rep->netdev); - struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); + struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); int err; @@ -994,10 +1047,10 @@ err_remove_sqs: } static void -mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) +mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep) { - 
struct mlx5e_priv *priv = netdev_priv(rep->netdev); - struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); + struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) mlx5e_remove_sqs_fwd_rules(priv); @@ -1010,8 +1063,9 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) } static int -mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) +mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) { + struct mlx5e_rep_priv *uplink_rpriv; struct mlx5e_rep_priv *rpriv; struct net_device *netdev; struct mlx5e_priv *upriv; @@ -1021,7 +1075,7 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) if (!rpriv) return -ENOMEM; - netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv); + netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv); if (!netdev) { pr_warn("Failed to create representor netdev for vport %d\n", rep->vport); @@ -1029,8 +1083,10 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) return -EINVAL; } - rep->netdev = netdev; + rpriv->netdev = netdev; rpriv->rep = rep; + rep->rep_if[REP_ETH].priv = rpriv; + INIT_LIST_HEAD(&rpriv->vport_sqs_list); err = mlx5e_attach_netdev(netdev_priv(netdev)); if (err) { @@ -1046,7 +1102,8 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) goto err_detach_netdev; } - upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw)); + uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH); + upriv = netdev_priv(uplink_rpriv->netdev); err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb, upriv); if (err) @@ -1078,16 +1135,19 @@ err_destroy_netdev: } static void -mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) +mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep) { - struct net_device *netdev = rep->netdev; + struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); + struct net_device *netdev = rpriv->netdev; struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_rep_priv *uplink_rpriv; void *ppriv = priv->ppriv; struct mlx5e_priv *upriv; - unregister_netdev(rep->netdev); - upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw)); + unregister_netdev(netdev); + uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, + REP_ETH); + upriv = netdev_priv(uplink_rpriv->netdev); tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb, upriv); mlx5e_rep_neigh_cleanup(rpriv); @@ -1102,18 +1162,13 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv) struct mlx5_eswitch *esw = mdev->priv.eswitch; int total_vfs = MLX5_TOTAL_VPORTS(mdev); int vport; - u8 mac[ETH_ALEN]; - - mlx5_query_nic_vport_mac_address(mdev, 0, mac); for (vport = 1; vport < total_vfs; vport++) { - struct mlx5_eswitch_rep rep; + struct mlx5_eswitch_rep_if rep_if = {}; - rep.load = mlx5e_vport_rep_load; - rep.unload = mlx5e_vport_rep_unload; - rep.vport = vport; - ether_addr_copy(rep.hw_id, mac); - mlx5_eswitch_register_vport_rep(esw, vport, &rep); + rep_if.load = mlx5e_vport_rep_load; + rep_if.unload = mlx5e_vport_rep_unload; + mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH); } } @@ -1125,21 +1180,24 @@ static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv) int vport; for (vport = 1; vport < total_vfs; vport++) - mlx5_eswitch_unregister_vport_rep(esw, vport); + 
mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH); } void mlx5e_register_vport_reps(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_eswitch *esw = mdev->priv.eswitch; - struct mlx5_eswitch_rep rep; + struct mlx5_eswitch_rep_if rep_if; + struct mlx5e_rep_priv *rpriv; + + rpriv = priv->ppriv; + rpriv->netdev = priv->netdev; - mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id); - rep.load = mlx5e_nic_rep_load; - rep.unload = mlx5e_nic_rep_unload; - rep.vport = FDB_UPLINK_VPORT; - rep.netdev = priv->netdev; - mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport*/ + rep_if.load = mlx5e_nic_rep_load; + rep_if.unload = mlx5e_nic_rep_unload; + rep_if.priv = rpriv; + INIT_LIST_HEAD(&rpriv->vport_sqs_list); + mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport*/ mlx5e_rep_register_vf_vports(priv); /* VFs vports */ } @@ -1150,7 +1208,7 @@ void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv) struct mlx5_eswitch *esw = mdev->priv.eswitch; mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */ - mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/ + mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF*/ } void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index 5659ed9f51e6..b9b481f2833a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -56,8 +56,17 @@ struct mlx5e_neigh_update_table { struct mlx5e_rep_priv { struct mlx5_eswitch_rep *rep; struct mlx5e_neigh_update_table neigh_update; + struct net_device *netdev; + struct mlx5_flow_handle *vport_rx_rule; + struct list_head vport_sqs_list; }; +static inline +struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep) +{ + return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv; +} + struct mlx5e_neigh { struct net_device *dev; union { @@ -124,6 +133,11 @@ struct mlx5e_encap_entry { int encap_size; }; +struct mlx5e_rep_sq { + struct mlx5_flow_handle *send_to_vport_rule; + struct list_head list; +}; + void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev); void mlx5e_register_vport_reps(struct mlx5e_priv *priv); void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 5b499c7a698f..0d4bb0688faa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -495,8 +495,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq, mlx5_cqwq_pop(&cq->wq); if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) { - WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n", - cqe->op_own); + netdev_WARN_ONCE(cq->channel->netdev, + "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own); return; } @@ -506,9 +506,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq, } if (unlikely(icowi->opcode != MLX5_OPCODE_NOP)) - WARN_ONCE(true, - "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n", - icowi->opcode); + netdev_WARN_ONCE(cq->channel->netdev, + "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode); } static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) @@ -632,7 +631,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, return; } - if (is_last_ethertype_ip(skb, &network_depth)) { + if (likely(is_last_ethertype_ip(skb, &network_depth))) { skb->ip_summed = 
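/* The representor rework above hangs everything off a per-module interface
 * struct instead of fields embedded in mlx5_eswitch_rep itself;
 * rep->rep_if[REP_ETH].priv is the mlx5e_rep_priv returned by
 * mlx5e_rep_to_rep_priv(). The interface struct is, roughly (from eswitch.h
 * in this series):
 */
struct mlx5_eswitch_rep_if {
	int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep);
	void (*unload)(struct mlx5_eswitch_rep *rep);
	void *priv;	/* per-module state, e.g. mlx5e_rep_priv for REP_ETH */
	bool valid;
};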
CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); if (network_depth > ETH_HLEN) @@ -812,6 +811,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq, xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + *len; xdp.data_hard_start = va; + xdp.rxq = &rq->xdp_rxq; act = bpf_prog_run_xdp(prog, &xdp); switch (act) { @@ -1175,7 +1175,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { + struct hwtstamp_config *tstamp; struct net_device *netdev; + struct mlx5e_priv *priv; char *pseudo_header; u32 qpn; u8 *dgid; @@ -1194,6 +1196,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, return; } + priv = mlx5i_epriv(netdev); + tstamp = &priv->tstamp; + g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET; if ((!g) || dgid[0] != 0xff) @@ -1214,7 +1219,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); - if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp))) + if (unlikely(mlx5e_rx_hw_stamp(tstamp))) skb_hwtstamps(skb)->hwtstamp = mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c deleted file mode 100644 index b69a705fd787..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ /dev/null @@ -1,347 +0,0 @@ -/* - * Copyright (c) 2016, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "en.h" - -/* Adaptive moderation profiles */ -#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 -#define MLX5E_RX_AM_DEF_PROFILE_CQE 1 -#define MLX5E_RX_AM_DEF_PROFILE_EQE 1 -#define MLX5E_PARAMS_AM_NUM_PROFILES 5 - -/* All profiles sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */ -#define MLX5_AM_EQE_PROFILES { \ - {1, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {8, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {64, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ -} - -#define MLX5_AM_CQE_PROFILES { \ - {2, 256}, \ - {8, 128}, \ - {16, 64}, \ - {32, 64}, \ - {64, 64} \ -} - -static const struct mlx5e_cq_moder -profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = { - MLX5_AM_EQE_PROFILES, - MLX5_AM_CQE_PROFILES, -}; - -static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix) -{ - struct mlx5e_cq_moder cq_moder; - - cq_moder = profile[cq_period_mode][ix]; - cq_moder.cq_period_mode = cq_period_mode; - return cq_moder; -} - -struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode) -{ - int default_profile_ix; - - if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) - default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE; - else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */ - default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE; - - return mlx5e_am_get_profile(rx_cq_period_mode, default_profile_ix); -} - -/* Adaptive moderation logic */ -enum { - MLX5E_AM_START_MEASURE, - MLX5E_AM_MEASURE_IN_PROGRESS, - MLX5E_AM_APPLY_NEW_PROFILE, -}; - -enum { - MLX5E_AM_PARKING_ON_TOP, - MLX5E_AM_PARKING_TIRED, - MLX5E_AM_GOING_RIGHT, - MLX5E_AM_GOING_LEFT, -}; - -enum { - MLX5E_AM_STATS_WORSE, - MLX5E_AM_STATS_SAME, - MLX5E_AM_STATS_BETTER, -}; - -enum { - MLX5E_AM_STEPPED, - MLX5E_AM_TOO_TIRED, - MLX5E_AM_ON_EDGE, -}; - -static bool mlx5e_am_on_top(struct mlx5e_rx_am *am) -{ - switch (am->tune_state) { - case MLX5E_AM_PARKING_ON_TOP: - case MLX5E_AM_PARKING_TIRED: - return true; - case MLX5E_AM_GOING_RIGHT: - return (am->steps_left > 1) && (am->steps_right == 1); - default: /* MLX5E_AM_GOING_LEFT */ - return (am->steps_right > 1) && (am->steps_left == 1); - } -} - -static void mlx5e_am_turn(struct mlx5e_rx_am *am) -{ - switch (am->tune_state) { - case MLX5E_AM_PARKING_ON_TOP: - case MLX5E_AM_PARKING_TIRED: - break; - case MLX5E_AM_GOING_RIGHT: - am->tune_state = MLX5E_AM_GOING_LEFT; - am->steps_left = 0; - break; - case MLX5E_AM_GOING_LEFT: - am->tune_state = MLX5E_AM_GOING_RIGHT; - am->steps_right = 0; - break; - } -} - -static int mlx5e_am_step(struct mlx5e_rx_am *am) -{ - if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2)) - return MLX5E_AM_TOO_TIRED; - - switch (am->tune_state) { - case MLX5E_AM_PARKING_ON_TOP: - case MLX5E_AM_PARKING_TIRED: - break; - case MLX5E_AM_GOING_RIGHT: - if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1)) - return MLX5E_AM_ON_EDGE; - am->profile_ix++; - am->steps_right++; - break; - case MLX5E_AM_GOING_LEFT: - if (am->profile_ix == 0) - return MLX5E_AM_ON_EDGE; - am->profile_ix--; - am->steps_left++; - break; - } - - am->tired++; - return MLX5E_AM_STEPPED; -} - -static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am) -{ - am->steps_right = 0; - am->steps_left = 0; - am->tired = 0; - am->tune_state = MLX5E_AM_PARKING_ON_TOP; -} - -static void mlx5e_am_park_tired(struct mlx5e_rx_am *am) -{ - am->steps_right = 0; - am->steps_left = 0; - am->tune_state = 
MLX5E_AM_PARKING_TIRED; -} - -static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am) -{ - am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT : - MLX5E_AM_GOING_RIGHT; - mlx5e_am_step(am); -} - -#define IS_SIGNIFICANT_DIFF(val, ref) \ - (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ - -static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, - struct mlx5e_rx_am_stats *prev) -{ - if (!prev->bpms) - return curr->bpms ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_SAME; - - if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) - return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_WORSE; - - if (!prev->ppms) - return curr->ppms ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_SAME; - - if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) - return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_WORSE; - if (!prev->epms) - return MLX5E_AM_STATS_SAME; - - if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) - return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_WORSE; - - return MLX5E_AM_STATS_SAME; -} - -static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats, - struct mlx5e_rx_am *am) -{ - int prev_state = am->tune_state; - int prev_ix = am->profile_ix; - int stats_res; - int step_res; - - switch (am->tune_state) { - case MLX5E_AM_PARKING_ON_TOP: - stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats); - if (stats_res != MLX5E_AM_STATS_SAME) - mlx5e_am_exit_parking(am); - break; - - case MLX5E_AM_PARKING_TIRED: - am->tired--; - if (!am->tired) - mlx5e_am_exit_parking(am); - break; - - case MLX5E_AM_GOING_RIGHT: - case MLX5E_AM_GOING_LEFT: - stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats); - if (stats_res != MLX5E_AM_STATS_BETTER) - mlx5e_am_turn(am); - - if (mlx5e_am_on_top(am)) { - mlx5e_am_park_on_top(am); - break; - } - - step_res = mlx5e_am_step(am); - switch (step_res) { - case MLX5E_AM_ON_EDGE: - mlx5e_am_park_on_top(am); - break; - case MLX5E_AM_TOO_TIRED: - mlx5e_am_park_tired(am); - break; - } - - break; - } - - if ((prev_state != MLX5E_AM_PARKING_ON_TOP) || - (am->tune_state != MLX5E_AM_PARKING_ON_TOP)) - am->prev_stats = *curr_stats; - - return am->profile_ix != prev_ix; -} - -static void mlx5e_am_sample(struct mlx5e_rq *rq, - struct mlx5e_rx_am_sample *s) -{ - s->time = ktime_get(); - s->pkt_ctr = rq->stats.packets; - s->byte_ctr = rq->stats.bytes; - s->event_ctr = rq->cq.event_ctr; -} - -#define MLX5E_AM_NEVENTS 64 -#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) -#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) - -static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, - struct mlx5e_rx_am_sample *end, - struct mlx5e_rx_am_stats *curr_stats) -{ - /* u32 holds up to 71 minutes, should be enough */ - u32 delta_us = ktime_us_delta(end->time, start->time); - u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); - u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, - start->byte_ctr); - - if (!delta_us) - return; - - curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); - curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); - curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC, - delta_us); -} - -void mlx5e_rx_am_work(struct work_struct *work) -{ - struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am, - work); - struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am); - struct mlx5e_cq_moder cur_profile = 
profile[am->mode][am->profile_ix]; - - mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq, - cur_profile.usec, cur_profile.pkts); - - am->state = MLX5E_AM_START_MEASURE; -} - -void mlx5e_rx_am(struct mlx5e_rq *rq) -{ - struct mlx5e_rx_am *am = &rq->am; - struct mlx5e_rx_am_sample end_sample; - struct mlx5e_rx_am_stats curr_stats; - u16 nevents; - - switch (am->state) { - case MLX5E_AM_MEASURE_IN_PROGRESS: - nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr, - am->start_sample.event_ctr); - if (nevents < MLX5E_AM_NEVENTS) - break; - mlx5e_am_sample(rq, &end_sample); - mlx5e_am_calc_stats(&am->start_sample, &end_sample, - &curr_stats); - if (mlx5e_am_decision(&curr_stats, am)) { - am->state = MLX5E_AM_APPLY_NEW_PROFILE; - schedule_work(&am->work); - break; - } - /* fall through */ - case MLX5E_AM_START_MEASURE: - mlx5e_am_sample(rq, &am->start_sample); - am->state = MLX5E_AM_MEASURE_IN_PROGRESS; - break; - case MLX5E_AM_APPLY_NEW_PROFILE: - break; - } -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index b74ddc7984bc..5f0f3493d747 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -71,6 +71,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) }, }; @@ -99,6 +100,72 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) return idx; } +static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_sw_stats temp, *s = &temp; + struct mlx5e_rq_stats *rq_stats; + struct mlx5e_sq_stats *sq_stats; + struct mlx5e_ch_stats *ch_stats; + int i, j; + + memset(s, 0, sizeof(*s)); + for (i = 0; i < priv->channels.num; i++) { + struct mlx5e_channel *c = priv->channels.c[i]; + + rq_stats = &c->rq.stats; + ch_stats = &c->stats; + + s->rx_packets += rq_stats->packets; + s->rx_bytes += rq_stats->bytes; + s->rx_lro_packets += rq_stats->lro_packets; + s->rx_lro_bytes += rq_stats->lro_bytes; + s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; + s->rx_csum_none += rq_stats->csum_none; + s->rx_csum_complete += rq_stats->csum_complete; + s->rx_csum_unnecessary += rq_stats->csum_unnecessary; + s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; + s->rx_xdp_drop += rq_stats->xdp_drop; + s->rx_xdp_tx += rq_stats->xdp_tx; + s->rx_xdp_tx_full += rq_stats->xdp_tx_full; + s->rx_wqe_err += rq_stats->wqe_err; + s->rx_mpwqe_filler += rq_stats->mpwqe_filler; + s->rx_buff_alloc_err += rq_stats->buff_alloc_err; + s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; + s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; + s->rx_page_reuse += rq_stats->page_reuse; + s->rx_cache_reuse += rq_stats->cache_reuse; + s->rx_cache_full += rq_stats->cache_full; + s->rx_cache_empty += rq_stats->cache_empty; + s->rx_cache_busy += rq_stats->cache_busy; + s->rx_cache_waive += rq_stats->cache_waive; + s->ch_eq_rearm += ch_stats->eq_rearm; + + for (j = 0; j < priv->channels.params.num_tc; j++) { + sq_stats = &c->sq[j].stats; + + s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; + s->tx_tso_packets += sq_stats->tso_packets; + s->tx_tso_bytes += sq_stats->tso_bytes; + 
s->tx_tso_inner_packets += sq_stats->tso_inner_packets; + s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; + s->tx_added_vlan_packets += sq_stats->added_vlan_packets; + s->tx_queue_stopped += sq_stats->stopped; + s->tx_queue_wake += sq_stats->wake; + s->tx_queue_dropped += sq_stats->dropped; + s->tx_xmit_more += sq_stats->xmit_more; + s->tx_csum_partial_inner += sq_stats->csum_partial_inner; + s->tx_csum_none += sq_stats->csum_none; + s->tx_csum_partial += sq_stats->csum_partial; + } + } + + s->link_down_events_phy = MLX5_GET(ppcnt_reg, + priv->stats.pport.phy_counters, + counter_set.phys_layer_cntrs.link_down_events); + memcpy(&priv->stats.sw, s, sizeof(*s)); +} + static const struct counter_desc q_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) }, }; @@ -128,6 +195,22 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) return idx; } +static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt; + u32 out[MLX5_ST_SZ_DW(query_q_counter_out)]; + int err; + + if (!priv->q_counter) + return; + + err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out)); + if (err) + return; + + qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer); +} + #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c) static const struct counter_desc vport_stats_desc[] = { { "rx_vport_unicast_packets", @@ -200,6 +283,19 @@ static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } +static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv) +{ + int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); + u32 *out = (u32 *)priv->stats.vport.query_vport_out; + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0}; + struct mlx5_core_dev *mdev = priv->mdev; + + MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER); + MLX5_SET(query_vport_counter_in, in, op_mod, 0); + MLX5_SET(query_vport_counter_in, in, other_vport, 0); + mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); +} + #define PPORT_802_3_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high) @@ -252,6 +348,20 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } +static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + void *out; + + MLX5_SET(ppcnt_reg, in, local_port, 1); + out = pstats->IEEE_802_3_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); +} + #define PPORT_2863_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_2863_cntrs_grp_data_layout.c##_high) @@ -289,6 +399,20 @@ static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } +static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + void *out; + + MLX5_SET(ppcnt_reg, in, local_port, 1); + out = pstats->RFC_2863_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); +} + #define PPORT_2819_OFF(c) \ 
MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_2819_cntrs_grp_data_layout.c##_high) @@ -336,6 +460,20 @@ static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } +static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + void *out; + + MLX5_SET(ppcnt_reg, in, local_port, 1); + out = pstats->RFC_2819_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); +} + #define PPORT_PHY_STATISTICAL_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.phys_layer_statistical_cntrs.c##_high) @@ -376,6 +514,27 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) return idx; } +static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + void *out; + + MLX5_SET(ppcnt_reg, in, local_port, 1); + out = pstats->phy_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + + if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) + return; + + out = pstats->phy_statistical_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); +} + #define PPORT_ETH_EXT_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_extended_cntrs_grp_data_layout.c##_high) @@ -418,6 +577,23 @@ static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } +static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + void *out; + + if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) + return; + + MLX5_SET(ppcnt_reg, in, local_port, 1); + out = pstats->eth_ext_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); +} + #define PCIE_PERF_OFF(c) \ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c) static const struct counter_desc pcie_perf_stats_desc[] = { @@ -505,6 +681,22 @@ static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } +static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(mpcnt_reg); + void *out; + + if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group)) + return; + + out = pcie_stats->pcie_perf_counters; + MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); +} + #define PPORT_PER_PRIO_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_per_prio_grp_data_layout.c##_high) @@ -656,6 +848,47 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv, return idx; } +static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv) +{ + return 
mlx5e_grp_per_prio_traffic_get_num_stats(priv) + + mlx5e_grp_per_prio_pfc_get_num_stats(priv); +} + +static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data, + int idx) +{ + idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx); + idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx); + return idx; +} + +static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data, + int idx) +{ + idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx); + idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx); + return idx; +} + +static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + int prio; + void *out; + + MLX5_SET(ppcnt_reg, in, local_port, 1); + MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + out = pstats->per_prio_counters[prio]; + MLX5_SET(ppcnt_reg, in, prio_tc, prio); + mlx5_core_access_reg(mdev, in, sz, out, sz, + MLX5_REG_PPCNT, 0, 0); + } +} + static const struct counter_desc mlx5e_pme_status_desc[] = { { "module_unplug", 8 }, }; @@ -723,6 +956,11 @@ static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx + mlx5e_ipsec_get_stats(priv, data + idx); } +static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv) +{ + mlx5e_ipsec_update_stats(priv); +} + static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, @@ -767,12 +1005,18 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, }; +static const struct counter_desc ch_stats_desc[] = { + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) }, +}; + #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) +#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc) static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) { return (NUM_RQ_STATS * priv->channels.num) + + (NUM_CH_STATS * priv->channels.num) + (NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc); } @@ -785,6 +1029,11 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, return idx; for (i = 0; i < priv->channels.num; i++) + for (j = 0; j < NUM_CH_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + ch_stats_desc[j].format, i); + + for (i = 0; i < priv->channels.num; i++) for (j = 0; j < NUM_RQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i); @@ -808,6 +1057,12 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; for (i = 0; i < channels->num; i++) + for (j = 0; j < NUM_CH_STATS; j++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&channels->c[i]->stats, + ch_stats_desc, j); + + for (i = 0; i < channels->num; i++) for (j = 0; j < NUM_RQ_STATS; j++) data[idx++] = MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats, @@ -823,61 +1078,71 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } +/* The stats groups order is opposite to the update_stats() order calls */ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { { .get_num_stats = mlx5e_grp_sw_get_num_stats, .fill_strings = mlx5e_grp_sw_fill_strings, .fill_stats = mlx5e_grp_sw_fill_stats, + .update_stats_mask = 
MLX5E_NDO_UPDATE_STATS, + .update_stats = mlx5e_grp_sw_update_stats, }, { .get_num_stats = mlx5e_grp_q_get_num_stats, .fill_strings = mlx5e_grp_q_fill_strings, .fill_stats = mlx5e_grp_q_fill_stats, + .update_stats_mask = MLX5E_NDO_UPDATE_STATS, + .update_stats = mlx5e_grp_q_update_stats, }, { .get_num_stats = mlx5e_grp_vport_get_num_stats, .fill_strings = mlx5e_grp_vport_fill_strings, .fill_stats = mlx5e_grp_vport_fill_stats, + .update_stats_mask = MLX5E_NDO_UPDATE_STATS, + .update_stats = mlx5e_grp_vport_update_stats, }, { .get_num_stats = mlx5e_grp_802_3_get_num_stats, .fill_strings = mlx5e_grp_802_3_fill_strings, .fill_stats = mlx5e_grp_802_3_fill_stats, + .update_stats_mask = MLX5E_NDO_UPDATE_STATS, + .update_stats = mlx5e_grp_802_3_update_stats, }, { .get_num_stats = mlx5e_grp_2863_get_num_stats, .fill_strings = mlx5e_grp_2863_fill_strings, .fill_stats = mlx5e_grp_2863_fill_stats, + .update_stats = mlx5e_grp_2863_update_stats, }, { .get_num_stats = mlx5e_grp_2819_get_num_stats, .fill_strings = mlx5e_grp_2819_fill_strings, .fill_stats = mlx5e_grp_2819_fill_stats, + .update_stats = mlx5e_grp_2819_update_stats, }, { .get_num_stats = mlx5e_grp_phy_get_num_stats, .fill_strings = mlx5e_grp_phy_fill_strings, .fill_stats = mlx5e_grp_phy_fill_stats, + .update_stats = mlx5e_grp_phy_update_stats, }, { .get_num_stats = mlx5e_grp_eth_ext_get_num_stats, .fill_strings = mlx5e_grp_eth_ext_fill_strings, .fill_stats = mlx5e_grp_eth_ext_fill_stats, + .update_stats = mlx5e_grp_eth_ext_update_stats, }, { .get_num_stats = mlx5e_grp_pcie_get_num_stats, .fill_strings = mlx5e_grp_pcie_fill_strings, .fill_stats = mlx5e_grp_pcie_fill_stats, + .update_stats = mlx5e_grp_pcie_update_stats, }, { - .get_num_stats = mlx5e_grp_per_prio_traffic_get_num_stats, - .fill_strings = mlx5e_grp_per_prio_traffic_fill_strings, - .fill_stats = mlx5e_grp_per_prio_traffic_fill_stats, - }, - { - .get_num_stats = mlx5e_grp_per_prio_pfc_get_num_stats, - .fill_strings = mlx5e_grp_per_prio_pfc_fill_strings, - .fill_stats = mlx5e_grp_per_prio_pfc_fill_stats, + .get_num_stats = mlx5e_grp_per_prio_get_num_stats, + .fill_strings = mlx5e_grp_per_prio_fill_strings, + .fill_stats = mlx5e_grp_per_prio_fill_stats, + .update_stats = mlx5e_grp_per_prio_update_stats, }, { .get_num_stats = mlx5e_grp_pme_get_num_stats, @@ -888,6 +1153,7 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .get_num_stats = mlx5e_grp_ipsec_get_num_stats, .fill_strings = mlx5e_grp_ipsec_fill_strings, .fill_stats = mlx5e_grp_ipsec_fill_stats, + .update_stats = mlx5e_grp_ipsec_update_stats, }, { .get_num_stats = mlx5e_grp_channels_get_num_stats, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index d679e21f686e..0b3320a2b072 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -44,6 +44,7 @@ #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld) #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld) +#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld) struct counter_desc { char format[ETH_GSTRING_LEN]; @@ -88,6 +89,7 @@ struct mlx5e_sw_stats { u64 rx_cache_empty; u64 rx_cache_busy; u64 rx_cache_waive; + u64 ch_eq_rearm; /* Special handling counters */ u64 link_down_events_phy; @@ -192,6 +194,10 @@ struct mlx5e_sq_stats { u64 dropped; }; +struct mlx5e_ch_stats { + u64 eq_rearm; +}; + struct mlx5e_stats { struct 
mlx5e_sw_stats sw; struct mlx5e_qcounter_stats qcnt; @@ -201,11 +207,17 @@ struct mlx5e_stats { struct mlx5e_pcie_stats pcie; }; +enum { + MLX5E_NDO_UPDATE_STATS = BIT(0x1), +}; + struct mlx5e_priv; struct mlx5e_stats_grp { + u16 update_stats_mask; int (*get_num_stats)(struct mlx5e_priv *priv); int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx); int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx); + void (*update_stats)(struct mlx5e_priv *priv); }; extern const struct mlx5e_stats_grp mlx5e_stats_grps[]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 55979ec2e88a..fd98b0dc610f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -51,17 +51,22 @@ #include "en_tc.h" #include "eswitch.h" #include "vxlan.h" +#include "fs_core.h" struct mlx5_nic_flow_attr { u32 action; u32 flow_tag; u32 mod_hdr_id; + u32 hairpin_tirn; + struct mlx5_flow_table *hairpin_ft; }; enum { MLX5E_TC_FLOW_ESWITCH = BIT(0), MLX5E_TC_FLOW_NIC = BIT(1), MLX5E_TC_FLOW_OFFLOADED = BIT(2), + MLX5E_TC_FLOW_HAIRPIN = BIT(3), + MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(4), }; struct mlx5e_tc_flow { @@ -71,6 +76,7 @@ struct mlx5e_tc_flow { struct mlx5_flow_handle *rule; struct list_head encap; /* flows sharing the same encap ID */ struct list_head mod_hdr; /* flows sharing the same mod hdr ID */ + struct list_head hairpin; /* flows sharing the same hairpin */ union { struct mlx5_esw_flow_attr esw_attr[0]; struct mlx5_nic_flow_attr nic_attr[0]; @@ -93,6 +99,32 @@ enum { #define MLX5E_TC_TABLE_NUM_GROUPS 4 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16) +struct mlx5e_hairpin { + struct mlx5_hairpin *pair; + + struct mlx5_core_dev *func_mdev; + struct mlx5e_priv *func_priv; + u32 tdn; + u32 tirn; + + int num_channels; + struct mlx5e_rqt indir_rqt; + u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; + struct mlx5e_ttc_table ttc; +}; + +struct mlx5e_hairpin_entry { + /* a node of a hash table which keeps all the hairpin entries */ + struct hlist_node hairpin_hlist; + + /* flows sharing the same hairpin */ + struct list_head flows; + + u16 peer_vhca_id; + u8 prio; + struct mlx5e_hairpin *hp; +}; + struct mod_hdr_key { int num_actions; void *actions; @@ -222,6 +254,417 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv, } } +static +struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex) +{ + struct net_device *netdev; + struct mlx5e_priv *priv; + + netdev = __dev_get_by_index(net, ifindex); + priv = netdev_priv(netdev); + return priv->mdev; +} + +static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp) +{ + u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0}; + void *tirc; + int err; + + err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn); + if (err) + goto alloc_tdn_err; + + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); + MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]); + MLX5_SET(tirc, tirc, transport_domain, hp->tdn); + + err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn); + if (err) + goto create_tir_err; + + return 0; + +create_tir_err: + mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn); +alloc_tdn_err: + return err; +} + +static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp) +{ + mlx5_core_destroy_tir(hp->func_mdev, hp->tirn); + mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn); +} + +static void 
mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc) +{ + u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn; + struct mlx5e_priv *priv = hp->func_priv; + int i, ix, sz = MLX5E_INDIR_RQT_SIZE; + + mlx5e_build_default_indir_rqt(indirection_rqt, sz, + hp->num_channels); + + for (i = 0; i < sz; i++) { + ix = i; + if (priv->channels.params.rss_hfunc == ETH_RSS_HASH_XOR) + ix = mlx5e_bits_invert(i, ilog2(sz)); + ix = indirection_rqt[ix]; + rqn = hp->pair->rqn[ix]; + MLX5_SET(rqtc, rqtc, rq_num[i], rqn); + } +} + +static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp) +{ + int inlen, err, sz = MLX5E_INDIR_RQT_SIZE; + struct mlx5e_priv *priv = hp->func_priv; + struct mlx5_core_dev *mdev = priv->mdev; + void *rqtc; + u32 *in; + + inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); + + MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); + MLX5_SET(rqtc, rqtc, rqt_max_size, sz); + + mlx5e_hairpin_fill_rqt_rqns(hp, rqtc); + + err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn); + if (!err) + hp->indir_rqt.enabled = true; + + kvfree(in); + return err; +} + +static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp) +{ + struct mlx5e_priv *priv = hp->func_priv; + u32 in[MLX5_ST_SZ_DW(create_tir_in)]; + int tt, i, err; + void *tirc; + + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in)); + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + + MLX5_SET(tirc, tirc, transport_domain, hp->tdn); + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); + MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn); + mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false); + + err = mlx5_core_create_tir(hp->func_mdev, in, + MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]); + if (err) { + mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err); + goto err_destroy_tirs; + } + } + return 0; + +err_destroy_tirs: + for (i = 0; i < tt; i++) + mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]); + return err; +} + +static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp) +{ + int tt; + + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) + mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]); +} + +static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp, + struct ttc_params *ttc_params) +{ + struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr; + int tt; + + memset(ttc_params, 0, sizeof(*ttc_params)); + + ttc_params->any_tt_tirn = hp->tirn; + + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) + ttc_params->indir_tirn[tt] = hp->indir_tirn[tt]; + + ft_attr->max_fte = MLX5E_NUM_TT; + ft_attr->level = MLX5E_TC_TTC_FT_LEVEL; + ft_attr->prio = MLX5E_TC_PRIO; +} + +static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp) +{ + struct mlx5e_priv *priv = hp->func_priv; + struct ttc_params ttc_params; + int err; + + err = mlx5e_hairpin_create_indirect_rqt(hp); + if (err) + return err; + + err = mlx5e_hairpin_create_indirect_tirs(hp); + if (err) + goto err_create_indirect_tirs; + + mlx5e_hairpin_set_ttc_params(hp, &ttc_params); + err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc); + if (err) + goto err_create_ttc_table; + + netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n", + hp->num_channels, hp->ttc.ft.t->id); + + return 0; + +err_create_ttc_table: + mlx5e_hairpin_destroy_indirect_tirs(hp); 
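/* Illustrative aside, not from the patch: mlx5e_hairpin_fill_rqt_rqns()
 * above spreads the hairpin RQs over the MLX5E_INDIR_RQT_SIZE indirection
 * slots, and for the XOR RSS hash function the slot index is bit-reversed
 * before the table lookup. A self-contained sketch of that mapping, where
 * bits_invert() stands in for the driver's mlx5e_bits_invert() and the
 * sizes are toy values, not the driver's:
 */
#include <stdio.h>

static unsigned int bits_invert(unsigned int i, int nbits)
{
	unsigned int r = 0;
	int b;

	/* mirror the low nbits of i, e.g. 0b001 -> 0b100 for nbits == 3 */
	for (b = 0; b < nbits; b++)
		if (i & (1U << b))
			r |= 1U << (nbits - 1 - b);
	return r;
}

int main(void)
{
	const int sz = 8, num_channels = 3;	/* assumed toy sizes */
	int i;

	for (i = 0; i < sz; i++) {
		/* default table is slot % num_channels; the XOR-hash case
		 * reverses the slot bits first (ilog2(8) == 3 bits here)
		 */
		int ix = bits_invert(i, 3);

		printf("slot %d -> channel rq %d\n", i, ix % num_channels);
	}
	return 0;
}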
+err_create_indirect_tirs: + mlx5e_destroy_rqt(priv, &hp->indir_rqt); + + return err; +} + +static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp) +{ + struct mlx5e_priv *priv = hp->func_priv; + + mlx5e_destroy_ttc_table(priv, &hp->ttc); + mlx5e_hairpin_destroy_indirect_tirs(hp); + mlx5e_destroy_rqt(priv, &hp->indir_rqt); +} + +static struct mlx5e_hairpin * +mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params, + int peer_ifindex) +{ + struct mlx5_core_dev *func_mdev, *peer_mdev; + struct mlx5e_hairpin *hp; + struct mlx5_hairpin *pair; + int err; + + hp = kzalloc(sizeof(*hp), GFP_KERNEL); + if (!hp) + return ERR_PTR(-ENOMEM); + + func_mdev = priv->mdev; + peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex); + + pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params); + if (IS_ERR(pair)) { + err = PTR_ERR(pair); + goto create_pair_err; + } + hp->pair = pair; + hp->func_mdev = func_mdev; + hp->func_priv = priv; + hp->num_channels = params->num_channels; + + err = mlx5e_hairpin_create_transport(hp); + if (err) + goto create_transport_err; + + if (hp->num_channels > 1) { + err = mlx5e_hairpin_rss_init(hp); + if (err) + goto rss_init_err; + } + + return hp; + +rss_init_err: + mlx5e_hairpin_destroy_transport(hp); +create_transport_err: + mlx5_core_hairpin_destroy(hp->pair); +create_pair_err: + kfree(hp); + return ERR_PTR(err); +} + +static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp) +{ + if (hp->num_channels > 1) + mlx5e_hairpin_rss_cleanup(hp); + mlx5e_hairpin_destroy_transport(hp); + mlx5_core_hairpin_destroy(hp->pair); + kvfree(hp); +} + +static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio) +{ + return (peer_vhca_id << 16 | prio); +} + +static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv, + u16 peer_vhca_id, u8 prio) +{ + struct mlx5e_hairpin_entry *hpe; + u32 hash_key = hash_hairpin_info(peer_vhca_id, prio); + + hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe, + hairpin_hlist, hash_key) { + if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) + return hpe; + } + + return NULL; +} + +#define UNKNOWN_MATCH_PRIO 8 + +static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, u8 *match_prio) +{ + void *headers_c, *headers_v; + u8 prio_val, prio_mask = 0; + bool vlan_present; + +#ifdef CONFIG_MLX5_CORE_EN_DCB + if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) { + netdev_warn(priv->netdev, + "only PCP trust state supported for hairpin\n"); + return -EOPNOTSUPP; + } +#endif + headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); + + vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag); + if (vlan_present) { + prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio); + prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio); + } + + if (!vlan_present || !prio_mask) { + prio_val = UNKNOWN_MATCH_PRIO; + } else if (prio_mask != 0x7) { + netdev_warn(priv->netdev, + "masked priority match not supported for hairpin\n"); + return -EOPNOTSUPP; + } + + *match_prio = prio_val; + return 0; +} + +static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5e_tc_flow_parse_attr *parse_attr) +{ + int peer_ifindex = parse_attr->mirred_ifindex; + struct mlx5_hairpin_params params; + struct mlx5_core_dev *peer_mdev; + struct mlx5e_hairpin_entry *hpe; + struct mlx5e_hairpin *hp; + u64 
link_speed64; + u32 link_speed; + u8 match_prio; + u16 peer_id; + int err; + + peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex); + if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) { + netdev_warn(priv->netdev, "hairpin is not supported\n"); + return -EOPNOTSUPP; + } + + peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id); + err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio); + if (err) + return err; + hpe = mlx5e_hairpin_get(priv, peer_id, match_prio); + if (hpe) + goto attach_flow; + + hpe = kzalloc(sizeof(*hpe), GFP_KERNEL); + if (!hpe) + return -ENOMEM; + + INIT_LIST_HEAD(&hpe->flows); + hpe->peer_vhca_id = peer_id; + hpe->prio = match_prio; + + params.log_data_size = 15; + params.log_data_size = min_t(u8, params.log_data_size, + MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz)); + params.log_data_size = max_t(u8, params.log_data_size, + MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz)); + + params.log_num_packets = params.log_data_size - + MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev); + params.log_num_packets = min_t(u8, params.log_num_packets, + MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets)); + + params.q_counter = priv->q_counter; + /* set hairpin pair per each 50Gbs share of the link */ + mlx5e_get_max_linkspeed(priv->mdev, &link_speed); + link_speed = max_t(u32, link_speed, 50000); + link_speed64 = link_speed; + do_div(link_speed64, 50000); + params.num_channels = link_speed64; + + hp = mlx5e_hairpin_create(priv, &params, peer_ifindex); + if (IS_ERR(hp)) { + err = PTR_ERR(hp); + goto create_hairpin_err; + } + + netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n", + hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name, + hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets); + + hpe->hp = hp; + hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist, + hash_hairpin_info(peer_id, match_prio)); + +attach_flow: + if (hpe->hp->num_channels > 1) { + flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS; + flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t; + } else { + flow->nic_attr->hairpin_tirn = hpe->hp->tirn; + } + list_add(&flow->hairpin, &hpe->flows); + + return 0; + +create_hairpin_err: + kfree(hpe); + return err; +} + +static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + struct list_head *next = flow->hairpin.next; + + list_del(&flow->hairpin); + + /* no more hairpin flows for us, release the hairpin pair */ + if (list_empty(next)) { + struct mlx5e_hairpin_entry *hpe; + + hpe = list_entry(next, struct mlx5e_hairpin_entry, flows); + + netdev_dbg(priv->netdev, "del hairpin: peer %s\n", + hpe->hp->pair->peer_mdev->priv.name); + + mlx5e_hairpin_destroy(hpe->hp); + hash_del(&hpe->hairpin_hlist); + kfree(hpe); + } +} + static struct mlx5_flow_handle * mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr, @@ -229,7 +672,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, { struct mlx5_nic_flow_attr *attr = flow->nic_attr; struct mlx5_core_dev *dev = priv->mdev; - struct mlx5_flow_destination dest = {}; + struct mlx5_flow_destination dest[2] = {}; struct mlx5_flow_act flow_act = { .action = attr->action, .flow_tag = attr->flow_tag, @@ -238,18 +681,37 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, struct mlx5_fc *counter = NULL; struct mlx5_flow_handle *rule; bool table_created = false; - int err; + int err, dest_ix = 0; - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { - 
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = priv->fs.vlan.ft.t; - } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { - counter = mlx5_fc_create(dev, true); - if (IS_ERR(counter)) - return ERR_CAST(counter); + if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) { + err = mlx5e_hairpin_flow_add(priv, flow, parse_attr); + if (err) { + rule = ERR_PTR(err); + goto err_add_hairpin_flow; + } + if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) { + dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[dest_ix].ft = attr->hairpin_ft; + } else { + dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dest[dest_ix].tir_num = attr->hairpin_tirn; + } + dest_ix++; + } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[dest_ix].ft = priv->fs.vlan.ft.t; + dest_ix++; + } - dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter = counter; + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + counter = mlx5_fc_create(dev, true); + if (IS_ERR(counter)) { + rule = ERR_CAST(counter); + goto err_fc_create; + } + dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[dest_ix].counter = counter; + dest_ix++; } if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { @@ -279,7 +741,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, MLX5E_TC_PRIO, tc_tbl_size, MLX5E_TC_TABLE_NUM_GROUPS, - 0, 0); + MLX5E_TC_FT_LEVEL, 0); if (IS_ERR(priv->fs.tc.t)) { netdev_err(priv->netdev, "Failed to create tc offload table\n"); @@ -292,7 +754,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec, - &flow_act, &dest, 1); + &flow_act, dest, dest_ix); if (IS_ERR(rule)) goto err_add_rule; @@ -309,7 +771,10 @@ err_create_ft: mlx5e_detach_mod_hdr(priv, flow); err_create_mod_hdr_id: mlx5_fc_destroy(dev, counter); - +err_fc_create: + if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) + mlx5e_hairpin_flow_del(priv, flow); +err_add_hairpin_flow: return rule; } @@ -330,6 +795,9 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) mlx5e_detach_mod_hdr(priv, flow); + + if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) + mlx5e_hairpin_flow_del(priv, flow); } static void mlx5e_detach_encap(struct mlx5e_priv *priv, @@ -617,7 +1085,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, FLOW_DISSECTOR_KEY_ENC_PORTS, f->mask); struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw); + struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + struct net_device *up_dev = uplink_rpriv->netdev; struct mlx5e_priv *up_priv = netdev_priv(up_dev); /* Full udp dst port must be given */ @@ -1421,6 +1890,20 @@ static bool actions_match_supported(struct mlx5e_priv *priv, return true; } +static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) +{ + struct mlx5_core_dev *fmdev, *pmdev; + u16 func_id, peer_id; + + fmdev = priv->mdev; + pmdev = peer_priv->mdev; + + func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn)); + peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn)); + + return (func_id == peer_id); +} + static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow) @@ -1465,6 +1948,23 @@ static int 
parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return -EOPNOTSUPP; } + if (is_tcf_mirred_egress_redirect(a)) { + struct net_device *peer_dev = tcf_mirred_dev(a); + + if (priv->netdev->netdev_ops == peer_dev->netdev_ops && + same_hw_devs(priv, netdev_priv(peer_dev))) { + parse_attr->mirred_ifindex = peer_dev->ifindex; + flow->flags |= MLX5E_TC_FLOW_HAIRPIN; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + } else { + netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n", + peer_dev->name); + return -EINVAL; + } + continue; + } + if (is_tcf_skbedit_mark(a)) { u32 mark = tcf_skbedit_mark(a); @@ -1507,6 +2007,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, int *out_ttl) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_rep_priv *uplink_rpriv; struct rtable *rt; struct neighbour *n = NULL; @@ -1520,9 +2021,10 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, #else return -EOPNOTSUPP; #endif + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); /* if the egress device isn't on the same HW e-switch, we use the uplink */ if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) - *out_dev = mlx5_eswitch_get_uplink_netdev(esw); + *out_dev = uplink_rpriv->netdev; else *out_dev = rt->dst.dev; @@ -1547,6 +2049,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, struct dst_entry *dst; #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) + struct mlx5e_rep_priv *uplink_rpriv; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; int ret; @@ -1557,9 +2060,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, *out_ttl = ip6_dst_hoplimit(dst); + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); /* if the egress device isn't on the same HW e-switch, we use the uplink */ if (!switchdev_port_same_parent_id(priv->netdev, dst->dev)) - *out_dev = mlx5_eswitch_get_uplink_netdev(esw); + *out_dev = uplink_rpriv->netdev; else *out_dev = dst->dev; #else @@ -1859,7 +2363,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw); + struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, + REP_ETH); + struct net_device *up_dev = uplink_rpriv->netdev; unsigned short family = ip_tunnel_info_af(tun_info); struct mlx5e_priv *up_priv = netdev_priv(up_dev); struct mlx5_esw_flow_attr *attr = flow->esw_attr; @@ -1982,11 +2488,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } if (is_tcf_mirred_egress_redirect(a)) { - int ifindex = tcf_mirred_ifindex(a); struct net_device *out_dev; struct mlx5e_priv *out_priv; - out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex); + out_dev = tcf_mirred_dev(a); if (switchdev_port_same_parent_id(priv->netdev, out_dev)) { @@ -1996,7 +2501,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, rpriv = out_priv->ppriv; attr->out_rep = rpriv->rep; } else if (encap) { - parse_attr->mirred_ifindex = ifindex; + parse_attr->mirred_ifindex = out_dev->ifindex; parse_attr->tun_info = *info; attr->parse_attr = parse_attr; attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP | @@ -2182,6 +2687,7 @@ int mlx5e_tc_init(struct mlx5e_priv *priv) struct mlx5e_tc_table *tc = &priv->fs.tc; hash_init(tc->mod_hdr_tbl); + hash_init(tc->hairpin_tbl); tc->ht_params = mlx5e_tc_flow_ht_params; return 
rhashtable_init(&tc->ht, &tc->ht_params); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index ab92298eafc3..f292bb346985 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -78,8 +78,14 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) for (i = 0; i < c->num_tc; i++) mlx5e_cq_arm(&c->sq[i].cq); - if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) - mlx5e_rx_am(&c->rq); + if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) { + struct net_dim_sample dim_sample; + net_dim_sample(c->rq.cq.event_ctr, + c->rq.stats.packets, + c->rq.stats.bytes, + &dim_sample); + net_dim(&c->rq.dim, dim_sample); + } mlx5e_cq_arm(&c->rq.cq); mlx5e_cq_arm(&c->icosq.cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 14d57828945d..05c1157bc9f2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -534,6 +534,24 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) return IRQ_HANDLED; } +/* Some architectures don't latch interrupts when they are disabled, so using + * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to + * avoid losing them. It is not recommended to use it, unless this is the last + * resort. + */ +u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq) +{ + u32 count_eqe; + + disable_irq(eq->irqn); + count_eqe = eq->cons_index; + mlx5_eq_int(eq->irqn, eq); + count_eqe = eq->cons_index - count_eqe; + enable_irq(eq->irqn); + + return count_eqe; +} + static void init_eq_buf(struct mlx5_eq *eq) { struct mlx5_eqe *eqe; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index bbb140f517c4..5ecf2cddc16d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -37,6 +37,7 @@ #include <linux/mlx5/fs.h> #include "mlx5_core.h" #include "eswitch.h" +#include "fs_core.h" #define UPLINK_VPORT 0xFFFF @@ -867,9 +868,10 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n", vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size)); - root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS); + root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS, + vport->vport); if (!root_ns) { - esw_warn(dev, "Failed to get E-Switch egress flow namespace\n"); + esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport); return -EOPNOTSUPP; } @@ -984,9 +986,10 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size)); - root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS); + root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, + vport->vport); if (!root_ns) { - esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n"); + esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport); return -EOPNOTSUPP; } @@ -1121,8 +1124,12 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, static int esw_vport_ingress_config(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + struct mlx5_fc *counter = vport->ingress.drop_counter; + struct 
mlx5_flow_destination drop_ctr_dst = {0}; + struct mlx5_flow_destination *dst = NULL; struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_spec *spec; + int dest_num = 0; int err = 0; u8 *smac_v; @@ -1186,9 +1193,18 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, memset(spec, 0, sizeof(*spec)); flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + + /* Attach drop flow counter */ + if (counter) { + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + drop_ctr_dst.counter = counter; + dst = &drop_ctr_dst; + dest_num++; + } vport->ingress.drop_rule = mlx5_add_flow_rules(vport->ingress.acl, spec, - &flow_act, NULL, 0); + &flow_act, dst, dest_num); if (IS_ERR(vport->ingress.drop_rule)) { err = PTR_ERR(vport->ingress.drop_rule); esw_warn(esw->dev, @@ -1208,8 +1224,12 @@ out: static int esw_vport_egress_config(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + struct mlx5_fc *counter = vport->egress.drop_counter; + struct mlx5_flow_destination drop_ctr_dst = {0}; + struct mlx5_flow_destination *dst = NULL; struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_spec *spec; + int dest_num = 0; int err = 0; esw_vport_cleanup_egress_rules(esw, vport); @@ -1260,9 +1280,18 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, /* Drop others rule (star rule) */ memset(spec, 0, sizeof(*spec)); flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + + /* Attach egress drop flow counter */ + if (counter) { + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + drop_ctr_dst.counter = counter; + dst = &drop_ctr_dst; + dest_num++; + } vport->egress.drop_rule = mlx5_add_flow_rules(vport->egress.acl, spec, - &flow_act, NULL, 0); + &flow_act, dst, dest_num); if (IS_ERR(vport->egress.drop_rule)) { err = PTR_ERR(vport->egress.drop_rule); esw_warn(esw->dev, @@ -1290,7 +1319,7 @@ static int esw_create_tsar(struct mlx5_eswitch *esw) err = mlx5_create_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, - &tsar_ctx, + tsar_ctx, &esw->qos.root_tsar_id); if (err) { esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err); @@ -1333,20 +1362,20 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, if (vport->qos.enabled) return -EEXIST; - MLX5_SET(scheduling_context, &sched_ctx, element_type, + MLX5_SET(scheduling_context, sched_ctx, element_type, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); - vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx, + vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); MLX5_SET(vport_element, vport_elem, vport_number, vport_num); - MLX5_SET(scheduling_context, &sched_ctx, parent_element_id, + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, esw->qos.root_tsar_id); - MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, initial_max_rate); - MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share); + MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share); err = mlx5_create_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, - &sched_ctx, + sched_ctx, &vport->qos.esw_tsar_ix); if (err) { esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n", @@ -1392,22 +1421,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, if (!vport->qos.enabled) return -EIO; - MLX5_SET(scheduling_context, &sched_ctx, element_type, + MLX5_SET(scheduling_context, 
sched_ctx, element_type, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); - vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx, + vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); MLX5_SET(vport_element, vport_elem, vport_number, vport_num); - MLX5_SET(scheduling_context, &sched_ctx, parent_element_id, + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, esw->qos.root_tsar_id); - MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); - MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share); + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; err = mlx5_modify_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, - &sched_ctx, + sched_ctx, vport->qos.esw_tsar_ix, bitmask); if (err) { @@ -1455,6 +1484,41 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw, } } +static void esw_vport_create_drop_counters(struct mlx5_vport *vport) +{ + struct mlx5_core_dev *dev = vport->dev; + + if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) { + vport->ingress.drop_counter = mlx5_fc_create(dev, false); + if (IS_ERR(vport->ingress.drop_counter)) { + esw_warn(dev, + "vport[%d] configure ingress drop rule counter failed\n", + vport->vport); + vport->ingress.drop_counter = NULL; + } + } + + if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) { + vport->egress.drop_counter = mlx5_fc_create(dev, false); + if (IS_ERR(vport->egress.drop_counter)) { + esw_warn(dev, + "vport[%d] configure egress drop rule counter failed\n", + vport->vport); + vport->egress.drop_counter = NULL; + } + } +} + +static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport) +{ + struct mlx5_core_dev *dev = vport->dev; + + if (vport->ingress.drop_counter) + mlx5_fc_destroy(dev, vport->ingress.drop_counter); + if (vport->egress.drop_counter) + mlx5_fc_destroy(dev, vport->egress.drop_counter); +} + static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, int enable_events) { @@ -1481,6 +1545,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, if (!vport_num) vport->info.trusted = true; + /* create steering drop counters for ingress and egress ACLs */ + if (vport_num && esw->mode == SRIOV_LEGACY) + esw_vport_create_drop_counters(vport); + esw_vport_change_handle_locked(vport); esw->enabled_vports++; @@ -1519,6 +1587,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) MLX5_ESW_VPORT_ADMIN_STATE_DOWN); esw_vport_disable_egress_acl(esw, vport); esw_vport_disable_ingress_acl(esw, vport); + esw_vport_destroy_drop_counters(vport); } esw->enabled_vports--; mutex_unlock(&esw->state_lock); @@ -1644,13 +1713,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) goto abort; } - esw->offloads.vport_reps = - kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep), - GFP_KERNEL); - if (!esw->offloads.vport_reps) { - err = -ENOMEM; + err = esw_offloads_init_reps(esw); + if (err) goto abort; - } hash_init(esw->offloads.encap_tbl); hash_init(esw->offloads.mod_hdr_tbl); @@ -1681,8 +1746,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) abort: if (esw->work_queue) destroy_workqueue(esw->work_queue); + esw_offloads_cleanup_reps(esw); kfree(esw->vports); - kfree(esw->offloads.vport_reps); kfree(esw); return err; } @@ -1696,7 +1761,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw->dev->priv.eswitch = NULL; 
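/* Illustrative aside, not from the patch: the per-vport drop counters
 * created above are best-effort, a failed mlx5_fc_create() just leaves the
 * pointer NULL and the drop rule is installed without the COUNT action, so
 * teardown must tolerate NULL. A minimal model of that lifecycle; fc_create()
 * and struct fc are stand-ins, not the mlx5 API:
 */
#include <stdio.h>
#include <stdlib.h>

struct fc { unsigned long packets; };

static struct fc *fc_create(int fail)
{
	return fail ? NULL : calloc(1, sizeof(struct fc));
}

static void vport_enable(struct fc **drop_counter, int hw_has_counters)
{
	*drop_counter = NULL;
	if (!hw_has_counters)	/* mirrors the MLX5_CAP_ESW_*_ACL checks */
		return;
	*drop_counter = fc_create(0);
	if (!*drop_counter)
		fprintf(stderr, "drop counter alloc failed, rule runs uncounted\n");
}

static void vport_disable(struct fc **drop_counter)
{
	if (*drop_counter)	/* same NULL check as before mlx5_fc_destroy() */
		free(*drop_counter);
	*drop_counter = NULL;
}

int main(void)
{
	struct fc *ingress_drop;

	vport_enable(&ingress_drop, 1);
	vport_disable(&ingress_drop);
	return 0;
}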
destroy_workqueue(esw->work_queue); - kfree(esw->offloads.vport_reps); + esw_offloads_cleanup_reps(esw); kfree(esw->vports); kfree(esw); } @@ -2018,12 +2083,36 @@ unlock: return err; } +static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, + int vport_idx, + struct mlx5_vport_drop_stats *stats) +{ + struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_vport *vport = &esw->vports[vport_idx]; + u64 bytes = 0; + u16 idx = 0; + + if (!vport->enabled || esw->mode != SRIOV_LEGACY) + return; + + if (vport->egress.drop_counter) { + idx = vport->egress.drop_counter->id; + mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes); + } + + if (vport->ingress.drop_counter) { + idx = vport->ingress.drop_counter->id; + mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes); + } +} + int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, int vport, struct ifla_vf_stats *vf_stats) { int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0}; + struct mlx5_vport_drop_stats stats = {0}; int err = 0; u32 *out; @@ -2078,6 +2167,10 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, vf_stats->broadcast = MLX5_GET_CTR(out, received_eth_broadcast.packets); + mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats); + vf_stats->rx_dropped = stats.rx_dropped; + vf_stats->tx_dropped = stats.tx_dropped; + free_out: kvfree(out); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 565c8b7a399a..2fa037066b2f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -45,6 +45,11 @@ enum { SRIOV_OFFLOADS }; +enum { + REP_ETH, + NUM_REP_TYPES, +}; + #ifdef CONFIG_MLX5_ESWITCH #define MLX5_MAX_UC_PER_VPORT(dev) \ @@ -68,6 +73,7 @@ struct vport_ingress { struct mlx5_flow_group *drop_grp; struct mlx5_flow_handle *allow_rule; struct mlx5_flow_handle *drop_rule; + struct mlx5_fc *drop_counter; }; struct vport_egress { @@ -76,6 +82,12 @@ struct vport_egress { struct mlx5_flow_group *drop_grp; struct mlx5_flow_handle *allowed_vlan; struct mlx5_flow_handle *drop_rule; + struct mlx5_fc *drop_counter; +}; + +struct mlx5_vport_drop_stats { + u64 rx_dropped; + u64 tx_dropped; }; struct mlx5_vport_info { @@ -133,25 +145,21 @@ struct mlx5_eswitch_fdb { }; }; -struct mlx5_esw_sq { - struct mlx5_flow_handle *send_to_vport_rule; - struct list_head list; +struct mlx5_eswitch_rep; +struct mlx5_eswitch_rep_if { + int (*load)(struct mlx5_core_dev *dev, + struct mlx5_eswitch_rep *rep); + void (*unload)(struct mlx5_eswitch_rep *rep); + void *priv; + bool valid; }; struct mlx5_eswitch_rep { - int (*load)(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep); - void (*unload)(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep); + struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES]; u16 vport; u8 hw_id[ETH_ALEN]; - struct net_device *netdev; - - struct mlx5_flow_handle *vport_rx_rule; - struct list_head vport_sqs_list; u16 vlan; u32 vlan_refcount; - bool valid; }; struct mlx5_esw_offload { @@ -197,6 +205,8 @@ struct mlx5_eswitch { void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports); int esw_offloads_init(struct mlx5_eswitch *esw, int nvports); +void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw); +int esw_offloads_init_reps(struct mlx5_eswitch *esw); /* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); @@ -221,6 +231,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch 
*esw, int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, int vport, struct ifla_vf_stats *vf_stats); +struct mlx5_flow_handle * +mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, + u32 sqn); +void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule); struct mlx5_flow_spec; struct mlx5_esw_flow_attr; @@ -257,12 +271,6 @@ struct mlx5_esw_flow_attr { struct mlx5e_tc_flow_parse_attr *parse_attr; }; -int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep, - u16 *sqns_array, int sqns_num); -void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep); - int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode); int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode); int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode); @@ -272,10 +280,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap); int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap); void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, int vport_index, - struct mlx5_eswitch_rep *rep); + struct mlx5_eswitch_rep_if *rep_if, + u8 rep_type); void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, - int vport_index); -struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw); + int vport_index, + u8 rep_type); +void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 1143d80119bd..99f583a15cc3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -130,7 +130,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? 
"pop" : "none"); for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) { rep = &esw->offloads.vport_reps[vf_vport]; - if (!rep->valid) + if (!rep->rep_if[REP_ETH].valid) continue; err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val); @@ -302,7 +302,7 @@ out: return err; } -static struct mlx5_flow_handle * +struct mlx5_flow_handle * mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn) { struct mlx5_flow_act flow_act = {0}; @@ -339,57 +339,9 @@ out: return flow_rule; } -void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep) -{ - struct mlx5_esw_sq *esw_sq, *tmp; - - if (esw->mode != SRIOV_OFFLOADS) - return; - - list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) { - mlx5_del_flow_rules(esw_sq->send_to_vport_rule); - list_del(&esw_sq->list); - kfree(esw_sq); - } -} - -int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep, - u16 *sqns_array, int sqns_num) +void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule) { - struct mlx5_flow_handle *flow_rule; - struct mlx5_esw_sq *esw_sq; - int err; - int i; - - if (esw->mode != SRIOV_OFFLOADS) - return 0; - - for (i = 0; i < sqns_num; i++) { - esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL); - if (!esw_sq) { - err = -ENOMEM; - goto out_err; - } - - /* Add re-inject rule to the PF/representor sqs */ - flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, - rep->vport, - sqns_array[i]); - if (IS_ERR(flow_rule)) { - err = PTR_ERR(flow_rule); - kfree(esw_sq); - goto out_err; - } - esw_sq->send_to_vport_rule = flow_rule; - list_add(&esw_sq->list, &rep->vport_sqs_list); - } - return 0; - -out_err: - mlx5_eswitch_sqs2vport_stop(esw, rep); - return err; + mlx5_del_flow_rules(rule); } static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) @@ -732,12 +684,111 @@ static int esw_offloads_start(struct mlx5_eswitch *esw) return err; } -int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) +void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw) +{ + kfree(esw->offloads.vport_reps); +} + +int esw_offloads_init_reps(struct mlx5_eswitch *esw) +{ + int total_vfs = MLX5_TOTAL_VPORTS(esw->dev); + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_esw_offload *offloads; + struct mlx5_eswitch_rep *rep; + u8 hw_id[ETH_ALEN]; + int vport; + + esw->offloads.vport_reps = kcalloc(total_vfs, + sizeof(struct mlx5_eswitch_rep), + GFP_KERNEL); + if (!esw->offloads.vport_reps) + return -ENOMEM; + + offloads = &esw->offloads; + mlx5_query_nic_vport_mac_address(dev, 0, hw_id); + + for (vport = 0; vport < total_vfs; vport++) { + rep = &offloads->vport_reps[vport]; + + rep->vport = vport; + ether_addr_copy(rep->hw_id, hw_id); + } + + offloads->vport_reps[0].vport = FDB_UPLINK_VPORT; + + return 0; +} + +static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports, + u8 rep_type) +{ + struct mlx5_eswitch_rep *rep; + int vport; + + for (vport = nvports - 1; vport >= 0; vport--) { + rep = &esw->offloads.vport_reps[vport]; + if (!rep->rep_if[rep_type].valid) + continue; + + rep->rep_if[rep_type].unload(rep); + } +} + +static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports) +{ + u8 rep_type = NUM_REP_TYPES; + + while (rep_type-- > 0) + esw_offloads_unload_reps_type(esw, nvports, rep_type); +} + +static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports, + u8 rep_type) { struct mlx5_eswitch_rep *rep; int vport; int err; + for (vport = 0; vport < nvports; vport++) { + rep = 
&esw->offloads.vport_reps[vport]; + if (!rep->rep_if[rep_type].valid) + continue; + + err = rep->rep_if[rep_type].load(esw->dev, rep); + if (err) + goto err_reps; + } + + return 0; + +err_reps: + esw_offloads_unload_reps_type(esw, vport, rep_type); + return err; +} + +static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports) +{ + u8 rep_type = 0; + int err; + + for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { + err = esw_offloads_load_reps_type(esw, nvports, rep_type); + if (err) + goto err_reps; + } + + return err; + +err_reps: + while (rep_type-- > 0) + esw_offloads_unload_reps_type(esw, nvports, rep_type); + return err; +} + +int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) +{ + int err; + /* disable PF RoCE so missed packets don't go through RoCE steering */ mlx5_dev_list_lock(); mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); @@ -755,25 +806,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) if (err) goto create_fg_err; - for (vport = 0; vport < nvports; vport++) { - rep = &esw->offloads.vport_reps[vport]; - if (!rep->valid) - continue; - - err = rep->load(esw, rep); - if (err) - goto err_reps; - } + err = esw_offloads_load_reps(esw, nvports); + if (err) + goto err_reps; return 0; err_reps: - for (vport--; vport >= 0; vport--) { - rep = &esw->offloads.vport_reps[vport]; - if (!rep->valid) - continue; - rep->unload(esw, rep); - } esw_destroy_vport_rx_group(esw); create_fg_err: @@ -814,16 +853,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw) void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports) { - struct mlx5_eswitch_rep *rep; - int vport; - - for (vport = nvports - 1; vport >= 0; vport--) { - rep = &esw->offloads.vport_reps[vport]; - if (!rep->valid) - continue; - rep->unload(esw, rep); - } - + esw_offloads_unload_reps(esw, nvports); esw_destroy_vport_rx_group(esw); esw_destroy_offloads_table(esw); esw_destroy_offloads_fdb_tables(esw); @@ -1120,27 +1150,23 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap) void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, int vport_index, - struct mlx5_eswitch_rep *__rep) + struct mlx5_eswitch_rep_if *__rep_if, + u8 rep_type) { struct mlx5_esw_offload *offloads = &esw->offloads; - struct mlx5_eswitch_rep *rep; - - rep = &offloads->vport_reps[vport_index]; + struct mlx5_eswitch_rep_if *rep_if; - memset(rep, 0, sizeof(*rep)); + rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type]; - rep->load = __rep->load; - rep->unload = __rep->unload; - rep->vport = __rep->vport; - rep->netdev = __rep->netdev; - ether_addr_copy(rep->hw_id, __rep->hw_id); + rep_if->load = __rep_if->load; + rep_if->unload = __rep_if->unload; + rep_if->priv = __rep_if->priv; - INIT_LIST_HEAD(&rep->vport_sqs_list); - rep->valid = true; + rep_if->valid = true; } void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, - int vport_index) + int vport_index, u8 rep_type) { struct mlx5_esw_offload *offloads = &esw->offloads; struct mlx5_eswitch_rep *rep; @@ -1148,17 +1174,17 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, rep = &offloads->vport_reps[vport_index]; if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled) - rep->unload(esw, rep); + rep->rep_if[rep_type].unload(rep); - rep->valid = false; + rep->rep_if[rep_type].valid = false; } -struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw) +void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type) { #define 
UPLINK_REP_INDEX 0 struct mlx5_esw_offload *offloads = &esw->offloads; struct mlx5_eswitch_rep *rep; rep = &offloads->vport_reps[UPLINK_REP_INDEX]; - return rep->netdev; + return rep->rep_if[rep_type].priv; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index dfaad9ecb2b8..c025c98700e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -89,6 +89,9 @@ /* One more level for tc */ #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) +#define KERNEL_NIC_TC_NUM_PRIOS 1 +#define KERNEL_NIC_TC_NUM_LEVELS 2 + #define ANCHOR_NUM_LEVELS 1 #define ANCHOR_NUM_PRIOS 1 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1) @@ -134,7 +137,7 @@ static struct init_tree_node { ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS, ETHTOOL_PRIO_NUM_LEVELS))), ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {}, - ADD_NS(ADD_MULTIPLE_PRIO(1, 1), + ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS), ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, KERNEL_NIC_PRIO_NUM_LEVELS))), ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, @@ -2026,16 +2029,6 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, return &steering->fdb_root_ns->ns; else return NULL; - case MLX5_FLOW_NAMESPACE_ESW_EGRESS: - if (steering->esw_egress_root_ns) - return &steering->esw_egress_root_ns->ns; - else - return NULL; - case MLX5_FLOW_NAMESPACE_ESW_INGRESS: - if (steering->esw_ingress_root_ns) - return &steering->esw_ingress_root_ns->ns; - else - return NULL; case MLX5_FLOW_NAMESPACE_SNIFFER_RX: if (steering->sniffer_rx_root_ns) return &steering->sniffer_rx_root_ns->ns; @@ -2066,6 +2059,33 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, } EXPORT_SYMBOL(mlx5_get_flow_namespace); +struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type type, + int vport) +{ + struct mlx5_flow_steering *steering = dev->priv.steering; + + if (!steering || vport >= MLX5_TOTAL_VPORTS(dev)) + return NULL; + + switch (type) { + case MLX5_FLOW_NAMESPACE_ESW_EGRESS: + if (steering->esw_egress_root_ns && + steering->esw_egress_root_ns[vport]) + return &steering->esw_egress_root_ns[vport]->ns; + else + return NULL; + case MLX5_FLOW_NAMESPACE_ESW_INGRESS: + if (steering->esw_ingress_root_ns && + steering->esw_ingress_root_ns[vport]) + return &steering->esw_ingress_root_ns[vport]->ns; + else + return NULL; + default: + return NULL; + } +} + static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, unsigned int prio, int num_levels) { @@ -2343,13 +2363,41 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns) clean_tree(&root_ns->ns.node); } +static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev) +{ + struct mlx5_flow_steering *steering = dev->priv.steering; + int i; + + if (!steering->esw_egress_root_ns) + return; + + for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) + cleanup_root_ns(steering->esw_egress_root_ns[i]); + + kfree(steering->esw_egress_root_ns); +} + +static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev) +{ + struct mlx5_flow_steering *steering = dev->priv.steering; + int i; + + if (!steering->esw_ingress_root_ns) + return; + + for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) + cleanup_root_ns(steering->esw_ingress_root_ns[i]); + + kfree(steering->esw_ingress_root_ns); +} + void mlx5_cleanup_fs(struct mlx5_core_dev *dev) { struct mlx5_flow_steering *steering = 
dev->priv.steering; cleanup_root_ns(steering->root_ns); - cleanup_root_ns(steering->esw_egress_root_ns); - cleanup_root_ns(steering->esw_ingress_root_ns); + cleanup_egress_acls_root_ns(dev); + cleanup_ingress_acls_root_ns(dev); cleanup_root_ns(steering->fdb_root_ns); cleanup_root_ns(steering->sniffer_rx_root_ns); cleanup_root_ns(steering->sniffer_tx_root_ns); @@ -2418,34 +2466,86 @@ out_err: return PTR_ERR(prio); } -static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering) +static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport) { struct fs_prio *prio; - steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL); - if (!steering->esw_egress_root_ns) + steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL); + if (!steering->esw_egress_root_ns[vport]) return -ENOMEM; /* create 1 prio*/ - prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0, - MLX5_TOTAL_VPORTS(steering->dev)); + prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1); return PTR_ERR_OR_ZERO(prio); } -static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering) +static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport) { struct fs_prio *prio; - steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL); - if (!steering->esw_ingress_root_ns) + steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL); + if (!steering->esw_ingress_root_ns[vport]) return -ENOMEM; /* create 1 prio*/ - prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0, - MLX5_TOTAL_VPORTS(steering->dev)); + prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1); return PTR_ERR_OR_ZERO(prio); } +static int init_egress_acls_root_ns(struct mlx5_core_dev *dev) +{ + struct mlx5_flow_steering *steering = dev->priv.steering; + int err; + int i; + + steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev), + sizeof(*steering->esw_egress_root_ns), + GFP_KERNEL); + if (!steering->esw_egress_root_ns) + return -ENOMEM; + + for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) { + err = init_egress_acl_root_ns(steering, i); + if (err) + goto cleanup_root_ns; + } + + return 0; + +cleanup_root_ns: + for (i--; i >= 0; i--) + cleanup_root_ns(steering->esw_egress_root_ns[i]); + kfree(steering->esw_egress_root_ns); + return err; +} + +static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev) +{ + struct mlx5_flow_steering *steering = dev->priv.steering; + int err; + int i; + + steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev), + sizeof(*steering->esw_ingress_root_ns), + GFP_KERNEL); + if (!steering->esw_ingress_root_ns) + return -ENOMEM; + + for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) { + err = init_ingress_acl_root_ns(steering, i); + if (err) + goto cleanup_root_ns; + } + + return 0; + +cleanup_root_ns: + for (i--; i >= 0; i--) + cleanup_root_ns(steering->esw_ingress_root_ns[i]); + kfree(steering->esw_ingress_root_ns); + return err; +} + int mlx5_init_fs(struct mlx5_core_dev *dev) { struct mlx5_flow_steering *steering; @@ -2488,12 +2588,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { - err = init_egress_acl_root_ns(steering); + err = init_egress_acls_root_ns(dev); if (err) goto err; } if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { - err = init_ingress_acl_root_ns(steering); + err = init_ingress_acls_root_ns(dev); if (err) goto err; } diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 397d24a621a4..05262708f14b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -71,8 +71,8 @@ struct mlx5_flow_steering { struct kmem_cache *ftes_cache; struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_root_namespace *fdb_root_ns; - struct mlx5_flow_root_namespace *esw_egress_root_ns; - struct mlx5_flow_root_namespace *esw_ingress_root_ns; + struct mlx5_flow_root_namespace **esw_egress_root_ns; + struct mlx5_flow_root_namespace **esw_ingress_root_ns; struct mlx5_flow_root_namespace *sniffer_tx_root_ns; struct mlx5_flow_root_namespace *sniffer_rx_root_ns; }; @@ -233,6 +233,8 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev, unsigned long delay); void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev, unsigned long interval); +int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id, + u64 *packets, u64 *bytes); int mlx5_init_fs(struct mlx5_core_dev *dev); void mlx5_cleanup_fs(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 89d1f8650033..b7ab929d5f8e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -312,6 +312,12 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev) } } +int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id, + u64 *packets, u64 *bytes) +{ + return mlx5_cmd_fc_query(dev, id, packets, bytes); +} + void mlx5_fc_query_cached(struct mlx5_fc *counter, u64 *bytes, u64 *packets, u64 *lastuse) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index 6f338a9219c8..90cb50fe17fd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -254,4 +254,5 @@ const struct ethtool_ops mlx5i_ethtool_ops = { const struct ethtool_ops mlx5i_pkey_ethtool_ops = { .get_drvinfo = mlx5i_get_drvinfo, .get_link = ethtool_op_get_link, + .get_ts_info = mlx5i_get_ts_info, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index a281d95ce17c..f953378bd13d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -41,7 +41,6 @@ static int mlx5i_open(struct net_device *netdev); static int mlx5i_close(struct net_device *netdev); static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu); -static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static const struct net_device_ops mlx5i_netdev_ops = { .ndo_open = mlx5i_open, @@ -242,7 +241,8 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv) static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) { - int err; + struct ttc_params ttc_params = {}; + int tt, err; priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); @@ -257,14 +257,23 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) priv->netdev->hw_features &= ~NETIF_F_NTUPLE; } - err = mlx5e_create_inner_ttc_table(priv); + mlx5e_set_ttc_basic_params(priv, &ttc_params); + mlx5e_set_inner_ttc_ft_params(&ttc_params); + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) + ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn; + + err = 
mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc); if (err) { netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n", err); goto err_destroy_arfs_tables; } - err = mlx5e_create_ttc_table(priv); + mlx5e_set_ttc_ft_params(&ttc_params); + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) + ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn; + + err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc); if (err) { netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", err); @@ -274,7 +283,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) return 0; err_destroy_inner_ttc_table: - mlx5e_destroy_inner_ttc_table(priv); + mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); err_destroy_arfs_tables: mlx5e_arfs_destroy_tables(priv); @@ -283,8 +292,8 @@ err_destroy_arfs_tables: static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) { - mlx5e_destroy_ttc_table(priv); - mlx5e_destroy_inner_ttc_table(priv); + mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); + mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); mlx5e_arfs_destroy_tables(priv); } @@ -398,7 +407,7 @@ int mlx5i_dev_init(struct net_device *dev) return 0; } -static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mlx5e_priv *priv = mlx5i_epriv(dev); @@ -486,7 +495,7 @@ static int mlx5i_close(struct net_device *netdev) mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); mlx5i_uninit_underlay_qp(epriv); mlx5e_deactivate_priv_channels(epriv); - mlx5e_close_channels(&epriv->channels);; + mlx5e_close_channels(&epriv->channels); unlock: mutex_unlock(&epriv->state_lock); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 49008022c306..6d9053bcbe95 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -76,9 +76,10 @@ int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn); /* Get the net-device corresponding to the given underlay QPN */ struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn); -/* Shared ndo functionts */ +/* Shared ndo functions */ int mlx5i_dev_init(struct net_device *dev); void mlx5i_dev_cleanup(struct net_device *dev); +int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); /* Parent profile functions */ void mlx5i_init(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 531b02cc979b..b69e9d847a6b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -140,6 +140,7 @@ static int mlx5i_pkey_close(struct net_device *netdev); static int mlx5i_pkey_dev_init(struct net_device *dev); static void mlx5i_pkey_dev_cleanup(struct net_device *netdev); static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu); +static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static const struct net_device_ops mlx5i_pkey_netdev_ops = { .ndo_open = mlx5i_pkey_open, @@ -147,6 +148,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = { .ndo_init = mlx5i_pkey_dev_init, .ndo_uninit = mlx5i_pkey_dev_cleanup, .ndo_change_mtu = mlx5i_pkey_change_mtu, + .ndo_do_ioctl = mlx5i_pkey_ioctl, }; /* Child NDOs */ @@ -174,6 +176,11 @@ static int 
mlx5i_pkey_dev_init(struct net_device *dev) return mlx5i_dev_init(dev); } +static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + return mlx5i_ioctl(dev, ifr, cmd); +} + static void mlx5i_pkey_dev_cleanup(struct net_device *netdev) { return mlx5i_dev_cleanup(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index b05868728da7..394552f36fcf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -116,6 +116,7 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev); struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); +u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq); void mlx5_cq_tasklet_cb(unsigned long data); int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index 5e128d7a9ffd..9e38343a951f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -398,3 +398,217 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn) mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_destroy_rqt); + +static int mlx5_hairpin_create_rq(struct mlx5_core_dev *mdev, + struct mlx5_hairpin_params *params, u32 *rqn) +{ + u32 in[MLX5_ST_SZ_DW(create_rq_in)] = {0}; + void *rqc, *wq; + + rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); + wq = MLX5_ADDR_OF(rqc, rqc, wq); + + MLX5_SET(rqc, rqc, hairpin, 1); + MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); + MLX5_SET(rqc, rqc, counter_set_id, params->q_counter); + + MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size); + MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets); + + return mlx5_core_create_rq(mdev, in, MLX5_ST_SZ_BYTES(create_rq_in), rqn); +} + +static int mlx5_hairpin_create_sq(struct mlx5_core_dev *mdev, + struct mlx5_hairpin_params *params, u32 *sqn) +{ + u32 in[MLX5_ST_SZ_DW(create_sq_in)] = {0}; + void *sqc, *wq; + + sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); + wq = MLX5_ADDR_OF(sqc, sqc, wq); + + MLX5_SET(sqc, sqc, hairpin, 1); + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); + + MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size); + MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets); + + return mlx5_core_create_sq(mdev, in, MLX5_ST_SZ_BYTES(create_sq_in), sqn); +} + +static int mlx5_hairpin_create_queues(struct mlx5_hairpin *hp, + struct mlx5_hairpin_params *params) +{ + int i, j, err; + + for (i = 0; i < hp->num_channels; i++) { + err = mlx5_hairpin_create_rq(hp->func_mdev, params, &hp->rqn[i]); + if (err) + goto out_err_rq; + } + + for (i = 0; i < hp->num_channels; i++) { + err = mlx5_hairpin_create_sq(hp->peer_mdev, params, &hp->sqn[i]); + if (err) + goto out_err_sq; + } + + return 0; + +out_err_sq: + for (j = 0; j < i; j++) + mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[j]); + i = hp->num_channels; +out_err_rq: + for (j = 0; j < i; j++) + mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[j]); + return err; +} + +static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp) +{ + int i; + + for (i = 0; i < hp->num_channels; i++) { + mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]); + mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]); + } +} + 
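The hairpin helpers above and below form a small lifecycle API: mlx5_hairpin_create_queues() allocates matching RQ/SQ pairs on the two devices, mlx5_hairpin_pair_queues() (below) moves them RST->RDY while cross-wiring hairpin_peer_vhca/hairpin_peer_sq, and teardown runs in reverse. A minimal caller sketch, assuming only the mlx5_hairpin_params fields used in this hunk and that the API is declared in linux/mlx5/transobj.h; the example_* names and numeric values are illustrative, not part of the patch:

/* A minimal sketch, assuming the mlx5_hairpin_params fields used in
 * this patch; example_* names and the numeric values are illustrative.
 */
#include <linux/err.h>
#include <linux/mlx5/transobj.h>

static struct mlx5_hairpin *
example_hairpin_open(struct mlx5_core_dev *func_mdev,
		     struct mlx5_core_dev *peer_mdev)
{
	struct mlx5_hairpin_params params = {
		.log_data_size	 = 16,	/* 64KB of hairpin packet buffer */
		.log_num_packets = 7,	/* up to 128 packets in flight */
		.q_counter	 = 0,	/* no RQ counter attached */
		.num_channels	 = 1,	/* one RQ/SQ pair */
	};

	/* Allocates RQs/SQs on both devices and pairs them RST->RDY */
	return mlx5_core_hairpin_create(func_mdev, peer_mdev, &params);
}

static void example_hairpin_close(struct mlx5_hairpin *hp)
{
	if (!IS_ERR_OR_NULL(hp))
		mlx5_core_hairpin_destroy(hp);	/* unpair, destroy, free */
}

Because mlx5_core_hairpin_create() (further below) both allocates and pairs the queues, a caller only steers traffic to hp->rqn[] / hp->sqn[] and later calls mlx5_core_hairpin_destroy().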
+static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn, + int curr_state, int next_state, + u16 peer_vhca, u32 peer_sq) +{ + u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0}; + void *rqc; + + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); + + if (next_state == MLX5_RQC_STATE_RDY) { + MLX5_SET(rqc, rqc, hairpin_peer_sq, peer_sq); + MLX5_SET(rqc, rqc, hairpin_peer_vhca, peer_vhca); + } + + MLX5_SET(modify_rq_in, in, rq_state, curr_state); + MLX5_SET(rqc, rqc, state, next_state); + + return mlx5_core_modify_rq(func_mdev, rqn, + in, MLX5_ST_SZ_BYTES(modify_rq_in)); +} + +static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn, + int curr_state, int next_state, + u16 peer_vhca, u32 peer_rq) +{ + u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0}; + void *sqc; + + sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); + + if (next_state == MLX5_RQC_STATE_RDY) { + MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq); + MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca); + } + + MLX5_SET(modify_sq_in, in, sq_state, curr_state); + MLX5_SET(sqc, sqc, state, next_state); + + return mlx5_core_modify_sq(peer_mdev, sqn, + in, MLX5_ST_SZ_BYTES(modify_sq_in)); +} + +static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp) +{ + int i, j, err; + + /* set peer SQs */ + for (i = 0; i < hp->num_channels; i++) { + err = mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], + MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY, + MLX5_CAP_GEN(hp->func_mdev, vhca_id), hp->rqn[i]); + if (err) + goto err_modify_sq; + } + + /* set func RQs */ + for (i = 0; i < hp->num_channels; i++) { + err = mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], + MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY, + MLX5_CAP_GEN(hp->peer_mdev, vhca_id), hp->sqn[i]); + if (err) + goto err_modify_rq; + } + + return 0; + +err_modify_rq: + for (j = 0; j < i; j++) + mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[j], MLX5_RQC_STATE_RDY, + MLX5_RQC_STATE_RST, 0, 0); + i = hp->num_channels; +err_modify_sq: + for (j = 0; j < i; j++) + mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[j], MLX5_SQC_STATE_RDY, + MLX5_SQC_STATE_RST, 0, 0); + return err; +} + +static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp) +{ + int i; + + /* unset func RQs */ + for (i = 0; i < hp->num_channels; i++) + mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY, + MLX5_RQC_STATE_RST, 0, 0); + + /* unset peer SQs */ + for (i = 0; i < hp->num_channels; i++) + mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY, + MLX5_SQC_STATE_RST, 0, 0); +} + +struct mlx5_hairpin * +mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev, + struct mlx5_core_dev *peer_mdev, + struct mlx5_hairpin_params *params) +{ + struct mlx5_hairpin *hp; + int size, err; + + size = sizeof(*hp) + params->num_channels * 2 * sizeof(u32); + hp = kzalloc(size, GFP_KERNEL); + if (!hp) + return ERR_PTR(-ENOMEM); + + hp->func_mdev = func_mdev; + hp->peer_mdev = peer_mdev; + hp->num_channels = params->num_channels; + + hp->rqn = (void *)hp + sizeof(*hp); + hp->sqn = hp->rqn + params->num_channels; + + /* alloc and pair func --> peer hairpin */ + err = mlx5_hairpin_create_queues(hp, params); + if (err) + goto err_create_queues; + + err = mlx5_hairpin_pair_queues(hp); + if (err) + goto err_pair_queues; + + return hp; + +err_pair_queues: + mlx5_hairpin_destroy_queues(hp); +err_create_queues: + kfree(hp); + return ERR_PTR(err); +} + +void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp) +{ + mlx5_hairpin_unpair_queues(hp); + mlx5_hairpin_destroy_queues(hp); + kfree(hp); +} diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index f3315bc874ad..3529b545675d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -113,6 +113,7 @@ struct mlxsw_core { struct mlxsw_thermal *thermal; struct mlxsw_core_port *ports; unsigned int max_ports; + bool reload_fail; unsigned long driver_priv[0]; /* driver_priv has to be always the last item */ }; @@ -962,7 +963,28 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port, pool_type, p_cur, p_max); } +static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + const struct mlxsw_bus *mlxsw_bus = mlxsw_core->bus; + int err; + + if (!mlxsw_bus->reset) + return -EOPNOTSUPP; + + mlxsw_core_bus_device_unregister(mlxsw_core, true); + mlxsw_bus->reset(mlxsw_core->bus_priv); + err = mlxsw_core_bus_device_register(mlxsw_core->bus_info, + mlxsw_core->bus, + mlxsw_core->bus_priv, true, + devlink); + if (err) + mlxsw_core->reload_fail = true; + return err; +} + static const struct devlink_ops mlxsw_devlink_ops = { + .reload = mlxsw_devlink_core_bus_device_reload, .port_type_set = mlxsw_devlink_port_type_set, .port_split = mlxsw_devlink_port_split, .port_unsplit = mlxsw_devlink_port_unsplit, @@ -980,23 +1002,26 @@ static const struct devlink_ops mlxsw_devlink_ops = { int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, const struct mlxsw_bus *mlxsw_bus, - void *bus_priv) + void *bus_priv, bool reload, + struct devlink *devlink) { const char *device_kind = mlxsw_bus_info->device_kind; struct mlxsw_core *mlxsw_core; struct mlxsw_driver *mlxsw_driver; - struct devlink *devlink; size_t alloc_size; int err; mlxsw_driver = mlxsw_core_driver_get(device_kind); if (!mlxsw_driver) return -EINVAL; - alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size; - devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size); - if (!devlink) { - err = -ENOMEM; - goto err_devlink_alloc; + + if (!reload) { + alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size; + devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size); + if (!devlink) { + err = -ENOMEM; + goto err_devlink_alloc; + } } mlxsw_core = devlink_priv(devlink); @@ -1012,6 +1037,12 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_bus_init; + if (mlxsw_driver->resources_register && !reload) { + err = mlxsw_driver->resources_register(mlxsw_core); + if (err) + goto err_register_resources; + } + err = mlxsw_ports_init(mlxsw_core); if (err) goto err_ports_init; @@ -1032,9 +1063,11 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_emad_init; - err = devlink_register(devlink, mlxsw_bus_info->dev); - if (err) - goto err_devlink_register; + if (!reload) { + err = devlink_register(devlink, mlxsw_bus_info->dev); + if (err) + goto err_devlink_register; + } err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon); if (err) @@ -1057,7 +1090,8 @@ err_driver_init: mlxsw_thermal_fini(mlxsw_core->thermal); err_thermal_init: err_hwmon_init: - devlink_unregister(devlink); + if (!reload) + devlink_unregister(devlink); err_devlink_register: mlxsw_emad_fini(mlxsw_core); err_emad_init: @@ -1067,26 +1101,40 @@ err_alloc_lag_mapping: err_ports_init: mlxsw_bus->fini(bus_priv); err_bus_init: - devlink_free(devlink); + if (!reload) + devlink_resources_unregister(devlink, NULL); +err_register_resources: + 
if (!reload) + devlink_free(devlink); err_devlink_alloc: mlxsw_core_driver_put(device_kind); return err; } EXPORT_SYMBOL(mlxsw_core_bus_device_register); -void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) +void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, + bool reload) { const char *device_kind = mlxsw_core->bus_info->device_kind; struct devlink *devlink = priv_to_devlink(mlxsw_core); + if (mlxsw_core->reload_fail) + goto reload_fail; + if (mlxsw_core->driver->fini) mlxsw_core->driver->fini(mlxsw_core); mlxsw_thermal_fini(mlxsw_core->thermal); - devlink_unregister(devlink); + if (!reload) + devlink_unregister(devlink); mlxsw_emad_fini(mlxsw_core); kfree(mlxsw_core->lag.mapping); mlxsw_ports_fini(mlxsw_core); + if (!reload) + devlink_resources_unregister(devlink, NULL); mlxsw_core->bus->fini(mlxsw_core->bus_priv); + if (reload) + return; +reload_fail: devlink_free(devlink); mlxsw_core_driver_put(device_kind); } @@ -1791,6 +1839,22 @@ void mlxsw_core_flush_owq(void) } EXPORT_SYMBOL(mlxsw_core_flush_owq); +int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core, + const struct mlxsw_config_profile *profile, + u64 *p_single_size, u64 *p_double_size, + u64 *p_linear_size) +{ + struct mlxsw_driver *driver = mlxsw_core->driver; + + if (!driver->kvd_sizes_get) + return -EINVAL; + + return driver->kvd_sizes_get(mlxsw_core, profile, + p_single_size, p_double_size, + p_linear_size); +} +EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get); + static int __init mlxsw_core_module_init(void) { int err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 6e966af72fc4..5ddafd74dc00 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -66,8 +66,9 @@ void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver); int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, const struct mlxsw_bus *mlxsw_bus, - void *bus_priv); -void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core); + void *bus_priv, bool reload, + struct devlink *devlink); +void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload); struct mlxsw_tx_info { u8 local_port; @@ -308,10 +309,20 @@ struct mlxsw_driver { u32 *p_cur, u32 *p_max); void (*txhdr_construct)(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info); + int (*resources_register)(struct mlxsw_core *mlxsw_core); + int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core, + const struct mlxsw_config_profile *profile, + u64 *p_single_size, u64 *p_double_size, + u64 *p_linear_size); u8 txhdr_len; const struct mlxsw_config_profile *profile; }; +int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core, + const struct mlxsw_config_profile *profile, + u64 *p_single_size, u64 *p_double_size, + u64 *p_linear_size); + bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core, enum mlxsw_res_id res_id); @@ -332,6 +343,7 @@ struct mlxsw_bus { const struct mlxsw_config_profile *profile, struct mlxsw_res *res); void (*fini)(void *bus_priv); + void (*reset)(void *bus_priv); bool (*skb_transmit_busy)(void *bus_priv, const struct mlxsw_tx_info *tx_info); int (*skb_transmit)(void *bus_priv, struct sk_buff *skb, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 6a979a09ab72..b698fb481b2e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ 
b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -310,9 +310,33 @@ struct mlxsw_afa_block { struct mlxsw_afa_set *first_set; struct mlxsw_afa_set *cur_set; unsigned int cur_act_index; /* In current set. */ - struct list_head fwd_entry_ref_list; + struct list_head resource_list; /* List of resources held by actions + * in this block. + */ }; +struct mlxsw_afa_resource { + struct list_head list; + void (*destructor)(struct mlxsw_afa_block *block, + struct mlxsw_afa_resource *resource); +}; + +static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block, + struct mlxsw_afa_resource *resource) +{ + list_add(&resource->list, &block->resource_list); +} + +static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block) +{ + struct mlxsw_afa_resource *resource, *tmp; + + list_for_each_entry_safe(resource, tmp, &block->resource_list, list) { + list_del(&resource->list); + resource->destructor(block, resource); + } +} + struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) { struct mlxsw_afa_block *block; @@ -320,7 +344,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) block = kzalloc(sizeof(*block), GFP_KERNEL); if (!block) return NULL; - INIT_LIST_HEAD(&block->fwd_entry_ref_list); + INIT_LIST_HEAD(&block->resource_list); block->afa = mlxsw_afa; /* At least one action set is always present, so just create it here */ @@ -336,8 +360,6 @@ err_first_set_create: } EXPORT_SYMBOL(mlxsw_afa_block_create); -static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block); - void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block) { struct mlxsw_afa_set *set = block->first_set; @@ -348,7 +370,7 @@ void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block) mlxsw_afa_set_put(block->afa, set); set = next_set; } while (set); - mlxsw_afa_fwd_entry_refs_destroy(block); + mlxsw_afa_resources_destroy(block); kfree(block); } EXPORT_SYMBOL(mlxsw_afa_block_destroy); @@ -489,10 +511,29 @@ static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa, } struct mlxsw_afa_fwd_entry_ref { - struct list_head list; + struct mlxsw_afa_resource resource; struct mlxsw_afa_fwd_entry *fwd_entry; }; +static void +mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block, + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref) +{ + mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry); + kfree(fwd_entry_ref); +} + +static void +mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block, + struct mlxsw_afa_resource *resource) +{ + struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; + + fwd_entry_ref = container_of(resource, struct mlxsw_afa_fwd_entry_ref, + resource); + mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref); +} + static struct mlxsw_afa_fwd_entry_ref * mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port) { @@ -509,7 +550,8 @@ mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port) goto err_fwd_entry_get; } fwd_entry_ref->fwd_entry = fwd_entry; - list_add(&fwd_entry_ref->list, &block->fwd_entry_ref_list); + fwd_entry_ref->resource.destructor = mlxsw_afa_fwd_entry_ref_destructor; + mlxsw_afa_resource_add(block, &fwd_entry_ref->resource); return fwd_entry_ref; err_fwd_entry_get: @@ -517,23 +559,51 @@ err_fwd_entry_get: return ERR_PTR(err); } +struct mlxsw_afa_counter { + struct mlxsw_afa_resource resource; + u32 counter_index; +}; + static void -mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block, - struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref) 
+mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block, + struct mlxsw_afa_counter *counter) { - list_del(&fwd_entry_ref->list); - mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry); - kfree(fwd_entry_ref); + block->afa->ops->counter_index_put(block->afa->ops_priv, + counter->counter_index); + kfree(counter); +} + +static void +mlxsw_afa_counter_destructor(struct mlxsw_afa_block *block, + struct mlxsw_afa_resource *resource) +{ + struct mlxsw_afa_counter *counter; + + counter = container_of(resource, struct mlxsw_afa_counter, resource); + mlxsw_afa_counter_destroy(block, counter); } -static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block) +static struct mlxsw_afa_counter * +mlxsw_afa_counter_create(struct mlxsw_afa_block *block) { - struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; - struct mlxsw_afa_fwd_entry_ref *tmp; + struct mlxsw_afa_counter *counter; + int err; + + counter = kzalloc(sizeof(*counter), GFP_KERNEL); + if (!counter) + return ERR_PTR(-ENOMEM); + + err = block->afa->ops->counter_index_get(block->afa->ops_priv, + &counter->counter_index); + if (err) + goto err_counter_index_get; + counter->resource.destructor = mlxsw_afa_counter_destructor; + mlxsw_afa_resource_add(block, &counter->resource); + return counter; - list_for_each_entry_safe(fwd_entry_ref, tmp, - &block->fwd_entry_ref_list, list) - mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref); +err_counter_index_get: + kfree(counter); + return ERR_PTR(err); } #define MLXSW_AFA_ONE_ACTION_LEN 32 @@ -690,6 +760,16 @@ MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4); */ MLXSW_ITEM32(afa, trapdisc, trap_id, 0x04, 0, 9); +/* afa_trapdisc_mirror_agent + * Mirror agent. + */ +MLXSW_ITEM32(afa, trapdisc, mirror_agent, 0x08, 29, 3); + +/* afa_trapdisc_mirror_enable + * Mirror enable. 
+ */ +MLXSW_ITEM32(afa, trapdisc, mirror_enable, 0x08, 24, 1); + static inline void mlxsw_afa_trapdisc_pack(char *payload, enum mlxsw_afa_trapdisc_trap_action trap_action, @@ -701,6 +781,14 @@ mlxsw_afa_trapdisc_pack(char *payload, mlxsw_afa_trapdisc_trap_id_set(payload, trap_id); } +static inline void +mlxsw_afa_trapdisc_mirror_pack(char *payload, bool mirror_enable, + u8 mirror_agent) +{ + mlxsw_afa_trapdisc_mirror_enable_set(payload, mirror_enable); + mlxsw_afa_trapdisc_mirror_agent_set(payload, mirror_agent); +} + int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block) { char *act = mlxsw_afa_block_append_action(block, @@ -746,6 +834,104 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, } EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward); +struct mlxsw_afa_mirror { + struct mlxsw_afa_resource resource; + int span_id; + u8 local_in_port; + u8 local_out_port; + bool ingress; +}; + +static void +mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block, + struct mlxsw_afa_mirror *mirror) +{ + block->afa->ops->mirror_del(block->afa->ops_priv, + mirror->local_in_port, + mirror->local_out_port, + mirror->ingress); + kfree(mirror); +} + +static void +mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block, + struct mlxsw_afa_resource *resource) +{ + struct mlxsw_afa_mirror *mirror; + + mirror = container_of(resource, struct mlxsw_afa_mirror, resource); + mlxsw_afa_mirror_destroy(block, mirror); +} + +static struct mlxsw_afa_mirror * +mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, + u8 local_in_port, u8 local_out_port, + bool ingress) +{ + struct mlxsw_afa_mirror *mirror; + int err; + + mirror = kzalloc(sizeof(*mirror), GFP_KERNEL); + if (!mirror) + return ERR_PTR(-ENOMEM); + + err = block->afa->ops->mirror_add(block->afa->ops_priv, + local_in_port, local_out_port, + ingress, &mirror->span_id); + if (err) + goto err_mirror_add; + + mirror->ingress = ingress; + mirror->local_out_port = local_out_port; + mirror->local_in_port = local_in_port; + mirror->resource.destructor = mlxsw_afa_mirror_destructor; + mlxsw_afa_resource_add(block, &mirror->resource); + return mirror; + +err_mirror_add: + kfree(mirror); + return ERR_PTR(err); +} + +static int +mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block, + u8 mirror_agent) +{ + char *act = mlxsw_afa_block_append_action(block, + MLXSW_AFA_TRAPDISC_CODE, + MLXSW_AFA_TRAPDISC_SIZE); + if (!act) + return -ENOBUFS; + mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP, + MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0); + mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent); + return 0; +} + +int +mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, + u8 local_in_port, u8 local_out_port, bool ingress) +{ + struct mlxsw_afa_mirror *mirror; + int err; + + mirror = mlxsw_afa_mirror_create(block, local_in_port, local_out_port, + ingress); + if (IS_ERR(mirror)) + return PTR_ERR(mirror); + + err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id); + if (err) + goto err_append_allocated_mirror; + + return 0; + +err_append_allocated_mirror: + mlxsw_afa_mirror_destroy(block, mirror); + return err; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_mirror); + /* Forwarding Action * ----------------- * Forwarding Action can be used to implement Policy Based Switching (PBS) @@ -853,11 +1039,10 @@ mlxsw_afa_polcnt_pack(char *payload, mlxsw_afa_polcnt_counter_index_set(payload, counter_index); } -int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, - u32 counter_index) 
+int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block, + u32 counter_index) { - char *act = mlxsw_afa_block_append_action(block, - MLXSW_AFA_POLCNT_CODE, + char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE, MLXSW_AFA_POLCNT_SIZE); if (!act) return -ENOBUFS; @@ -865,6 +1050,32 @@ int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, counter_index); return 0; } +EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter); + +int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, + u32 *p_counter_index) +{ + struct mlxsw_afa_counter *counter; + u32 counter_index; + int err; + + counter = mlxsw_afa_counter_create(block); + if (IS_ERR(counter)) + return PTR_ERR(counter); + counter_index = counter->counter_index; + + err = mlxsw_afa_block_append_allocated_counter(block, counter_index); + if (err) + goto err_append_allocated_counter; + + if (p_counter_index) + *p_counter_index = counter_index; + return 0; + +err_append_allocated_counter: + mlxsw_afa_counter_destroy(block, counter); + return err; +} EXPORT_SYMBOL(mlxsw_afa_block_append_counter); /* Virtual Router and Forwarding Domain Action diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index a8d3314c3a24..43132293475c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -46,6 +46,12 @@ struct mlxsw_afa_ops { void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first); int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port); void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index); + int (*counter_index_get)(void *priv, unsigned int *p_counter_index); + void (*counter_index_put)(void *priv, unsigned int counter_index); + int (*mirror_add)(void *priv, u8 local_in_port, u8 local_out_port, + bool ingress, int *p_span_id); + void (*mirror_del)(void *priv, u8 local_in_port, u8 local_out_port, + bool ingress); }; struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set, @@ -63,12 +69,17 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id); int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, u16 trap_id); +int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, + u8 local_in_port, u8 local_out_port, + bool ingress); int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, u8 local_port, bool in_port); int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, u16 vid, u8 pcp, u8 et); +int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block, + u32 counter_index); int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, - u32 counter_index); + u32 *p_counter_index); int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid); int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block, u16 expected_irif, u16 min_mtu, diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index c0dcfa05b077..25f9915ebd82 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -539,7 +539,8 @@ static int mlxsw_i2c_probe(struct i2c_client *client, mlxsw_i2c->dev = &client->dev; err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info, - &mlxsw_i2c_bus, mlxsw_i2c); + &mlxsw_i2c_bus, mlxsw_i2c, false, + NULL); if (err) {
dev_err(&client->dev, "Fail to register core bus\n"); return err; @@ -557,7 +558,7 @@ static int mlxsw_i2c_remove(struct i2c_client *client) { struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client); - mlxsw_core_bus_device_unregister(mlxsw_i2c->core); + mlxsw_core_bus_device_unregister(mlxsw_i2c->core, false); mutex_destroy(&mlxsw_i2c->cmd.lock); return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index 28427f0758c7..31c886edc791 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -42,7 +42,7 @@ struct mlxsw_item { unsigned short offset; /* bytes in container */ - unsigned short step; /* step in bytes for indexed items */ + short step; /* step in bytes for indexed items */ unsigned short in_step_offset; /* offset within one step */ unsigned char shift; /* shift in bits */ unsigned char element_size; /* size of element in bit array */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 6ef20e5cc77d..85faa87bf42d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -154,6 +154,7 @@ struct mlxsw_pci { } comp; } cmd; struct mlxsw_bus_info bus_info; + const struct pci_device_id *id; }; static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q) @@ -1052,38 +1053,18 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox, } static int -mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile, +mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci, + const struct mlxsw_config_profile *profile, struct mlxsw_res *res) { - u32 single_size, double_size, linear_size; - - if (!MLXSW_RES_VALID(res, KVD_SINGLE_MIN_SIZE) || - !MLXSW_RES_VALID(res, KVD_DOUBLE_MIN_SIZE) || - !profile->used_kvd_split_data) - return -EIO; - - linear_size = profile->kvd_linear_size; + u64 single_size, double_size, linear_size; + int err; - /* The hash part is what left of the kvd without the - * linear part. It is split to the single size and - * double size by the parts ratio from the profile. - * Both sizes must be a multiplications of the - * granularity from the profile. - */ - double_size = MLXSW_RES_GET(res, KVD_SIZE) - linear_size; - double_size *= profile->kvd_hash_double_parts; - double_size /= profile->kvd_hash_double_parts + - profile->kvd_hash_single_parts; - double_size /= profile->kvd_hash_granularity; - double_size *= profile->kvd_hash_granularity; - single_size = MLXSW_RES_GET(res, KVD_SIZE) - double_size - - linear_size; - - /* Check results are legal. 
*/ - if (single_size < MLXSW_RES_GET(res, KVD_SINGLE_MIN_SIZE) || - double_size < MLXSW_RES_GET(res, KVD_DOUBLE_MIN_SIZE) || - MLXSW_RES_GET(res, KVD_SIZE) < linear_size) - return -EIO; + err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile, + &single_size, &double_size, + &linear_size); + if (err) + return err; MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size); MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size); @@ -1184,7 +1165,7 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, mbox, profile->adaptive_routing_group_cap); } if (MLXSW_RES_VALID(res, KVD_SIZE)) { - err = mlxsw_pci_profile_get_kvd_sizes(profile, res); + err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res); if (err) return err; @@ -1622,16 +1603,6 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, return err; } -static const struct mlxsw_bus mlxsw_pci_bus = { - .kind = "pci", - .init = mlxsw_pci_init, - .fini = mlxsw_pci_fini, - .skb_transmit_busy = mlxsw_pci_skb_transmit_busy, - .skb_transmit = mlxsw_pci_skb_transmit, - .cmd_exec = mlxsw_pci_cmd_exec, - .features = MLXSW_BUS_F_TXRX, -}; - static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id) { @@ -1660,6 +1631,41 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, return 0; } +static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci) +{ + pci_free_irq_vectors(mlxsw_pci->pdev); +} + +static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) +{ + int err; + + err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX); + if (err < 0) + dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n"); + return err; +} + +static void mlxsw_pci_reset(void *bus_priv) +{ + struct mlxsw_pci *mlxsw_pci = bus_priv; + + mlxsw_pci_free_irq_vectors(mlxsw_pci); + mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id); + mlxsw_pci_alloc_irq_vectors(mlxsw_pci); +} + +static const struct mlxsw_bus mlxsw_pci_bus = { + .kind = "pci", + .init = mlxsw_pci_init, + .fini = mlxsw_pci_fini, + .skb_transmit_busy = mlxsw_pci_skb_transmit_busy, + .skb_transmit = mlxsw_pci_skb_transmit, + .cmd_exec = mlxsw_pci_cmd_exec, + .features = MLXSW_BUS_F_TXRX, + .reset = mlxsw_pci_reset, +}; + static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { const char *driver_name = pdev->driver->name; @@ -1721,7 +1727,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_sw_reset; } - err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); + err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci); if (err < 0) { dev_err(&pdev->dev, "MSI-X init failed\n"); goto err_msix_init; @@ -1730,9 +1736,11 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mlxsw_pci->bus_info.device_kind = driver_name; mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); mlxsw_pci->bus_info.dev = &pdev->dev; + mlxsw_pci->id = id; err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info, - &mlxsw_pci_bus, mlxsw_pci); + &mlxsw_pci_bus, mlxsw_pci, false, + NULL); if (err) { dev_err(&pdev->dev, "cannot register bus device\n"); goto err_bus_device_register; @@ -1741,7 +1749,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_bus_device_register: - pci_free_irq_vectors(mlxsw_pci->pdev); + mlxsw_pci_free_irq_vectors(mlxsw_pci); err_msix_init: err_sw_reset: iounmap(mlxsw_pci->hw_addr); @@ -1760,8 +1768,8 @@ static void mlxsw_pci_remove(struct pci_dev *pdev) { struct mlxsw_pci *mlxsw_pci = 
pci_get_drvdata(pdev); - mlxsw_core_bus_device_unregister(mlxsw_pci->core); - pci_free_irq_vectors(mlxsw_pci->pdev); + mlxsw_core_bus_device_unregister(mlxsw_pci->core, false); + mlxsw_pci_free_irq_vectors(mlxsw_pci); iounmap(mlxsw_pci->hw_addr); pci_release_regions(mlxsw_pci->pdev); pci_disable_device(mlxsw_pci->pdev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 6c4e08b8058a..0e08be41c8e0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4827,6 +4827,42 @@ static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index, mlxsw_reg_ratr_counter_set_type_set(payload, set_type); } +/* RDPM - Router DSCP to Priority Mapping + * -------------------------------------- + * Controls the mapping from DSCP field to switch priority on routed packets + */ +#define MLXSW_REG_RDPM_ID 0x8009 +#define MLXSW_REG_RDPM_BASE_LEN 0x00 +#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN 0x01 +#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT 64 +#define MLXSW_REG_RDPM_LEN 0x40 +#define MLXSW_REG_RDPM_LAST_ENTRY (MLXSW_REG_RDPM_BASE_LEN + \ + MLXSW_REG_RDPM_LEN - \ + MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN) + +MLXSW_REG_DEFINE(rdpm, MLXSW_REG_RDPM_ID, MLXSW_REG_RDPM_LEN); + +/* reg_dscp_entry_e + * Enable update of the specific entry + * Access: Index + */ +MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_e, MLXSW_REG_RDPM_LAST_ENTRY, 7, 1, + -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false); + +/* reg_dscp_entry_prio + * Switch Priority + * Access: RW + */ +MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_prio, MLXSW_REG_RDPM_LAST_ENTRY, 0, 4, + -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false); + +static inline void mlxsw_reg_rdpm_pack(char *payload, unsigned short index, + u8 prio) +{ + mlxsw_reg_rdpm_dscp_entry_e_set(payload, index, 1); + mlxsw_reg_rdpm_dscp_entry_prio_set(payload, index, prio); +} + /* RICNT - Router Interface Counter Register * ----------------------------------------- * The RICNT register retrieves per port performance counters @@ -7640,6 +7676,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(rtar), MLXSW_REG(ratr), MLXSW_REG(rtdp), + MLXSW_REG(rdpm), MLXSW_REG(ricnt), MLXSW_REG(rrcr), MLXSW_REG(ralta), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c3837ca7a705..3dcc58d61506 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -76,12 +76,7 @@ #define MLXSW_FWREV_MAJOR 13 #define MLXSW_FWREV_MINOR 1530 #define MLXSW_FWREV_SUBMINOR 152 - -static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = { - .major = MLXSW_FWREV_MAJOR, - .minor = MLXSW_FWREV_MINOR, - .subminor = MLXSW_FWREV_SUBMINOR -}; +#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) #define MLXSW_SP_FW_FILENAME \ "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \ @@ -339,28 +334,25 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware); } -static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a, - const struct mlxsw_fw_rev *b) -{ - if (a->major != b->major) - return a->major > b->major; - if (a->minor != b->minor) - return a->minor > b->minor; - return a->subminor >= b->subminor; -} - static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) { const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; const struct firmware *firmware; int err; - if 
(mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev)) + /* Validate driver & FW are compatible */ + if (rev->major != MLXSW_FWREV_MAJOR) { + WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", + rev->major, MLXSW_FWREV_MAJOR); + return -EINVAL; + } + if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) == + MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR)) return 0; - dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d out of data\n", + dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", rev->major, rev->minor, rev->subminor); - dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n", + dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", MLXSW_SP_FW_FILENAME); err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME, @@ -576,7 +568,7 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp, span_entry->used = false; } -static struct mlxsw_sp_span_entry * +struct mlxsw_sp_span_entry * mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port) { int i; @@ -677,13 +669,28 @@ mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port, static int mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port, struct mlxsw_sp_span_entry *span_entry, - enum mlxsw_sp_span_type type) + enum mlxsw_sp_span_type type, + bool bind) { - struct mlxsw_sp_span_inspected_port *inspected_port; struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; char mpar_pl[MLXSW_REG_MPAR_LEN]; - char sbib_pl[MLXSW_REG_SBIB_LEN]; int pa_id = span_entry->id; + + /* bind the port to the SPAN entry */ + mlxsw_reg_mpar_pack(mpar_pl, port->local_port, + (enum mlxsw_reg_mpar_i_e) type, bind, pa_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); +} + +static int +mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type type, + bool bind) +{ + struct mlxsw_sp_span_inspected_port *inspected_port; + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char sbib_pl[MLXSW_REG_SBIB_LEN]; int err; /* if it is an egress SPAN, bind a shared buffer to it */ @@ -699,12 +706,12 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port, } } - /* bind the port to the SPAN entry */ - mlxsw_reg_mpar_pack(mpar_pl, port->local_port, - (enum mlxsw_reg_mpar_i_e) type, true, pa_id); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); - if (err) - goto err_mpar_reg_write; + if (bind) { + err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type, + true); + if (err) + goto err_port_bind; + } inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL); if (!inspected_port) { @@ -717,8 +724,11 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port, return 0; -err_mpar_reg_write: err_inspected_port_alloc: + if (bind) + mlxsw_sp_span_inspected_port_bind(port, span_entry, type, + false); +err_port_bind: if (type == MLXSW_SP_SPAN_EGRESS) { mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); @@ -727,25 +737,22 @@ err_inspected_port_alloc: } static void -mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port, - struct mlxsw_sp_span_entry *span_entry, - enum mlxsw_sp_span_type type) +mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type type, + bool bind) { struct mlxsw_sp_span_inspected_port *inspected_port; struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; - char 
mpar_pl[MLXSW_REG_MPAR_LEN]; char sbib_pl[MLXSW_REG_SBIB_LEN]; - int pa_id = span_entry->id; inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry); if (!inspected_port) return; - /* remove the inspected port */ - mlxsw_reg_mpar_pack(mpar_pl, port->local_port, - (enum mlxsw_reg_mpar_i_e) type, false, pa_id); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); - + if (bind) + mlxsw_sp_span_inspected_port_bind(port, span_entry, type, + false); /* remove the SBIB buffer if it was egress SPAN */ if (type == MLXSW_SP_SPAN_EGRESS) { mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); @@ -758,9 +765,9 @@ mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port, kfree(inspected_port); } -static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, - struct mlxsw_sp_port *to, - enum mlxsw_sp_span_type type) +int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, + struct mlxsw_sp_port *to, + enum mlxsw_sp_span_type type, bool bind) { struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp; struct mlxsw_sp_span_entry *span_entry; @@ -773,7 +780,7 @@ static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n", span_entry->id); - err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type); + err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind); if (err) goto err_port_bind; @@ -784,9 +791,8 @@ err_port_bind: return err; } -static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from, - u8 destination_port, - enum mlxsw_sp_span_type type) +void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port, + enum mlxsw_sp_span_type type, bool bind) { struct mlxsw_sp_span_entry *span_entry; @@ -799,7 +805,7 @@ static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from, netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n", span_entry->id); - mlxsw_sp_span_inspected_port_unbind(from, span_entry, type); + mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind); } static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port, @@ -1571,14 +1577,11 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, const struct tc_action *a, bool ingress) { - struct net *net = dev_net(mlxsw_sp_port->dev); enum mlxsw_sp_span_type span_type; struct mlxsw_sp_port *to_port; struct net_device *to_dev; - int ifindex; - ifindex = tcf_mirred_ifindex(a); - to_dev = __dev_get_by_index(net, ifindex); + to_dev = tcf_mirred_dev(a); if (!to_dev) { netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n"); return -EINVAL; @@ -1593,7 +1596,8 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, mirror->to_local_port = to_port->local_port; mirror->ingress = ingress; span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type); + return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type, + true); } static void @@ -1604,8 +1608,8 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, span_type = mirror->ingress ? 
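/* The bind/add split above exists because the two SPAN consumers need
 * different halves of the old combined helper. The two call patterns,
 * as they appear elsewhere in this patch:
 *
 *	// matchall mirror offload: the analyzed port is bound in
 *	// hardware through MPAR, so pass bind=true
 *	err = mlxsw_sp_span_mirror_add(from, to, span_type, true);
 *	...
 *	mlxsw_sp_span_mirror_del(from, to->local_port, span_type, true);
 *
 *	// ACL mirror action: only the SPAN entry (plus the SBIB buffer
 *	// for egress) is set up; the binding is carried per-packet by
 *	// the flexible action itself, so pass bind=false
 *	err = mlxsw_sp_span_mirror_add(from, to, span_type, false);
 */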
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port, - span_type); + mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->to_local_port, + span_type, true); } static int @@ -1734,9 +1738,6 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_cls_matchall_offload *f, bool ingress) { - if (f->common.chain_index) - return -EOPNOTSUPP; - switch (f->command) { case TC_CLSMATCHALL_REPLACE: return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f, @@ -1750,72 +1751,187 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, } static int -mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port, - struct tc_cls_flower_offload *f, - bool ingress) +mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block, + struct tc_cls_flower_offload *f) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block); + switch (f->command) { case TC_CLSFLOWER_REPLACE: - return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f); + return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f); case TC_CLSFLOWER_DESTROY: - mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f); + mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f); return 0; case TC_CLSFLOWER_STATS: - return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f); + return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f); default: return -EOPNOTSUPP; } } -static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv, bool ingress) +static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type, + void *type_data, + void *cb_priv, bool ingress) { struct mlxsw_sp_port *mlxsw_sp_port = cb_priv; - if (!tc_can_offload(mlxsw_sp_port->dev)) - return -EOPNOTSUPP; - switch (type) { case TC_SETUP_CLSMATCHALL: + if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev, + type_data)) + return -EOPNOTSUPP; + return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data, ingress); case TC_SETUP_CLSFLOWER: - return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data, - ingress); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type, + void *type_data, + void *cb_priv) +{ + return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, + cb_priv, true); +} + +static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type, + void *type_data, + void *cb_priv) +{ + return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data, + cb_priv, false); +} + +static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct mlxsw_sp_acl_block *acl_block = cb_priv; + + switch (type) { + case TC_SETUP_CLSMATCHALL: + return 0; + case TC_SETUP_CLSFLOWER: + if (mlxsw_sp_acl_block_disabled(acl_block)) + return -EOPNOTSUPP; + + return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data); default: return -EOPNOTSUPP; } } -static int mlxsw_sp_setup_tc_block_cb_ig(enum tc_setup_type type, - void *type_data, void *cb_priv) +static int +mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, + struct tcf_block *block, bool ingress) { - return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, true); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_acl_block *acl_block; + struct tcf_block_cb *block_cb; + int err; + + block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower, + mlxsw_sp); + if (!block_cb) { + acl_block = 
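/* tc_cls_can_offload_and_chain0() above folds together the two checks
 * that were previously open-coded in the matchall path, roughly
 * (sketch only; the real helper also reports the failure reason
 * through extack):
 *
 *	static bool can_offload_and_chain0(const struct net_device *dev,
 *					   struct tc_cls_common_offload *common)
 *	{
 *		if (!tc_can_offload(dev))
 *			return false;
 *		if (common->chain_index)	// only chain 0 offloads here
 *			return false;
 *		return true;
 *	}
 */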
mlxsw_sp_acl_block_create(mlxsw_sp, block->net); + if (!acl_block) + return -ENOMEM; + block_cb = __tcf_block_cb_register(block, + mlxsw_sp_setup_tc_block_cb_flower, + mlxsw_sp, acl_block); + if (IS_ERR(block_cb)) { + err = PTR_ERR(block_cb); + goto err_cb_register; + } + } else { + acl_block = tcf_block_cb_priv(block_cb); + } + tcf_block_cb_incref(block_cb); + err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block, + mlxsw_sp_port, ingress); + if (err) + goto err_block_bind; + + if (ingress) + mlxsw_sp_port->ing_acl_block = acl_block; + else + mlxsw_sp_port->eg_acl_block = acl_block; + + return 0; + +err_block_bind: + if (!tcf_block_cb_decref(block_cb)) { + __tcf_block_cb_unregister(block_cb); +err_cb_register: + mlxsw_sp_acl_block_destroy(acl_block); + } + return err; } -static int mlxsw_sp_setup_tc_block_cb_eg(enum tc_setup_type type, - void *type_data, void *cb_priv) +static void +mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port, + struct tcf_block *block, bool ingress) { - return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, false); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_acl_block *acl_block; + struct tcf_block_cb *block_cb; + int err; + + block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower, + mlxsw_sp); + if (!block_cb) + return; + + if (ingress) + mlxsw_sp_port->ing_acl_block = NULL; + else + mlxsw_sp_port->eg_acl_block = NULL; + + acl_block = tcf_block_cb_priv(block_cb); + err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block, + mlxsw_sp_port, ingress); + if (!err && !tcf_block_cb_decref(block_cb)) { + __tcf_block_cb_unregister(block_cb); + mlxsw_sp_acl_block_destroy(acl_block); + } } static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_block_offload *f) { tc_setup_cb_t *cb; + bool ingress; + int err; - if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - cb = mlxsw_sp_setup_tc_block_cb_ig; - else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) - cb = mlxsw_sp_setup_tc_block_cb_eg; - else + if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { + cb = mlxsw_sp_setup_tc_block_cb_matchall_ig; + ingress = true; + } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { + cb = mlxsw_sp_setup_tc_block_cb_matchall_eg; + ingress = false; + } else { return -EOPNOTSUPP; + } switch (f->command) { case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, cb, mlxsw_sp_port, - mlxsw_sp_port); + err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port, + mlxsw_sp_port); + if (err) + return err; + err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, + f->block, ingress); + if (err) { + tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); + return err; + } + return 0; case TC_BLOCK_UNBIND: + mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port, + f->block, ingress); tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); return 0; default: @@ -1833,11 +1949,69 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); case TC_SETUP_QDISC_RED: return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); + case TC_SETUP_QDISC_PRIO: + return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data); default: return -EOPNOTSUPP; } } + +static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + + if (!enable) { + if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) || + 
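/* Lifetime of the shared flower block created above, assuming a tc
 * block shared by two ports (the tc invocations are illustrative):
 *
 *	# tc qdisc add dev sw1p1 ingress_block 22 clsact
 *	# tc qdisc add dev sw1p2 ingress_block 22 clsact
 *
 *	bind(sw1p1): tcf_block_cb_lookup() misses ->
 *		mlxsw_sp_acl_block_create() + __tcf_block_cb_register(),
 *		then tcf_block_cb_incref() -> refcount 1
 *	bind(sw1p2): lookup hits -> incref -> refcount 2
 *	unbind(sw1p2): decref -> 1, callback and block kept
 *	unbind(sw1p1): decref -> 0 -> __tcf_block_cb_unregister() +
 *		mlxsw_sp_acl_block_destroy()
 */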
mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) || + !list_empty(&mlxsw_sp_port->mall_tc_list)) { + netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); + return -EINVAL; + } + mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block); + mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block); + } else { + mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block); + mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block); + } + return 0; +} + +typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable); + +static int mlxsw_sp_handle_feature(struct net_device *dev, + netdev_features_t wanted_features, + netdev_features_t feature, + mlxsw_sp_feature_handler feature_handler) +{ + netdev_features_t changes = wanted_features ^ dev->features; + bool enable = !!(wanted_features & feature); + int err; + + if (!(changes & feature)) + return 0; + + err = feature_handler(dev, enable); + if (err) { + netdev_err(dev, "%s feature %pNF failed, err %d\n", + enable ? "Enable" : "Disable", &feature, err); + return err; + } + + if (enable) + dev->features |= feature; + else + dev->features &= ~feature; + + return 0; +} +static int mlxsw_sp_set_features(struct net_device *dev, + netdev_features_t features) +{ + return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC, + mlxsw_sp_feature_hw_tc); +} + static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_open = mlxsw_sp_port_open, .ndo_stop = mlxsw_sp_port_stop, @@ -1852,6 +2026,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name, + .ndo_set_features = mlxsw_sp_set_features, }; static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, @@ -3039,6 +3214,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_fids_init; } + err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", + mlxsw_sp_port->local_port); + goto err_port_qdiscs_init; + } + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); if (IS_ERR(mlxsw_sp_port_vlan)) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", @@ -3067,6 +3249,8 @@ err_register_netdev: mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); err_port_vlan_get: + mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); +err_port_qdiscs_init: mlxsw_sp_port_fids_fini(mlxsw_sp_port); err_port_fids_init: mlxsw_sp_port_dcb_fini(mlxsw_sp_port); @@ -3102,6 +3286,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp->ports[local_port] = NULL; mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); mlxsw_sp_port_vlan_flush(mlxsw_sp_port); + mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); mlxsw_sp_port_fids_fini(mlxsw_sp_port); mlxsw_sp_port_dcb_fini(mlxsw_sp_port); mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); @@ -3933,6 +4118,253 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = { .resource_query_enable = 1, }; +static bool +mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack, + u64 size) +{ + const struct mlxsw_config_profile *profile; + + profile = &mlxsw_sp_config_profile; + if (size % profile->kvd_hash_granularity) { + NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity"); + return false; + } + return true; +} + +static int 
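/* mlxsw_sp_handle_feature() above is deliberately generic so that
 * further netdev_features_t bits can be wired up with one call each.
 * Sketch of how a second toggle would slot in (NETIF_F_LOOPBACK and
 * mlxsw_sp_feature_loopback are hypothetical here; this patch only
 * handles NETIF_F_HW_TC):
 *
 *	static int mlxsw_sp_set_features(struct net_device *dev,
 *					 netdev_features_t features)
 *	{
 *		int err;
 *
 *		err = mlxsw_sp_handle_feature(dev, features,
 *					      NETIF_F_HW_TC,
 *					      mlxsw_sp_feature_hw_tc);
 *		if (err)
 *			return err;
 *		return mlxsw_sp_handle_feature(dev, features,
 *					       NETIF_F_LOOPBACK,
 *					       mlxsw_sp_feature_loopback);
 *	}
 */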
+mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size, + struct netlink_ext_ack *extack) +{ + NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed"); + return -EINVAL; +} + +static int +mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size, + struct netlink_ext_ack *extack) +{ + if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size)) + return -EINVAL; + + return 0; +} + +static int +mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size, + struct netlink_ext_ack *extack) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + + if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size)) + return -EINVAL; + + if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) { + NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum"); + return -EINVAL; + } + return 0; +} + +static int +mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size, + struct netlink_ext_ack *extack) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + + if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size)) + return -EINVAL; + + if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) { + NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum"); + return -EINVAL; + } + return 0; +} + +static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + return mlxsw_sp_kvdl_occ_get(mlxsw_sp); +} + +static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = { + .size_validate = mlxsw_sp_resource_kvd_size_validate, +}; + +static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = { + .size_validate = mlxsw_sp_resource_kvd_linear_size_validate, + .occ_get = mlxsw_sp_resource_kvd_linear_occ_get, +}; + +static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = { + .size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate, +}; + +static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = { + .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate, +}; + +static struct devlink_resource_size_params mlxsw_sp_kvd_size_params; +static struct devlink_resource_size_params mlxsw_sp_linear_size_params; +static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params; +static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params; + +static void +mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core) +{ + u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, + KVD_SINGLE_MIN_SIZE); + u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, + KVD_DOUBLE_MIN_SIZE); + u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); + u32 linear_size_min = 0; + + /* KVD top resource */ + mlxsw_sp_kvd_size_params.size_min = kvd_size; + mlxsw_sp_kvd_size_params.size_max = kvd_size; + mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; + mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; + + /* Linear part init */ + mlxsw_sp_linear_size_params.size_min = linear_size_min; + mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min - + double_size_min; + mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; + mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; + + /* Hash double part init */ + mlxsw_sp_hash_double_size_params.size_min = double_size_min; + mlxsw_sp_hash_double_size_params.size_max 
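/* Worked example for the granularity check above, with
 * MLXSW_SP_KVD_GRANULARITY == 128 (spectrum.h):
 *
 *	requested linear size 98304 -> 98304 % 128 == 0, accepted
 *	requested linear size 98300 -> 98300 % 128 == 124, rejected
 *		with extack "resource set with wrong granularity"
 *
 * The top-level "kvd" resource rejects any resize outright (its
 * size_min and size_max are both kvd_size), and only the linear part
 * reports live occupancy, via mlxsw_sp_kvdl_occ_get() (implemented
 * later in this patch), which sums the allocated chunks of every
 * KVDL part.
 */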
= kvd_size - single_size_min - + linear_size_min; + mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; + mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; + + /* Hash single part init */ + mlxsw_sp_hash_single_size_params.size_min = single_size_min; + mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min - + linear_size_min; + mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; + mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; +} + +static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + u32 kvd_size, single_size, double_size, linear_size; + const struct mlxsw_config_profile *profile; + int err; + + profile = &mlxsw_sp_config_profile; + if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) + return -EIO; + + mlxsw_sp_resource_size_params_prepare(mlxsw_core); + kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, + true, kvd_size, + MLXSW_SP_RESOURCE_KVD, + DEVLINK_RESOURCE_ID_PARENT_TOP, + &mlxsw_sp_kvd_size_params, + &mlxsw_sp_resource_kvd_ops); + if (err) + return err; + + linear_size = profile->kvd_linear_size; + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, + false, linear_size, + MLXSW_SP_RESOURCE_KVD_LINEAR, + MLXSW_SP_RESOURCE_KVD, + &mlxsw_sp_linear_size_params, + &mlxsw_sp_resource_kvd_linear_ops); + if (err) + return err; + + double_size = kvd_size - linear_size; + double_size *= profile->kvd_hash_double_parts; + double_size /= profile->kvd_hash_double_parts + + profile->kvd_hash_single_parts; + double_size = rounddown(double_size, profile->kvd_hash_granularity); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, + false, double_size, + MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, + MLXSW_SP_RESOURCE_KVD, + &mlxsw_sp_hash_double_size_params, + &mlxsw_sp_resource_kvd_hash_double_ops); + if (err) + return err; + + single_size = kvd_size - double_size - linear_size; + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, + false, single_size, + MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, + MLXSW_SP_RESOURCE_KVD, + &mlxsw_sp_hash_single_size_params, + &mlxsw_sp_resource_kvd_hash_single_ops); + if (err) + return err; + + return 0; +} + +static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, + const struct mlxsw_config_profile *profile, + u64 *p_single_size, u64 *p_double_size, + u64 *p_linear_size) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + u32 double_size; + int err; + + if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || + !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || + !profile->used_kvd_split_data) + return -EIO; + + /* The hash part is what left of the kvd without the + * linear part. It is split to the single size and + * double size by the parts ratio from the profile. + * Both sizes must be a multiplications of the + * granularity from the profile. In case the user + * provided the sizes they are obtained via devlink. 
+ */ + err = devlink_resource_size_get(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR, + p_linear_size); + if (err) + *p_linear_size = profile->kvd_linear_size; + + err = devlink_resource_size_get(devlink, + MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, + p_double_size); + if (err) { + double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - + *p_linear_size; + double_size *= profile->kvd_hash_double_parts; + double_size /= profile->kvd_hash_double_parts + + profile->kvd_hash_single_parts; + *p_double_size = rounddown(double_size, + profile->kvd_hash_granularity); + } + + err = devlink_resource_size_get(devlink, + MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, + p_single_size); + if (err) + *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - + *p_double_size - *p_linear_size; + + /* Check results are legal. */ + if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || + *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || + MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) + return -EIO; + + return 0; +} + static struct mlxsw_driver mlxsw_sp_driver = { .kind = mlxsw_sp_driver_name, .priv_size = sizeof(struct mlxsw_sp), @@ -3952,6 +4384,8 @@ static struct mlxsw_driver mlxsw_sp_driver = { .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, .txhdr_construct = mlxsw_sp_txhdr_construct, + .resources_register = mlxsw_sp_resources_register, + .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp_config_profile, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 05ce1befd9b3..bdd8f94a452c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -66,6 +66,18 @@ #define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */ #define MLXSW_SP_KVD_GRANULARITY 128 +#define MLXSW_SP_RESOURCE_NAME_KVD "kvd" +#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear" +#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single" +#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double" + +enum mlxsw_sp_resource_id { + MLXSW_SP_RESOURCE_KVD, + MLXSW_SP_RESOURCE_KVD_LINEAR, + MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, + MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, +}; + struct mlxsw_sp_port; struct mlxsw_sp_rif; @@ -204,29 +216,6 @@ struct mlxsw_sp_port_vlan { struct list_head bridge_vlan_node; }; -enum mlxsw_sp_qdisc_type { - MLXSW_SP_QDISC_NO_QDISC, - MLXSW_SP_QDISC_RED, -}; - -struct mlxsw_sp_qdisc { - u32 handle; - enum mlxsw_sp_qdisc_type type; - struct red_stats xstats_base; - union { - struct { - u64 tail_drop_base; - u64 ecn_base; - u64 wred_drop_base; - } red; - } xstats; - - u64 tx_bytes; - u64 tx_packets; - u64 drops; - u64 overlimits; -}; - /* No need an internal lock; At worse - miss a single periodic iteration */ struct mlxsw_sp_port_xstats { u64 ecn; @@ -269,7 +258,10 @@ struct mlxsw_sp_port { } periodic_hw_stats; struct mlxsw_sp_port_sample *sample; struct list_head vlans_list; - struct mlxsw_sp_qdisc root_qdisc; + struct mlxsw_sp_qdisc *root_qdisc; + unsigned acl_rule_count; + struct mlxsw_sp_acl_block *ing_acl_block; + struct mlxsw_sp_acl_block *eg_acl_block; }; static inline bool @@ -404,6 +396,16 @@ struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev); struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port); struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct 
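/* Numeric sketch of the fallback path in mlxsw_sp_kvd_sizes_get()
 * above, for a hypothetical KVD_SIZE of 258048 entries, the default
 * linear size of 98304, and a single:double parts ratio of 2:1 (the
 * profile's actual ratio sits outside this hunk):
 *
 *	linear = 98304                       (profile default)
 *	hash total = 258048 - 98304 = 159744
 *	double = 159744 * 1 / (1 + 2) = 53248
 *		-> rounddown(53248, 128) = 53248
 *	single = 258048 - 53248 - 98304 = 106496
 *
 * Any size the user did set through devlink simply wins over the
 * computed default, and the final sanity check rejects results below
 * the firmware-reported minima.
 */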
net_device *dev); +int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, + struct mlxsw_sp_port *to, + enum mlxsw_sp_span_type type, + bool bind); +void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, + u8 destination_port, + enum mlxsw_sp_span_type type, + bool bind); +struct mlxsw_sp_span_entry * +mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port); /* spectrum_dcb.c */ #ifdef CONFIG_MLXSW_SPECTRUM_DCB @@ -458,13 +460,13 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, unsigned int *p_alloc_size); +u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp); struct mlxsw_sp_acl_rule_info { unsigned int priority; struct mlxsw_afk_element_values values; struct mlxsw_afa_block *act_block; unsigned int counter_index; - bool counter_valid; }; enum mlxsw_sp_acl_profile { @@ -477,8 +479,11 @@ struct mlxsw_sp_acl_profile_ops { void *priv, void *ruleset_priv); void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, - struct net_device *dev, bool ingress); - void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress); + void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress); u16 (*ruleset_group_id)(void *ruleset_priv); size_t rule_priv_size; int (*rule_add)(struct mlxsw_sp *mlxsw_sp, @@ -498,17 +503,34 @@ struct mlxsw_sp_acl_ops { enum mlxsw_sp_acl_profile profile); }; +struct mlxsw_sp_acl_block; struct mlxsw_sp_acl_ruleset; /* spectrum_acl.c */ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); +struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block); +unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block); +void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block); +void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block); +bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block); +struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp, + struct net *net); +void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block); +int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress); +int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress); struct mlxsw_sp_acl_ruleset * -mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, - bool ingress, u32 chain_index, +mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, u32 chain_index, enum mlxsw_sp_acl_profile profile); struct mlxsw_sp_acl_ruleset * -mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, - bool ingress, u32 chain_index, +mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, u32 chain_index, enum mlxsw_sp_acl_profile profile); void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset); @@ -532,6 +554,10 @@ int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, u16 group_id); int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei); +int 
mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct mlxsw_sp_acl_block *block, + struct net_device *out_dev); int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, struct net_device *out_dev); @@ -575,16 +601,23 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops; /* spectrum_flower.c */ -int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, +int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, struct tc_cls_flower_offload *f); -void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, +void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, struct tc_cls_flower_offload *f); -int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, +int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, struct tc_cls_flower_offload *f); /* spectrum_qdisc.c */ +int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port); +void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port); int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_red_qopt_offload *p); +int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_prio_qopt_offload *p); /* spectrum_fid.c */ int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 93dcd315f7d6..0897a5435cc2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -39,6 +39,7 @@ #include <linux/string.h> #include <linux/rhashtable.h> #include <linux/netdevice.h> +#include <net/net_namespace.h> #include <net/tc_act/tc_vlan.h> #include "reg.h" @@ -70,9 +71,23 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl) return acl->afk; } -struct mlxsw_sp_acl_ruleset_ht_key { - struct net_device *dev; /* dev this ruleset is bound to */ +struct mlxsw_sp_acl_block_binding { + struct list_head list; + struct net_device *dev; + struct mlxsw_sp_port *mlxsw_sp_port; bool ingress; +}; + +struct mlxsw_sp_acl_block { + struct list_head binding_list; + struct mlxsw_sp_acl_ruleset *ruleset_zero; + struct mlxsw_sp *mlxsw_sp; + unsigned int rule_count; + unsigned int disable_count; +}; + +struct mlxsw_sp_acl_ruleset_ht_key { + struct mlxsw_sp_acl_block *block; u32 chain_index; const struct mlxsw_sp_acl_profile_ops *ops; }; @@ -118,8 +133,185 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp) return mlxsw_sp->acl->dummy_fid; } +struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block) +{ + return block->mlxsw_sp; +} + +unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block) +{ + return block ? 
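/* These accessors deliberately tolerate a NULL block:
 * mlxsw_sp_port->ing_acl_block and eg_acl_block stay NULL until a
 * flower block is bound, yet the NETIF_F_HW_TC handler earlier in
 * this patch calls rule_count/disable_inc/disable_dec on them
 * unconditionally, e.g.:
 *
 *	// no block bound yet: counts as zero rules, toggle allowed
 *	if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block))
 *		...
 */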
block->rule_count : 0; +} + +void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block) +{ + if (block) + block->disable_count++; +} + +void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block) +{ + if (block) + block->disable_count--; +} + +bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block) +{ + return block->disable_count; +} + +static int +mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, + struct mlxsw_sp_acl_block_binding *binding) +{ + struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero; + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + + return ops->ruleset_bind(mlxsw_sp, ruleset->priv, + binding->mlxsw_sp_port, binding->ingress); +} + +static void +mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, + struct mlxsw_sp_acl_block_binding *binding) +{ + struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero; + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + + ops->ruleset_unbind(mlxsw_sp, ruleset->priv, + binding->mlxsw_sp_port, binding->ingress); +} + +static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block *block) +{ + return block->ruleset_zero; +} + +static int +mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + struct mlxsw_sp_acl_block *block) +{ + struct mlxsw_sp_acl_block_binding *binding; + int err; + + block->ruleset_zero = ruleset; + list_for_each_entry(binding, &block->binding_list, list) { + err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding); + if (err) + goto rollback; + } + return 0; + +rollback: + list_for_each_entry_continue_reverse(binding, &block->binding_list, + list) + mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding); + block->ruleset_zero = NULL; + + return err; +} + +static void +mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset, + struct mlxsw_sp_acl_block *block) +{ + struct mlxsw_sp_acl_block_binding *binding; + + list_for_each_entry(binding, &block->binding_list, list) + mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding); + block->ruleset_zero = NULL; +} + +struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp, + struct net *net) +{ + struct mlxsw_sp_acl_block *block; + + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return NULL; + INIT_LIST_HEAD(&block->binding_list); + block->mlxsw_sp = mlxsw_sp; + return block; +} + +void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block) +{ + WARN_ON(!list_empty(&block->binding_list)); + kfree(block); +} + +static struct mlxsw_sp_acl_block_binding * +mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block, + struct mlxsw_sp_port *mlxsw_sp_port, bool ingress) +{ + struct mlxsw_sp_acl_block_binding *binding; + + list_for_each_entry(binding, &block->binding_list, list) + if (binding->mlxsw_sp_port == mlxsw_sp_port && + binding->ingress == ingress) + return binding; + return NULL; +} + +int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress) +{ + struct mlxsw_sp_acl_block_binding *binding; + int err; + + if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress))) + return -EEXIST; + + binding = kzalloc(sizeof(*binding), GFP_KERNEL); + if (!binding) + return -ENOMEM; + binding->mlxsw_sp_port = mlxsw_sp_port; + binding->ingress = ingress; + + if 
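/* mlxsw_sp_acl_ruleset_block_bind() above uses the standard
 * partial-failure idiom: on error, walk back over only the bindings
 * that already succeeded. In miniature:
 *
 *	list_for_each_entry(binding, &block->binding_list, list) {
 *		err = bind_one(binding);	// bind_one illustrative
 *		if (err)
 *			goto rollback;
 *	}
 *	return 0;
 *
 * rollback:
 *	list_for_each_entry_continue_reverse(binding,
 *					     &block->binding_list, list)
 *		unbind_one(binding);		// unbind_one illustrative
 *	return err;
 *
 * list_for_each_entry_continue_reverse() resumes from the failing
 * entry and walks backwards while excluding it, which is exactly the
 * unwind needed here.
 */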
(mlxsw_sp_acl_ruleset_block_bound(block)) { + err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding); + if (err) + goto err_ruleset_bind; + } + + list_add(&binding->list, &block->binding_list); + return 0; + +err_ruleset_bind: + kfree(binding); + return err; +} + +int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress) +{ + struct mlxsw_sp_acl_block_binding *binding; + + binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress); + if (!binding) + return -ENOENT; + + list_del(&binding->list); + + if (mlxsw_sp_acl_ruleset_block_bound(block)) + mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding); + + kfree(binding); + return 0; +} + static struct mlxsw_sp_acl_ruleset * mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, u32 chain_index, const struct mlxsw_sp_acl_profile_ops *ops) { struct mlxsw_sp_acl *acl = mlxsw_sp->acl; @@ -132,6 +324,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, if (!ruleset) return ERR_PTR(-ENOMEM); ruleset->ref_count = 1; + ruleset->ht_key.block = block; + ruleset->ht_key.chain_index = chain_index; ruleset->ht_key.ops = ops; err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params); @@ -142,68 +336,50 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_ops_ruleset_add; - return ruleset; - -err_ops_ruleset_add: - rhashtable_destroy(&ruleset->rule_ht); -err_rhashtable_init: - kfree(ruleset); - return ERR_PTR(err); -} - -static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_ruleset *ruleset) -{ - const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; - - ops->ruleset_del(mlxsw_sp, ruleset->priv); - rhashtable_destroy(&ruleset->rule_ht); - kfree(ruleset); -} - -static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_ruleset *ruleset, - struct net_device *dev, bool ingress, - u32 chain_index) -{ - const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; - struct mlxsw_sp_acl *acl = mlxsw_sp->acl; - int err; - - ruleset->ht_key.dev = dev; - ruleset->ht_key.ingress = ingress; - ruleset->ht_key.chain_index = chain_index; err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node, mlxsw_sp_acl_ruleset_ht_params); if (err) - return err; - if (!ruleset->ht_key.chain_index) { + goto err_ht_insert; + + if (!chain_index) { /* We only need ruleset with chain index 0, the implicit one, * to be directly bound to device. The rest of the rulesets * are bound by "Goto action set". 
*/ - err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress); + err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block); if (err) - goto err_ops_ruleset_bind; + goto err_ruleset_bind; } - return 0; -err_ops_ruleset_bind: + return ruleset; + +err_ruleset_bind: rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, mlxsw_sp_acl_ruleset_ht_params); - return err; +err_ht_insert: + ops->ruleset_del(mlxsw_sp, ruleset->priv); +err_ops_ruleset_add: + rhashtable_destroy(&ruleset->rule_ht); +err_rhashtable_init: + kfree(ruleset); + return ERR_PTR(err); } -static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_ruleset *ruleset) +static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ruleset *ruleset) { const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + struct mlxsw_sp_acl_block *block = ruleset->ht_key.block; + u32 chain_index = ruleset->ht_key.chain_index; struct mlxsw_sp_acl *acl = mlxsw_sp->acl; - if (!ruleset->ht_key.chain_index) - ops->ruleset_unbind(mlxsw_sp, ruleset->priv); + if (!chain_index) + mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block); rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, mlxsw_sp_acl_ruleset_ht_params); + ops->ruleset_del(mlxsw_sp, ruleset->priv); + rhashtable_destroy(&ruleset->rule_ht); + kfree(ruleset); } static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset) @@ -216,20 +392,18 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp, { if (--ruleset->ref_count) return; - mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset); mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset); } static struct mlxsw_sp_acl_ruleset * -__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev, - bool ingress, u32 chain_index, +__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, + struct mlxsw_sp_acl_block *block, u32 chain_index, const struct mlxsw_sp_acl_profile_ops *ops) { struct mlxsw_sp_acl_ruleset_ht_key ht_key; memset(&ht_key, 0, sizeof(ht_key)); - ht_key.dev = dev; - ht_key.ingress = ingress; + ht_key.block = block; ht_key.chain_index = chain_index; ht_key.ops = ops; return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key, @@ -237,8 +411,8 @@ __mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev, } struct mlxsw_sp_acl_ruleset * -mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, - bool ingress, u32 chain_index, +mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, u32 chain_index, enum mlxsw_sp_acl_profile profile) { const struct mlxsw_sp_acl_profile_ops *ops; @@ -248,45 +422,31 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, ops = acl->ops->profile_ops(mlxsw_sp, profile); if (!ops) return ERR_PTR(-EINVAL); - ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress, - chain_index, ops); + ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops); if (!ruleset) return ERR_PTR(-ENOENT); return ruleset; } struct mlxsw_sp_acl_ruleset * -mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, - bool ingress, u32 chain_index, +mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, u32 chain_index, enum mlxsw_sp_acl_profile profile) { const struct mlxsw_sp_acl_profile_ops *ops; struct mlxsw_sp_acl *acl = mlxsw_sp->acl; struct mlxsw_sp_acl_ruleset *ruleset; - int err; ops = acl->ops->profile_ops(mlxsw_sp, profile); if (!ops) 
return ERR_PTR(-EINVAL); - ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress, - chain_index, ops); + ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops); if (ruleset) { mlxsw_sp_acl_ruleset_ref_inc(ruleset); return ruleset; } - ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops); - if (IS_ERR(ruleset)) - return ruleset; - err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, - ingress, chain_index); - if (err) - goto err_ruleset_bind; - return ruleset; - -err_ruleset_bind: - mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset); - return ERR_PTR(err); + return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops); } void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, @@ -302,27 +462,6 @@ u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset) return ops->ruleset_group_id(ruleset->priv); } -static int -mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_rule_info *rulei) -{ - int err; - - err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index); - if (err) - return err; - rulei->counter_valid = true; - return 0; -} - -static void -mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_rule_info *rulei) -{ - rulei->counter_valid = false; - mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index); -} - struct mlxsw_sp_acl_rule_info * mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl) { @@ -427,6 +566,34 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, local_port, in_port); } +int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct mlxsw_sp_acl_block *block, + struct net_device *out_dev) +{ + struct mlxsw_sp_acl_block_binding *binding; + struct mlxsw_sp_port *out_port; + struct mlxsw_sp_port *in_port; + + if (!list_is_singular(&block->binding_list)) + return -EOPNOTSUPP; + + binding = list_first_entry(&block->binding_list, + struct mlxsw_sp_acl_block_binding, list); + in_port = binding->mlxsw_sp_port; + if (!mlxsw_sp_port_dev_check(out_dev)) + return -EINVAL; + + out_port = netdev_priv(out_dev); + if (out_port->mlxsw_sp != mlxsw_sp) + return -EINVAL; + + return mlxsw_afa_block_append_mirror(rulei->act_block, + in_port->local_port, + out_port->local_port, + binding->ingress); +} + int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, u32 action, u16 vid, u16 proto, u8 prio) @@ -459,7 +626,7 @@ int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei) { return mlxsw_afa_block_append_counter(rulei->act_block, - rulei->counter_index); + &rulei->counter_index); } int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp, @@ -493,13 +660,8 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, goto err_rulei_create; } - err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei); - if (err) - goto err_counter_alloc; return rule; -err_counter_alloc: - mlxsw_sp_acl_rulei_destroy(rule->rulei); err_rulei_create: kfree(rule); err_alloc: @@ -512,7 +674,6 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; - mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei); mlxsw_sp_acl_rulei_destroy(rule->rulei); kfree(rule); mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); @@ -535,6 +696,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, goto err_rhashtable_insert; list_add_tail(&rule->list, &mlxsw_sp->acl->rules); + ruleset->ht_key.block->rule_count++; return 0; 
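/* Two ownership changes meet in this file: flow counters are no
 * longer allocated by the rule itself (rulei->counter_valid is gone);
 * mlxsw_afa_block_append_counter() now takes &rulei->counter_index
 * and allocates through the new counter_index_get/put afa ops, while
 * callers that already own a counter, such as the MR TCAM further
 * down, switch to mlxsw_afa_block_append_allocated_counter(). The
 * rule_count++/-- bookkeeping in rule_add/rule_del is what feeds
 * mlxsw_sp_acl_block_rule_count(), letting ndo_set_features refuse to
 * clear NETIF_F_HW_TC while offloaded rules exist.
 */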
err_rhashtable_insert: @@ -548,6 +710,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + ruleset->ht_key.block->rule_count--; list_del(&rule->list); rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, mlxsw_sp_acl_rule_ht_params); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index 4d3340ed0291..6ca6894125f0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -108,11 +108,77 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index) mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); } +static int +mlxsw_sp_act_counter_index_get(void *priv, unsigned int *p_counter_index) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + return mlxsw_sp_flow_counter_alloc(mlxsw_sp, p_counter_index); +} + +static void +mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index); +} + +static int +mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, u8 local_out_port, + bool ingress, int *p_span_id) +{ + struct mlxsw_sp_port *in_port, *out_port; + struct mlxsw_sp_span_entry *span_entry; + struct mlxsw_sp *mlxsw_sp = priv; + enum mlxsw_sp_span_type type; + int err; + + type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; + out_port = mlxsw_sp->ports[local_out_port]; + in_port = mlxsw_sp->ports[local_in_port]; + + err = mlxsw_sp_span_mirror_add(in_port, out_port, type, false); + if (err) + return err; + + span_entry = mlxsw_sp_span_entry_find(mlxsw_sp, local_out_port); + if (!span_entry) { + err = -ENOENT; + goto err_span_entry_find; + } + + *p_span_id = span_entry->id; + return 0; + +err_span_entry_find: + mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false); + return err; +} + +static void +mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port, + bool ingress) +{ + struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp_port *in_port; + enum mlxsw_sp_span_type type; + + type = ingress ? 
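/* Flow of mlxsw_sp_act_mirror_add() above when the ACL core resolves
 * a mirror action:
 *
 *	1. look up both mlxsw_sp_port structs from the local port
 *	   numbers handed in by the flexible-actions core;
 *	2. mlxsw_sp_span_mirror_add(..., bind=false) sets up the SPAN
 *	   entry (and the egress buffer when applicable) without
 *	   binding the port, since mirroring is triggered per-packet
 *	   by the action itself;
 *	3. mlxsw_sp_span_entry_find() recovers the entry so its id can
 *	   be returned through *p_span_id for the action encoding,
 *	   with mlxsw_sp_span_mirror_del(..., false) as the unwind.
 */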
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; + in_port = mlxsw_sp->ports[local_in_port]; + + mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false); +} + static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = { .kvdl_set_add = mlxsw_sp_act_kvdl_set_add, .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, + .counter_index_get = mlxsw_sp_act_counter_index_get, + .counter_index_put = mlxsw_sp_act_counter_index_put, + .mirror_add = mlxsw_sp_act_mirror_add, + .mirror_del = mlxsw_sp_act_mirror_del, }; int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 7e8284b46968..c6e180c2be1e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -154,10 +154,6 @@ struct mlxsw_sp_acl_tcam_group { struct list_head region_list; unsigned int region_count; struct rhashtable chunk_ht; - struct { - u16 local_port; - bool ingress; - } bound; struct mlxsw_sp_acl_tcam_group_ops *ops; const struct mlxsw_sp_acl_tcam_pattern *patterns; unsigned int patterns_count; @@ -262,35 +258,29 @@ static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_group *group, - struct net_device *dev, bool ingress) + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress) { - struct mlxsw_sp_port *mlxsw_sp_port; char ppbt_pl[MLXSW_REG_PPBT_LEN]; - if (!mlxsw_sp_port_dev_check(dev)) - return -EINVAL; - - mlxsw_sp_port = netdev_priv(dev); - group->bound.local_port = mlxsw_sp_port->local_port; - group->bound.ingress = ingress; - mlxsw_reg_ppbt_pack(ppbt_pl, - group->bound.ingress ? MLXSW_REG_PXBT_E_IACL : - MLXSW_REG_PXBT_E_EACL, - MLXSW_REG_PXBT_OP_BIND, group->bound.local_port, + mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL : + MLXSW_REG_PXBT_E_EACL, + MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port, group->id); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl); } static void mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_group *group) + struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress) { char ppbt_pl[MLXSW_REG_PPBT_LEN]; - mlxsw_reg_ppbt_pack(ppbt_pl, - group->bound.ingress ? MLXSW_REG_PXBT_E_IACL : - MLXSW_REG_PXBT_E_EACL, - MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port, + mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? 
MLXSW_REG_PXBT_E_IACL : + MLXSW_REG_PXBT_E_EACL, + MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port, group->id); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl); } @@ -1056,21 +1046,25 @@ mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, - struct net_device *dev, bool ingress) + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress) { struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group, - dev, ingress); + mlxsw_sp_port, ingress); } static void mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, - void *ruleset_priv) + void *ruleset_priv, + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress) { struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; - mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group); + mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group, + mlxsw_sp_port, ingress); } static u16 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index 96fdba78acab..f56fa18d6b26 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -771,14 +771,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = { .size_get = mlxsw_sp_dpipe_table_host4_size_get, }; +#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4 1 + static int mlxsw_sp_dpipe_host4_table_init(struct mlxsw_sp *mlxsw_sp) { struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + int err; - return devlink_dpipe_table_register(devlink, - MLXSW_SP_DPIPE_TABLE_NAME_HOST4, - &mlxsw_sp_host4_ops, - mlxsw_sp, false); + err = devlink_dpipe_table_register(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_HOST4, + &mlxsw_sp_host4_ops, + mlxsw_sp, false); + if (err) + return err; + + err = devlink_dpipe_table_resource_set(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_HOST4, + MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, + MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4); + if (err) + goto err_resource_set; + + return 0; + +err_resource_set: + devlink_dpipe_table_unregister(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_HOST4); + return err; } static void mlxsw_sp_dpipe_host4_table_fini(struct mlxsw_sp *mlxsw_sp) @@ -829,14 +848,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = { .size_get = mlxsw_sp_dpipe_table_host6_size_get, }; +#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6 2 + static int mlxsw_sp_dpipe_host6_table_init(struct mlxsw_sp *mlxsw_sp) { struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + int err; - return devlink_dpipe_table_register(devlink, - MLXSW_SP_DPIPE_TABLE_NAME_HOST6, - &mlxsw_sp_host6_ops, - mlxsw_sp, false); + err = devlink_dpipe_table_register(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_HOST6, + &mlxsw_sp_host6_ops, + mlxsw_sp, false); + if (err) + return err; + + err = devlink_dpipe_table_resource_set(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_HOST6, + MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, + MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6); + if (err) + goto err_resource_set; + + return 0; + +err_resource_set: + devlink_dpipe_table_unregister(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_HOST6); + return err; } static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp) @@ -1213,14 +1251,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = { .size_get = mlxsw_sp_dpipe_table_adj_size_get, }; +#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ 1 + static int 
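/* The devlink_dpipe_table_resource_set() calls above, plus the
 * adjacency one that follows, tie each dpipe table to the KVD
 * partition it consumes and declare the per-entry cost in resource
 * units: 1 for host4 (hash_single), 2 for host6 (hash_double) and 1
 * for adjacency (linear). With that link, devlink can translate
 * table occupancy into resource occupancy, e.g. (illustrative
 * invocation):
 *
 *	# devlink resource show pci/0000:03:00.0
 */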
mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp) { struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + int err; - return devlink_dpipe_table_register(devlink, - MLXSW_SP_DPIPE_TABLE_NAME_ADJ, - &mlxsw_sp_dpipe_table_adj_ops, - mlxsw_sp, false); + err = devlink_dpipe_table_register(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_ADJ, + &mlxsw_sp_dpipe_table_adj_ops, + mlxsw_sp, false); + if (err) + return err; + + err = devlink_dpipe_table_resource_set(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_ADJ, + MLXSW_SP_RESOURCE_KVD_LINEAR, + MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ); + if (err) + goto err_resource_set; + + return 0; + +err_resource_set: + devlink_dpipe_table_unregister(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_ADJ); + return err; } static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 2f0e57857ea4..6ce00e28d4ea 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -35,6 +35,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/netdevice.h> +#include <net/net_namespace.h> #include <net/flow_dissector.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> @@ -45,7 +46,7 @@ #include "core_acl_flex_keys.h" static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, - struct net_device *dev, bool ingress, + struct mlxsw_sp_acl_block *block, struct mlxsw_sp_acl_rule_info *rulei, struct tcf_exts *exts) { @@ -80,8 +81,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset; u16 group_id; - ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, dev, - ingress, + ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block, chain_index, MLXSW_SP_ACL_PROFILE_FLOWER); if (IS_ERR(ruleset)) @@ -92,7 +92,6 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, if (err) return err; } else if (is_tcf_mirred_egress_redirect(a)) { - int ifindex = tcf_mirred_ifindex(a); struct net_device *out_dev; struct mlxsw_sp_fid *fid; u16 fid_index; @@ -104,14 +103,18 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, if (err) return err; - out_dev = __dev_get_by_index(dev_net(dev), ifindex); - if (out_dev == dev) - out_dev = NULL; - + out_dev = tcf_mirred_dev(a); err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei, out_dev); if (err) return err; + } else if (is_tcf_mirred_egress_mirror(a)) { + struct net_device *out_dev = tcf_mirred_dev(a); + + err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei, + block, out_dev); + if (err) + return err; } else if (is_tcf_vlan(a)) { u16 proto = be16_to_cpu(tcf_vlan_push_proto(a)); u32 action = tcf_vlan_action(a); @@ -266,7 +269,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, - struct net_device *dev, bool ingress, + struct mlxsw_sp_acl_block *block, struct mlxsw_sp_acl_rule_info *rulei, struct tc_cls_flower_offload *f) { @@ -384,21 +387,19 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, if (err) return err; - return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, ingress, - rulei, f->exts); + return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts); } -int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, +int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, struct tc_cls_flower_offload *f) { - 
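/* From here on the flower entry points operate on an
 * mlxsw_sp_acl_block instead of a (port, ingress) pair: the ruleset
 * hash-table key earlier in this patch became {block, chain_index,
 * ops}, so all ports bound to one shared tc block transparently share
 * a single ruleset and a single copy of the hardware rules. This is
 * also why the mirror action parsed above has to go through
 * mlxsw_sp_acl_rulei_act_mirror(), which returns -EOPNOTSUPP unless
 * the block has exactly one binding; a mirror needs a well-defined
 * ingress port.
 */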
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - struct net_device *dev = mlxsw_sp_port->dev; struct mlxsw_sp_acl_rule_info *rulei; struct mlxsw_sp_acl_ruleset *ruleset; struct mlxsw_sp_acl_rule *rule; int err; - ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress, + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, f->common.chain_index, MLXSW_SP_ACL_PROFILE_FLOWER); if (IS_ERR(ruleset)) @@ -411,7 +412,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, } rulei = mlxsw_sp_acl_rule_rulei(rule); - err = mlxsw_sp_flower_parse(mlxsw_sp, dev, ingress, rulei, f); + err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f); if (err) goto err_flower_parse; @@ -435,15 +436,15 @@ err_rule_create: return err; } -void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, +void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, struct tc_cls_flower_offload *f) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_acl_ruleset *ruleset; struct mlxsw_sp_acl_rule *rule; - ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev, - ingress, f->common.chain_index, + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, + f->common.chain_index, MLXSW_SP_ACL_PROFILE_FLOWER); if (IS_ERR(ruleset)) return; @@ -457,10 +458,10 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); } -int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, +int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, struct tc_cls_flower_offload *f) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_acl_ruleset *ruleset; struct mlxsw_sp_acl_rule *rule; u64 packets; @@ -468,8 +469,8 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, u64 bytes; int err; - ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev, - ingress, f->common.chain_index, + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, + f->common.chain_index, MLXSW_SP_ACL_PROFILE_FLOWER); if (WARN_ON(IS_ERR(ruleset))) return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c index 310c38247b5c..55f9d2d70f9e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -286,6 +286,32 @@ static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_kvdl_part_fini(mlxsw_sp, i); } +static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part) +{ + unsigned int nr_entries; + int bit = -1; + u64 occ = 0; + + nr_entries = (part->info->end_index - + part->info->start_index + 1) / + part->info->alloc_size; + while ((bit = find_next_bit(part->usage, nr_entries, bit + 1)) + < nr_entries) + occ += part->info->alloc_size; + return occ; +} + +u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_kvdl_part *part; + u64 occ = 0; + + list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list) + occ += mlxsw_sp_kvdl_part_occ(part); + + return occ; +} + int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_kvdl *kvdl; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index 34a0b632e5dd..4c7f32d4288d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -243,7 +243,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp, if (!afa_block) return ERR_PTR(-ENOMEM); - err = mlxsw_afa_block_append_counter(afa_block, counter_index); + err = mlxsw_afa_block_append_allocated_counter(afa_block, + counter_index); if (err) goto err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index b5397da94d7f..0b7670459051 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -41,6 +41,150 @@ #include "spectrum.h" #include "reg.h" +#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1) + +enum mlxsw_sp_qdisc_type { + MLXSW_SP_QDISC_NO_QDISC, + MLXSW_SP_QDISC_RED, + MLXSW_SP_QDISC_PRIO, +}; + +struct mlxsw_sp_qdisc_ops { + enum mlxsw_sp_qdisc_type type; + int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *params); + int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params); + int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc); + int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct tc_qopt_offload_stats *stats_ptr); + int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *xstats_ptr); + void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc); + /* unoffload - to be used for a qdisc that stops being offloaded without + * being destroyed. + */ + void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params); +}; + +struct mlxsw_sp_qdisc { + u32 handle; + u8 tclass_num; + union { + struct red_stats red; + } xstats_base; + struct mlxsw_sp_qdisc_stats { + u64 tx_bytes; + u64 tx_packets; + u64 drops; + u64 overlimits; + u64 backlog; + } stats_base; + + struct mlxsw_sp_qdisc_ops *ops; +}; + +static bool +mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle, + enum mlxsw_sp_qdisc_type type) +{ + return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops && + mlxsw_sp_qdisc->ops->type == type && + mlxsw_sp_qdisc->handle == handle; +} + +static int +mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) +{ + int err = 0; + + if (!mlxsw_sp_qdisc) + return 0; + + if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy) + err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port, + mlxsw_sp_qdisc); + + mlxsw_sp_qdisc->handle = TC_H_UNSPEC; + mlxsw_sp_qdisc->ops = NULL; + return err; +} + +static int +mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct mlxsw_sp_qdisc_ops *ops, void *params) +{ + int err; + + if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type) + /* In case this location contained a different qdisc of the + * same type we can override the old qdisc configuration. + * Otherwise, we need to remove the old qdisc before setting the + * new one. 
+ */ + mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); + err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params); + if (err) + goto err_bad_param; + + err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params); + if (err) + goto err_config; + + if (mlxsw_sp_qdisc->handle != handle) { + mlxsw_sp_qdisc->ops = ops; + if (ops->clean_stats) + ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc); + } + + mlxsw_sp_qdisc->handle = handle; + return 0; + +err_bad_param: +err_config: + if (mlxsw_sp_qdisc->handle == handle && ops->unoffload) + ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params); + + mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); + return err; +} + +static int +mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct tc_qopt_offload_stats *stats_ptr) +{ + if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops && + mlxsw_sp_qdisc->ops->get_stats) + return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port, + mlxsw_sp_qdisc, + stats_ptr); + + return -EOPNOTSUPP; +} + +static int +mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *xstats_ptr) +{ + if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops && + mlxsw_sp_qdisc->ops->get_xstats) + return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port, + mlxsw_sp_qdisc, + xstats_ptr); + + return -EOPNOTSUPP; +} + static int mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port, int tclass_num, u32 min, u32 max, @@ -80,80 +224,78 @@ mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port, } static void -mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - int tclass_num) +mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { - struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base; + u8 tclass_num = mlxsw_sp_qdisc->tclass_num; + struct mlxsw_sp_qdisc_stats *stats_base; struct mlxsw_sp_port_xstats *xstats; struct rtnl_link_stats64 *stats; + struct red_stats *red_base; xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; stats = &mlxsw_sp_port->periodic_hw_stats.stats; + stats_base = &mlxsw_sp_qdisc->stats_base; + red_base = &mlxsw_sp_qdisc->xstats_base.red; - mlxsw_sp_qdisc->tx_packets = stats->tx_packets; - mlxsw_sp_qdisc->tx_bytes = stats->tx_bytes; + stats_base->tx_packets = stats->tx_packets; + stats_base->tx_bytes = stats->tx_bytes; - switch (mlxsw_sp_qdisc->type) { - case MLXSW_SP_QDISC_RED: - xstats_base->prob_mark = xstats->ecn; - xstats_base->prob_drop = xstats->wred_drop[tclass_num]; - xstats_base->pdrop = xstats->tail_drop[tclass_num]; + red_base->prob_mark = xstats->ecn; + red_base->prob_drop = xstats->wred_drop[tclass_num]; + red_base->pdrop = xstats->tail_drop[tclass_num]; - mlxsw_sp_qdisc->overlimits = xstats_base->prob_drop + - xstats_base->prob_mark; - mlxsw_sp_qdisc->drops = xstats_base->prob_drop + - xstats_base->pdrop; - break; - default: - break; - } + stats_base->overlimits = red_base->prob_drop + red_base->prob_mark; + stats_base->drops = red_base->prob_drop + red_base->pdrop; + + stats_base->backlog = 0; } static int -mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - int tclass_num) +mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { - int err; - - if (mlxsw_sp_qdisc->handle != handle) - return 0; - - err = 
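The qdisc rework above replaces per-type entry points with a small ops vtable dispatched from generic replace/destroy/stats helpers. A compilable sketch of that dispatch shape follows; all names and the stub RED implementation are illustrative, not the driver's code:

```c
#include <stdio.h>

enum qdisc_type { QDISC_NONE, QDISC_RED, QDISC_PRIO };

struct qdisc;

struct qdisc_ops {
	enum qdisc_type type;
	int (*check_params)(struct qdisc *q, void *params);
	int (*replace)(struct qdisc *q, void *params);
	int (*destroy)(struct qdisc *q);
};

struct qdisc {
	unsigned int handle;
	const struct qdisc_ops *ops;
};

static int qdisc_destroy(struct qdisc *q)
{
	int err = 0;

	if (q->ops && q->ops->destroy)
		err = q->ops->destroy(q);
	q->handle = 0;
	q->ops = NULL;
	return err;
}

static int qdisc_replace(struct qdisc *q, unsigned int handle,
			 const struct qdisc_ops *ops, void *params)
{
	int err;

	/* A different qdisc type took over this location: remove the
	 * old offload before configuring the new one.
	 */
	if (q->ops && q->ops->type != ops->type)
		qdisc_destroy(q);

	err = ops->check_params(q, params);
	if (err)
		return err;

	err = ops->replace(q, params);
	if (err) {
		qdisc_destroy(q);
		return err;
	}

	q->ops = ops;
	q->handle = handle;
	return 0;
}

static int red_check(struct qdisc *q, void *p)   { (void)q; (void)p; return 0; }
static int red_replace(struct qdisc *q, void *p) { (void)q; (void)p; puts("RED configured"); return 0; }
static int red_destroy(struct qdisc *q)          { (void)q; puts("RED removed"); return 0; }

static const struct qdisc_ops red_ops = {
	.type = QDISC_RED,
	.check_params = red_check,
	.replace = red_replace,
	.destroy = red_destroy,
};

int main(void)
{
	struct qdisc root = { 0 };

	return qdisc_replace(&root, 0x10000, &red_ops, NULL);
}
```

Splitting check_params out of replace is what lets the generic path fail cleanly before touching hardware state.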
mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num); - mlxsw_sp_qdisc->handle = TC_H_UNSPEC; - mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_NO_QDISC; - - return err; + return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, + mlxsw_sp_qdisc->tclass_num); } static int -mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - int tclass_num, - struct tc_red_qopt_offload_params *p) +mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *params) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u32 min, max; - u64 prob; - int err = 0; + struct tc_red_qopt_offload_params *p = params; if (p->min > p->max) { dev_err(mlxsw_sp->bus_info->dev, "spectrum: RED: min %u is bigger than max %u\n", p->min, p->max); - goto err_bad_param; + return -EINVAL; } if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) { dev_err(mlxsw_sp->bus_info->dev, "spectrum: RED: max value %u is too big\n", p->max); - goto err_bad_param; + return -EINVAL; } if (p->min == 0 || p->max == 0) { dev_err(mlxsw_sp->bus_info->dev, "spectrum: RED: 0 value is illegal for min and max\n"); - goto err_bad_param; + return -EINVAL; } + return 0; +} + +static int +mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *params) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct tc_red_qopt_offload_params *p = params; + u8 tclass_num = mlxsw_sp_qdisc->tclass_num; + u32 min, max; + u64 prob; /* calculate probability in percentage */ prob = p->probability; @@ -162,116 +304,309 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, prob = DIV_ROUND_UP(prob, 1 << 16); min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min); max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max); - err = mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min, - max, prob, p->is_ecn); - if (err) - goto err_config; - - mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED; - if (mlxsw_sp_qdisc->handle != handle) - mlxsw_sp_setup_tc_qdisc_clean_stats(mlxsw_sp_port, - mlxsw_sp_qdisc, - tclass_num); + return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min, + max, prob, p->is_ecn); +} - mlxsw_sp_qdisc->handle = handle; - return 0; +static void +mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *params) +{ + struct tc_red_qopt_offload_params *p = params; + u64 backlog; -err_bad_param: - err = -EINVAL; -err_config: - mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, mlxsw_sp_qdisc->handle, - mlxsw_sp_qdisc, tclass_num); - return err; + backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_qdisc->stats_base.backlog); + p->qstats->backlog -= backlog; } static int -mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, +mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - int tclass_num, struct red_stats *res) + void *xstats_ptr) { - struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base; + struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red; + u8 tclass_num = mlxsw_sp_qdisc->tclass_num; struct mlxsw_sp_port_xstats *xstats; - - if (mlxsw_sp_qdisc->handle != handle || - mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED) - return -EOPNOTSUPP; + struct red_stats *res = xstats_ptr; + int early_drops, marks, pdrops; xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; - res->prob_drop =
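mlxsw_sp_qdisc_red_replace() converts the RED drop probability to a percentage for the device. The sketch below assumes sch_red's max_P scaling by 2^32 and the two-step divide as it exists upstream; the multiply by 100 and one of the two divides fall outside the visible hunk context here:

```c
#include <stdio.h>
#include <stdint.h>

/* DIV_ROUND_UP as defined in the kernel. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumption: the offloaded probability is a fraction scaled by
	 * 2^32 (red_parms.max_P); the device wants a percentage.
	 */
	uint64_t probability = 1ULL << 31; /* 0.5 in that fixed point */
	uint64_t prob = probability;

	prob *= 100;
	prob = DIV_ROUND_UP(prob, 1 << 16);
	prob = DIV_ROUND_UP(prob, 1 << 16);
	printf("%llu%%\n", (unsigned long long)prob); /* prints 50% */
	return 0;
}
```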
xstats->wred_drop[tclass_num] - xstats_base->prob_drop; - res->prob_mark = xstats->ecn - xstats_base->prob_mark; - res->pdrop = xstats->tail_drop[tclass_num] - xstats_base->pdrop; + early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop; + marks = xstats->ecn - xstats_base->prob_mark; + pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop; + + res->pdrop += pdrops; + res->prob_drop += early_drops; + res->prob_mark += marks; + + xstats_base->pdrop += pdrops; + xstats_base->prob_drop += early_drops; + xstats_base->prob_mark += marks; return 0; } static int -mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, +mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - int tclass_num, - struct tc_red_qopt_offload_stats *res) + struct tc_qopt_offload_stats *stats_ptr) { - u64 tx_bytes, tx_packets, overlimits, drops; + u64 tx_bytes, tx_packets, overlimits, drops, backlog; + u8 tclass_num = mlxsw_sp_qdisc->tclass_num; + struct mlxsw_sp_qdisc_stats *stats_base; struct mlxsw_sp_port_xstats *xstats; struct rtnl_link_stats64 *stats; - if (mlxsw_sp_qdisc->handle != handle || - mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED) - return -EOPNOTSUPP; - xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; stats = &mlxsw_sp_port->periodic_hw_stats.stats; + stats_base = &mlxsw_sp_qdisc->stats_base; - tx_bytes = stats->tx_bytes - mlxsw_sp_qdisc->tx_bytes; - tx_packets = stats->tx_packets - mlxsw_sp_qdisc->tx_packets; + tx_bytes = stats->tx_bytes - stats_base->tx_bytes; + tx_packets = stats->tx_packets - stats_base->tx_packets; overlimits = xstats->wred_drop[tclass_num] + xstats->ecn - - mlxsw_sp_qdisc->overlimits; + stats_base->overlimits; drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] - - mlxsw_sp_qdisc->drops; - - _bstats_update(res->bstats, tx_bytes, tx_packets); - res->qstats->overlimits += overlimits; - res->qstats->drops += drops; - res->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, - xstats->backlog[tclass_num]); - - mlxsw_sp_qdisc->drops += drops; - mlxsw_sp_qdisc->overlimits += overlimits; - mlxsw_sp_qdisc->tx_bytes += tx_bytes; - mlxsw_sp_qdisc->tx_packets += tx_packets; + stats_base->drops; + backlog = xstats->backlog[tclass_num]; + + _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets); + stats_ptr->qstats->overlimits += overlimits; + stats_ptr->qstats->drops += drops; + stats_ptr->qstats->backlog += + mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, + backlog) - + mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, + stats_base->backlog); + + stats_base->backlog = backlog; + stats_base->drops += drops; + stats_base->overlimits += overlimits; + stats_base->tx_bytes += tx_bytes; + stats_base->tx_packets += tx_packets; return 0; } #define MLXSW_SP_PORT_DEFAULT_TCLASS 0 +static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = { + .type = MLXSW_SP_QDISC_RED, + .check_params = mlxsw_sp_qdisc_red_check_params, + .replace = mlxsw_sp_qdisc_red_replace, + .unoffload = mlxsw_sp_qdisc_red_unoffload, + .destroy = mlxsw_sp_qdisc_red_destroy, + .get_stats = mlxsw_sp_qdisc_get_red_stats, + .get_xstats = mlxsw_sp_qdisc_get_red_xstats, + .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats, +}; + int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_red_qopt_offload *p) { struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; - int tclass_num; if (p->parent != TC_H_ROOT) return -EOPNOTSUPP; - mlxsw_sp_qdisc = &mlxsw_sp_port->root_qdisc; - tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS; + 
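The get_stats path now reports deltas of free-running hardware counters against a cached stats_base snapshot, then rolls the snapshot forward so the next query reports only new traffic. A toy model of that pattern, with a single counter and made-up numbers:

```c
#include <stdio.h>

/* Model of the stats_base bookkeeping in mlxsw_sp_qdisc_get_red_stats():
 * hardware counters never reset, so the qdisc reports the difference
 * against a baseline taken when it was installed and then advances it.
 */
struct stats_base { unsigned long long tx_bytes; };

static unsigned long long
report_tx_bytes(struct stats_base *base, unsigned long long hw_tx_bytes)
{
	unsigned long long delta = hw_tx_bytes - base->tx_bytes;

	base->tx_bytes += delta; /* advance the baseline */
	return delta;
}

int main(void)
{
	struct stats_base base = { .tx_bytes = 1000 }; /* snapshot at install */

	printf("%llu\n", report_tx_bytes(&base, 1500)); /* 500 new bytes */
	printf("%llu\n", report_tx_bytes(&base, 1500)); /* 0, nothing new */
	return 0;
}
```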
mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc; + + if (p->command == TC_RED_REPLACE) + return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle, + mlxsw_sp_qdisc, + &mlxsw_sp_qdisc_ops_red, + &p->set); + + if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle, + MLXSW_SP_QDISC_RED)) + return -EOPNOTSUPP; switch (p->command) { - case TC_RED_REPLACE: - return mlxsw_sp_qdisc_red_replace(mlxsw_sp_port, p->handle, - mlxsw_sp_qdisc, tclass_num, - &p->set); case TC_RED_DESTROY: - return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle, - mlxsw_sp_qdisc, tclass_num); + return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); case TC_RED_XSTATS: - return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle, - mlxsw_sp_qdisc, tclass_num, - p->xstats); + return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc, + p->xstats); case TC_RED_STATS: - return mlxsw_sp_qdisc_get_red_stats(mlxsw_sp_port, p->handle, - mlxsw_sp_qdisc, tclass_num, - &p->stats); + return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc, + &p->stats); default: return -EOPNOTSUPP; } } + +static int +mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) +{ + int i; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, + MLXSW_SP_PORT_DEFAULT_TCLASS); + + return 0; +} + +static int +mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *params) +{ + struct tc_prio_qopt_offload_params *p = params; + + if (p->bands > IEEE_8021QAZ_MAX_TCS) + return -EOPNOTSUPP; + + return 0; +} + +static int +mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *params) +{ + struct tc_prio_qopt_offload_params *p = params; + int tclass, i; + int err; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->priomap[i]); + err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, tclass); + if (err) + return err; + } + + return 0; +} + +static void +mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *params) +{ + struct tc_prio_qopt_offload_params *p = params; + u64 backlog; + + backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_qdisc->stats_base.backlog); + p->qstats->backlog -= backlog; +} + +static int +mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct tc_qopt_offload_stats *stats_ptr) +{ + u64 tx_bytes, tx_packets, drops = 0, backlog = 0; + struct mlxsw_sp_qdisc_stats *stats_base; + struct mlxsw_sp_port_xstats *xstats; + struct rtnl_link_stats64 *stats; + int i; + + xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; + stats = &mlxsw_sp_port->periodic_hw_stats.stats; + stats_base = &mlxsw_sp_qdisc->stats_base; + + tx_bytes = stats->tx_bytes - stats_base->tx_bytes; + tx_packets = stats->tx_packets - stats_base->tx_packets; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + drops += xstats->tail_drop[i]; + backlog += xstats->backlog[i]; + } + drops = drops - stats_base->drops; + + _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets); + stats_ptr->qstats->drops += drops; + stats_ptr->qstats->backlog += + mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, + backlog) - + mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, + stats_base->backlog); + stats_base->backlog = backlog; + stats_base->drops += drops; + stats_base->tx_bytes += tx_bytes; + stats_base->tx_packets 
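MLXSW_SP_PRIO_BAND_TO_TCLASS reverses the numbering because PRIO band 0 is the highest-priority band, while on the device the higher traffic class number wins. A quick demonstration of the mapping; the macro is copied here with extra parentheses around band:

```c
#include <stdio.h>

#define IEEE_8021QAZ_MAX_TCS 8
#define PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - (band) - 1)

int main(void)
{
	int band;

	for (band = 0; band < IEEE_8021QAZ_MAX_TCS; band++)
		printf("band %d -> tclass %d\n",
		       band, PRIO_BAND_TO_TCLASS(band));
	return 0; /* band 0 -> tclass 7, band 7 -> tclass 0 */
}
```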
+= tx_packets; + return 0; +} + +static void +mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) +{ + struct mlxsw_sp_qdisc_stats *stats_base; + struct mlxsw_sp_port_xstats *xstats; + struct rtnl_link_stats64 *stats; + int i; + + xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; + stats = &mlxsw_sp_port->periodic_hw_stats.stats; + stats_base = &mlxsw_sp_qdisc->stats_base; + + stats_base->tx_packets = stats->tx_packets; + stats_base->tx_bytes = stats->tx_bytes; + + stats_base->drops = 0; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + stats_base->drops += xstats->tail_drop[i]; + + mlxsw_sp_qdisc->stats_base.backlog = 0; +} + +static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = { + .type = MLXSW_SP_QDISC_PRIO, + .check_params = mlxsw_sp_qdisc_prio_check_params, + .replace = mlxsw_sp_qdisc_prio_replace, + .unoffload = mlxsw_sp_qdisc_prio_unoffload, + .destroy = mlxsw_sp_qdisc_prio_destroy, + .get_stats = mlxsw_sp_qdisc_get_prio_stats, + .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats, +}; + +int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_prio_qopt_offload *p) +{ + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; + + if (p->parent != TC_H_ROOT) + return -EOPNOTSUPP; + + mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc; + if (p->command == TC_PRIO_REPLACE) + return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle, + mlxsw_sp_qdisc, + &mlxsw_sp_qdisc_ops_prio, + &p->replace_params); + + if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle, + MLXSW_SP_QDISC_PRIO)) + return -EOPNOTSUPP; + + switch (p->command) { + case TC_PRIO_DESTROY: + return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); + case TC_PRIO_STATS: + return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc, + &p->stats); + default: + return -EOPNOTSUPP; + } +} + +int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc), + GFP_KERNEL); + if (!mlxsw_sp_port->root_qdisc) + return -ENOMEM; + + mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS; + + return 0; +} + +void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port) +{ + kfree(mlxsw_sp_port->root_qdisc); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 7042c855a5d6..f0b25baba09a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -71,6 +71,7 @@ #include "spectrum_mr_tcam.h" #include "spectrum_router.h" +struct mlxsw_sp_fib; struct mlxsw_sp_vr; struct mlxsw_sp_lpm_tree; struct mlxsw_sp_rif_ops; @@ -84,6 +85,8 @@ struct mlxsw_sp_router { struct rhashtable nexthop_ht; struct list_head nexthop_list; struct { + /* One tree for each protocol: IPv4 and IPv6 */ + struct mlxsw_sp_lpm_tree *proto_trees[2]; struct mlxsw_sp_lpm_tree *trees; unsigned int tree_count; } lpm; @@ -162,6 +165,15 @@ struct mlxsw_sp_rif_ops { struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif); }; +static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree); +static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_lpm_tree *lpm_tree); +static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_fib *fib, + u8 tree_id); +static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_fib *fib); + static unsigned int * mlxsw_sp_rif_p_counter_get(struct 
mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir) @@ -349,14 +361,6 @@ mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1, return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1)); } -static bool -mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage) -{ - struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } }; - - return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none); -} - static void mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1, struct mlxsw_sp_prefix_usage *prefix_usage2) @@ -398,7 +402,6 @@ enum mlxsw_sp_fib_entry_type { }; struct mlxsw_sp_nexthop_group; -struct mlxsw_sp_fib; struct mlxsw_sp_fib_node { struct list_head entry_list; @@ -445,6 +448,7 @@ struct mlxsw_sp_lpm_tree { u8 id; /* tree ID */ unsigned int ref_count; enum mlxsw_sp_l3proto proto; + unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT]; struct mlxsw_sp_prefix_usage prefix_usage; }; @@ -453,8 +457,6 @@ struct mlxsw_sp_fib { struct list_head node_list; struct mlxsw_sp_vr *vr; struct mlxsw_sp_lpm_tree *lpm_tree; - unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT]; - struct mlxsw_sp_prefix_usage prefix_usage; enum mlxsw_sp_l3proto proto; }; @@ -469,12 +471,15 @@ struct mlxsw_sp_vr { static const struct rhashtable_params mlxsw_sp_fib_ht_params; -static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr, +static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_vr *vr, enum mlxsw_sp_l3proto proto) { + struct mlxsw_sp_lpm_tree *lpm_tree; struct mlxsw_sp_fib *fib; int err; + lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto]; fib = kzalloc(sizeof(*fib), GFP_KERNEL); if (!fib) return ERR_PTR(-ENOMEM); @@ -484,17 +489,26 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr, INIT_LIST_HEAD(&fib->node_list); fib->proto = proto; fib->vr = vr; + fib->lpm_tree = lpm_tree; + mlxsw_sp_lpm_tree_hold(lpm_tree); + err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id); + if (err) + goto err_lpm_tree_bind; return fib; +err_lpm_tree_bind: + mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); err_rhashtable_init: kfree(fib); return ERR_PTR(err); } -static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib) +static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib *fib) { + mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib); + mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree); WARN_ON(!list_empty(&fib->node_list)); - WARN_ON(fib->lpm_tree); rhashtable_destroy(&fib->ht); kfree(fib); } @@ -581,6 +595,9 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp, goto err_left_struct_set; memcpy(&lpm_tree->prefix_usage, prefix_usage, sizeof(lpm_tree->prefix_usage)); + memset(&lpm_tree->prefix_ref_count, 0, + sizeof(lpm_tree->prefix_ref_count)); + lpm_tree->ref_count = 1; return lpm_tree; err_left_struct_set: @@ -607,8 +624,10 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp, if (lpm_tree->ref_count != 0 && lpm_tree->proto == proto && mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage, - prefix_usage)) + prefix_usage)) { + mlxsw_sp_lpm_tree_hold(lpm_tree); return lpm_tree; + } } return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto); } @@ -629,9 +648,10 @@ static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp) { + struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } }; struct mlxsw_sp_lpm_tree *lpm_tree; u64 max_trees; - int i; + int err, i; if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES)) return 
-EIO; @@ -649,11 +669,42 @@ static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp) lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN; } + lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, + MLXSW_SP_L3_PROTO_IPV4); + if (IS_ERR(lpm_tree)) { + err = PTR_ERR(lpm_tree); + goto err_ipv4_tree_get; + } + mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree; + + lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, + MLXSW_SP_L3_PROTO_IPV6); + if (IS_ERR(lpm_tree)) { + err = PTR_ERR(lpm_tree); + goto err_ipv6_tree_get; + } + mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree; + return 0; + +err_ipv6_tree_get: + lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4]; + mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); +err_ipv4_tree_get: + kfree(mlxsw_sp->router->lpm.trees); + return err; } static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp) { + struct mlxsw_sp_lpm_tree *lpm_tree; + + lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6]; + mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); + + lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4]; + mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); + kfree(mlxsw_sp->router->lpm.trees); } @@ -745,10 +796,10 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers"); return ERR_PTR(-EBUSY); } - vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4); + vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); if (IS_ERR(vr->fib4)) return ERR_CAST(vr->fib4); - vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6); + vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); if (IS_ERR(vr->fib6)) { err = PTR_ERR(vr->fib6); goto err_fib6_create; @@ -763,21 +814,22 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, return vr; err_mr_table_create: - mlxsw_sp_fib_destroy(vr->fib6); + mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6); vr->fib6 = NULL; err_fib6_create: - mlxsw_sp_fib_destroy(vr->fib4); + mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4); vr->fib4 = NULL; return ERR_PTR(err); } -static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr) +static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_vr *vr) { mlxsw_sp_mr_table_destroy(vr->mr4_table); vr->mr4_table = NULL; - mlxsw_sp_fib_destroy(vr->fib6); + mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6); vr->fib6 = NULL; - mlxsw_sp_fib_destroy(vr->fib4); + mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4); vr->fib4 = NULL; } @@ -793,12 +845,12 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, return vr; } -static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr) +static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr) { if (!vr->rif_count && list_empty(&vr->fib4->node_list) && list_empty(&vr->fib6->node_list) && mlxsw_sp_mr_table_empty(vr->mr4_table)) - mlxsw_sp_vr_destroy(vr); + mlxsw_sp_vr_destroy(mlxsw_sp, vr); } static bool @@ -809,7 +861,7 @@ mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr, if (!mlxsw_sp_vr_is_used(vr)) return false; - if (fib->lpm_tree && fib->lpm_tree->id == tree_id) + if (fib->lpm_tree->id == tree_id) return true; return false; } @@ -839,14 +891,13 @@ static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib, struct mlxsw_sp_lpm_tree *new_tree) { - struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree; enum mlxsw_sp_l3proto proto = fib->proto; + struct mlxsw_sp_lpm_tree 
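With this change every LPM tree is created holding one reference, and the per-protocol default trees created in mlxsw_sp_lpm_init() pin one more for the router's lifetime; a tree is torn down only when the last put drops the count to zero. A minimal refcounting sketch, where the in_hw flag merely stands in for the RALTA register write:

```c
#include <stdio.h>

struct lpm_tree {
	int ref_count;
	int in_hw; /* placeholder for the device-side tree state */
};

static void tree_hold(struct lpm_tree *t)
{
	t->ref_count++;
}

static void tree_put(struct lpm_tree *t)
{
	if (--t->ref_count == 0) {
		t->in_hw = 0;
		puts("tree destroyed");
	}
}

int main(void)
{
	struct lpm_tree t = { .ref_count = 1, .in_hw = 1 }; /* create */

	tree_hold(&t); /* a FIB binds to the tree */
	tree_put(&t);  /* that FIB goes away */
	tree_put(&t);  /* last reference: "tree destroyed" */
	return 0;
}
```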
*old_tree; u8 old_id, new_id = new_tree->id; struct mlxsw_sp_vr *vr; int i, err; - if (!old_tree) - goto no_replace; + old_tree = mlxsw_sp->router->lpm.proto_trees[proto]; old_id = old_tree->id; for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { @@ -860,6 +911,11 @@ static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp, goto err_tree_replace; } + memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count, + sizeof(new_tree->prefix_ref_count)); + mlxsw_sp->router->lpm.proto_trees[proto] = new_tree; + mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree); + return 0; err_tree_replace: @@ -871,36 +927,6 @@ err_tree_replace: old_tree); } return err; - -no_replace: - fib->lpm_tree = new_tree; - mlxsw_sp_lpm_tree_hold(new_tree); - err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id); - if (err) { - mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree); - fib->lpm_tree = NULL; - return err; - } - return 0; -} - -static void -mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp, - enum mlxsw_sp_l3proto proto, - struct mlxsw_sp_prefix_usage *req_prefix_usage) -{ - int i; - - for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { - struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i]; - struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto); - unsigned char prefix; - - if (!mlxsw_sp_vr_is_used(vr)) - continue; - mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage) - mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix); - } } static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp) @@ -2630,7 +2656,8 @@ struct mlxsw_sp_nexthop_group_cmp_arg { static bool mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp, - const struct in6_addr *gw, int ifindex) + const struct in6_addr *gw, int ifindex, + int weight) { int i; @@ -2638,7 +2665,7 @@ mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp, const struct mlxsw_sp_nexthop *nh; nh = &nh_grp->nexthops[i]; - if (nh->ifindex == ifindex && + if (nh->ifindex == ifindex && nh->nh_weight == weight && ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr)) return true; } @@ -2657,11 +2684,13 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp, list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { struct in6_addr *gw; - int ifindex; + int ifindex, weight; ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex; + weight = mlxsw_sp_rt6->rt->rt6i_nh_weight; gw = &mlxsw_sp_rt6->rt->rt6i_gateway; - if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex)) + if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex, + weight)) return false; } @@ -4192,68 +4221,66 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node, } static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib *fib, struct mlxsw_sp_fib_node *fib_node) { - struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } }; + struct mlxsw_sp_prefix_usage req_prefix_usage; + struct mlxsw_sp_fib *fib = fib_node->fib; struct mlxsw_sp_lpm_tree *lpm_tree; int err; - /* Since the tree is shared between all virtual routers we must - * make sure it contains all the required prefix lengths. This - * can be computed by either adding the new prefix length to the - * existing prefix usage of a bound tree, or by aggregating the - * prefix lengths across all virtual routers and adding the new - * one as well. 
- */ - if (fib->lpm_tree) - mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, - &fib->lpm_tree->prefix_usage); - else - mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage); - mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len); + lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto]; + if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0) + goto out; + mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage); + mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len); lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, fib->proto); if (IS_ERR(lpm_tree)) return PTR_ERR(lpm_tree); - if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id) - return 0; - err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree); if (err) - return err; + goto err_lpm_tree_replace; +out: + lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++; return 0; -} -static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib *fib) -{ - if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) - return; - mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib); - mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree); - fib->lpm_tree = NULL; +err_lpm_tree_replace: + mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); + return err; } -static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node) +static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node) { - unsigned char prefix_len = fib_node->key.prefix_len; + struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree; + struct mlxsw_sp_prefix_usage req_prefix_usage; struct mlxsw_sp_fib *fib = fib_node->fib; + int err; - if (fib->prefix_ref_count[prefix_len]++ == 0) - mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len); -} + if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0) + return; + /* Try to construct a new LPM tree from the current prefix usage + * minus the unused one. If we fail, continue using the old one. 
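The link/unlink helpers above key tree rebuilds off a per-prefix-length reference count that now lives in the LPM tree itself: a new tree only has to be constructed when a prefix length gains its first user or loses its last one. A compact model of that trigger logic:

```c
#include <stdio.h>

#define PREFIX_COUNT 33 /* IPv4 prefix lengths 0..32 */

static unsigned long prefix_ref_count[PREFIX_COUNT];

static void link_prefix(int len)
{
	if (prefix_ref_count[len]++ == 0)
		printf("len %d now in use - grow the tree\n", len);
}

static void unlink_prefix(int len)
{
	if (--prefix_ref_count[len] == 0)
		printf("len %d unused - try to shrink the tree\n", len);
}

int main(void)
{
	link_prefix(24);   /* first /24 route: tree grows */
	link_prefix(24);   /* second /24 route: nothing to do */
	unlink_prefix(24); /* still one user left */
	unlink_prefix(24); /* last /24 gone: tree can shrink */
	return 0;
}
```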
+ */ + mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage); + mlxsw_sp_prefix_usage_clear(&req_prefix_usage, + fib_node->key.prefix_len); + lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, + fib->proto); + if (IS_ERR(lpm_tree)) + return; -static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node) -{ - unsigned char prefix_len = fib_node->key.prefix_len; - struct mlxsw_sp_fib *fib = fib_node->fib; + err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree); + if (err) + goto err_lpm_tree_replace; - if (--fib->prefix_ref_count[prefix_len] == 0) - mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len); + return; + +err_lpm_tree_replace: + mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); } static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp, @@ -4267,12 +4294,10 @@ static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp, return err; fib_node->fib = fib; - err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node); + err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node); if (err) goto err_fib_lpm_tree_link; - mlxsw_sp_fib_node_prefix_inc(fib_node); - return 0; err_fib_lpm_tree_link: @@ -4286,8 +4311,7 @@ static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_fib *fib = fib_node->fib; - mlxsw_sp_fib_node_prefix_dec(fib_node); - mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib); + mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node); fib_node->fib = NULL; mlxsw_sp_fib_node_remove(fib, fib_node); } @@ -4326,7 +4350,7 @@ mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr, err_fib_node_init: mlxsw_sp_fib_node_destroy(fib_node); err_fib_node_create: - mlxsw_sp_vr_put(vr); + mlxsw_sp_vr_put(mlxsw_sp, vr); return ERR_PTR(err); } @@ -4339,7 +4363,7 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp, return; mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node); mlxsw_sp_fib_node_destroy(fib_node); - mlxsw_sp_vr_put(vr); + mlxsw_sp_vr_put(mlxsw_sp, vr); } static struct mlxsw_sp_fib4_entry * @@ -4764,7 +4788,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, struct net_device *dev = rt->dst.dev; nh->nh_grp = nh_grp; - nh->nh_weight = 1; + nh->nh_weight = rt->rt6i_nh_weight; memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr)); mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); @@ -5362,7 +5386,7 @@ static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp, return; mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc); - mlxsw_sp_vr_put(vr); + mlxsw_sp_vr_put(mlxsw_sp, vr); } static int @@ -5399,7 +5423,7 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp, return; mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index); - mlxsw_sp_vr_put(vr); + mlxsw_sp_vr_put(mlxsw_sp, vr); } static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) @@ -6048,7 +6072,7 @@ err_fid_get: err_rif_alloc: err_rif_index_alloc: vr->rif_count--; - mlxsw_sp_vr_put(vr); + mlxsw_sp_vr_put(mlxsw_sp, vr); return ERR_PTR(err); } @@ -6071,7 +6095,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_put(fid); kfree(rif); vr->rif_count--; - mlxsw_sp_vr_put(vr); + mlxsw_sp_vr_put(mlxsw_sp, vr); } static void @@ -6861,7 +6885,7 @@ mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif) return 0; err_loopback_op: - mlxsw_sp_vr_put(ul_vr); + mlxsw_sp_vr_put(mlxsw_sp, ul_vr); return err; } @@ -6875,7 +6899,7 @@ static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false); --ul_vr->rif_count; - mlxsw_sp_vr_put(ul_vr); + 
mlxsw_sp_vr_put(mlxsw_sp, ul_vr); } static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = { @@ -7010,6 +7034,24 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) } #endif +static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp) +{ + char rdpm_pl[MLXSW_REG_RDPM_LEN]; + unsigned int i; + + MLXSW_REG_ZERO(rdpm, rdpm_pl); + + /* HW is determining switch priority based on DSCP-bits, but the + * kernel is still doing that based on the ToS. Since there's a + * mismatch in bits we need to make sure to translate the right + * value ToS would observe, skipping the 2 least-significant ECN bits. + */ + for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++) + mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2)); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl); +} + static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) { char rgcr_pl[MLXSW_REG_RGCR_LEN]; @@ -7022,6 +7064,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) mlxsw_reg_rgcr_pack(rgcr_pl, true, true); mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs); + mlxsw_reg_rgcr_usp_set(rgcr_pl, true); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); if (err) return err; @@ -7097,6 +7140,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) if (err) goto err_mp_hash_init; + err = mlxsw_sp_dscp_init(mlxsw_sp); + if (err) + goto err_dscp_init; + mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event; err = register_fib_notifier(&mlxsw_sp->router->fib_nb, mlxsw_sp_router_fib_dump_flush); @@ -7106,6 +7153,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) return 0; err_register_fib_notifier: +err_dscp_init: err_mp_hash_init: unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb); err_register_netevent_notifier: diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 2fe96f1f3fe5..bd6e9014bc74 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -28,6 +28,7 @@ #include <linux/spi/spi.h> #include <linux/gpio.h> #include <linux/of_gpio.h> +#include <linux/of_net.h> #include "ks8851.h" @@ -407,15 +408,23 @@ static void ks8851_read_mac_addr(struct net_device *dev) * @ks: The device structure * * Get or create the initial mac address for the device and then set that - * into the station address register. If there is an EEPROM present, then + * into the station address register. A mac address supplied in the device + * tree takes precedence. Otherwise, if there is an EEPROM present, then * we try that. If no valid mac address is found we use eth_random_addr() * to create a new one. 
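mlxsw_sp_dscp_init() programs the rdpm table indexed by DSCP, reconstructing the ToS byte that rt_tos2priority() expects by shifting past the two ECN bits. The shift arithmetic on its own:

```c
#include <stdio.h>

int main(void)
{
	/* The ToS byte is DSCP (upper 6 bits) plus ECN (lower 2 bits), so
	 * a DSCP table index becomes the corresponding ToS value by a
	 * left shift of 2, exactly the i << 2 in the rdpm loop.
	 */
	unsigned int dscp;

	for (dscp = 0; dscp < 64; dscp += 8) {
		unsigned int tos = dscp << 2;

		printf("DSCP %2u -> ToS 0x%02x\n", dscp, tos);
	}
	return 0; /* e.g. DSCP 46 (EF) corresponds to ToS 0xb8 */
}
```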
*/ static void ks8851_init_mac(struct ks8851_net *ks) { struct net_device *dev = ks->netdev; + const u8 *mac_addr; + + mac_addr = of_get_mac_address(ks->spidev->dev.of_node); + if (mac_addr) { + memcpy(dev->dev_addr, mac_addr, ETH_ALEN); + ks8851_write_mac_addr(dev); + return; + } - /* first, try reading what we've got already */ if (ks->rc_ccr & CCR_EEPROM) { ks8851_read_mac_addr(dev); if (is_valid_ether_addr(dev->dev_addr)) diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 24c4408b5734..d5866d708dfa 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -22,6 +22,8 @@ nfp-objs := \ nfp_hwmon.o \ nfp_main.o \ nfp_net_common.o \ + nfp_net_ctrl.o \ + nfp_net_debugdump.o \ nfp_net_ethtool.o \ nfp_net_main.o \ nfp_net_repr.o \ @@ -43,6 +45,7 @@ endif ifeq ($(CONFIG_BPF_SYSCALL),y) nfp-objs += \ + bpf/cmsg.o \ bpf/main.o \ bpf/offload.o \ bpf/verifier.o \ diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c new file mode 100644 index 000000000000..80d3aa0fc9d3 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -0,0 +1,453 @@ +/* + * Copyright (C) 2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/bpf.h> +#include <linux/bitops.h> +#include <linux/bug.h> +#include <linux/jiffies.h> +#include <linux/skbuff.h> +#include <linux/wait.h> + +#include "../nfp_app.h" +#include "../nfp_net.h" +#include "fw.h" +#include "main.h" + +#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg) + +#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4) + +static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf) +{ + u16 used_tags; + + used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last; + + return used_tags > NFP_BPF_TAG_ALLOC_SPAN; +} + +static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf) +{ + /* All FW communication for BPF is request-reply. To make sure we + * don't reuse the message ID too early after timeout - limit the + * number of requests in flight. 
+ */ + if (nfp_bpf_all_tags_busy(bpf)) { + cmsg_warn(bpf, "all FW request contexts busy!\n"); + return -EAGAIN; + } + + WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator)); + return bpf->tag_alloc_next++; +} + +static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag) +{ + WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator)); + + while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) && + bpf->tag_alloc_last != bpf->tag_alloc_next) + bpf->tag_alloc_last++; +} + +static struct sk_buff * +nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size) +{ + struct sk_buff *skb; + + skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL); + skb_put(skb, size); + + return skb; +} + +static struct sk_buff * +nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n) +{ + unsigned int size; + + size = sizeof(struct cmsg_req_map_op); + size += sizeof(struct cmsg_key_value_pair) * n; + + return nfp_bpf_cmsg_alloc(bpf, size); +} + +static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb) +{ + struct cmsg_hdr *hdr; + + hdr = (struct cmsg_hdr *)skb->data; + + return be16_to_cpu(hdr->tag); +} + +static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag) +{ + unsigned int msg_tag; + struct sk_buff *skb; + + skb_queue_walk(&bpf->cmsg_replies, skb) { + msg_tag = nfp_bpf_cmsg_get_tag(skb); + if (msg_tag == tag) { + nfp_bpf_free_tag(bpf, tag); + __skb_unlink(skb, &bpf->cmsg_replies); + return skb; + } + } + + return NULL; +} + +static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag) +{ + struct sk_buff *skb; + + nfp_ctrl_lock(bpf->app->ctrl); + skb = __nfp_bpf_reply(bpf, tag); + nfp_ctrl_unlock(bpf->app->ctrl); + + return skb; +} + +static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag) +{ + struct sk_buff *skb; + + nfp_ctrl_lock(bpf->app->ctrl); + skb = __nfp_bpf_reply(bpf, tag); + if (!skb) + nfp_bpf_free_tag(bpf, tag); + nfp_ctrl_unlock(bpf->app->ctrl); + + return skb; +} + +static struct sk_buff * +nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type, + int tag) +{ + struct sk_buff *skb; + int i, err; + + for (i = 0; i < 50; i++) { + udelay(4); + skb = nfp_bpf_reply(bpf, tag); + if (skb) + return skb; + } + + err = wait_event_interruptible_timeout(bpf->cmsg_wq, + skb = nfp_bpf_reply(bpf, tag), + msecs_to_jiffies(5000)); + /* We didn't get a response - try last time and atomically drop + * the tag even if no response is matched. + */ + if (!skb) + skb = nfp_bpf_reply_drop_tag(bpf, tag); + if (err < 0) { + cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n", + err == ERESTARTSYS ? 
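The tag allocator above bounds the number of in-flight FW requests with two monotonically increasing u16 counters; their difference stays correct across wraparound because the subtraction is modular. A self-contained check of that property, with values chosen to show the wrapped case:

```c
#include <stdio.h>
#include <stdint.h>

#define TAG_ALLOC_SPAN (UINT16_MAX / 4)

/* Model of nfp_bpf_all_tags_busy(): both counters only ever increment,
 * and truncation to u16 keeps the in-flight count right after wrap.
 */
static int all_tags_busy(uint16_t alloc_next, uint16_t alloc_last)
{
	uint16_t used_tags = alloc_next - alloc_last;

	return used_tags > TAG_ALLOC_SPAN;
}

int main(void)
{
	/* Wrapped case: next has overflowed past 0, last has not yet. */
	printf("%d\n", all_tags_busy(5, 65530));     /* 11 in flight: 0 */
	printf("%d\n", all_tags_busy(20000, 65530)); /* ~20k in flight: 1 */
	return 0;
}
```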
"interrupted" : "error", + type, err); + return ERR_PTR(err); + } + if (!skb) { + cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n", + type); + return ERR_PTR(-ETIMEDOUT); + } + + return skb; +} + +static struct sk_buff * +nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb, + enum nfp_bpf_cmsg_type type, unsigned int reply_size) +{ + struct cmsg_hdr *hdr; + int tag; + + nfp_ctrl_lock(bpf->app->ctrl); + tag = nfp_bpf_alloc_tag(bpf); + if (tag < 0) { + nfp_ctrl_unlock(bpf->app->ctrl); + dev_kfree_skb_any(skb); + return ERR_PTR(tag); + } + + hdr = (void *)skb->data; + hdr->ver = CMSG_MAP_ABI_VERSION; + hdr->type = type; + hdr->tag = cpu_to_be16(tag); + + __nfp_app_ctrl_tx(bpf->app, skb); + + nfp_ctrl_unlock(bpf->app->ctrl); + + skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag); + if (IS_ERR(skb)) + return skb; + + hdr = (struct cmsg_hdr *)skb->data; + /* 0 reply_size means caller will do the validation */ + if (reply_size && skb->len != reply_size) { + cmsg_warn(bpf, "cmsg drop - wrong size %d != %d!\n", + skb->len, reply_size); + goto err_free; + } + if (hdr->type != __CMSG_REPLY(type)) { + cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n", + hdr->type, __CMSG_REPLY(type)); + goto err_free; + } + + return skb; +err_free: + dev_kfree_skb_any(skb); + return ERR_PTR(-EIO); +} + +static int +nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf, + struct cmsg_reply_map_simple *reply) +{ + static const int res_table[] = { + [CMSG_RC_SUCCESS] = 0, + [CMSG_RC_ERR_MAP_FD] = -EBADFD, + [CMSG_RC_ERR_MAP_NOENT] = -ENOENT, + [CMSG_RC_ERR_MAP_ERR] = -EINVAL, + [CMSG_RC_ERR_MAP_PARSE] = -EIO, + [CMSG_RC_ERR_MAP_EXIST] = -EEXIST, + [CMSG_RC_ERR_MAP_NOMEM] = -ENOMEM, + [CMSG_RC_ERR_MAP_E2BIG] = -E2BIG, + }; + u32 rc; + + rc = be32_to_cpu(reply->rc); + if (rc >= ARRAY_SIZE(res_table)) { + cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc); + return -EIO; + } + + return res_table[rc]; +} + +long long int +nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map) +{ + struct cmsg_reply_map_alloc_tbl *reply; + struct cmsg_req_map_alloc_tbl *req; + struct sk_buff *skb; + u32 tid; + int err; + + skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req)); + if (!skb) + return -ENOMEM; + + req = (void *)skb->data; + req->key_size = cpu_to_be32(map->key_size); + req->value_size = cpu_to_be32(map->value_size); + req->max_entries = cpu_to_be32(map->max_entries); + req->map_type = cpu_to_be32(map->map_type); + req->map_flags = 0; + + skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC, + sizeof(*reply)); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + reply = (void *)skb->data; + err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr); + if (err) + goto err_free; + + tid = be32_to_cpu(reply->tid); + dev_consume_skb_any(skb); + + return tid; +err_free: + dev_kfree_skb_any(skb); + return err; +} + +void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map) +{ + struct cmsg_reply_map_free_tbl *reply; + struct cmsg_req_map_free_tbl *req; + struct sk_buff *skb; + int err; + + skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req)); + if (!skb) { + cmsg_warn(bpf, "leaking map - failed to allocate msg\n"); + return; + } + + req = (void *)skb->data; + req->tid = cpu_to_be32(nfp_map->tid); + + skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE, + sizeof(*reply)); + if (IS_ERR(skb)) { + cmsg_warn(bpf, "leaking map - I/O error\n"); + return; + } + + reply = (void *)skb->data; + err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr); + if (err) + cmsg_warn(bpf, "leaking 
map - FW responded with: %d\n", err); + + dev_consume_skb_any(skb); +} + +static int +nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, + enum nfp_bpf_cmsg_type op, + u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value) +{ + struct nfp_bpf_map *nfp_map = offmap->dev_priv; + struct nfp_app_bpf *bpf = nfp_map->bpf; + struct bpf_map *map = &offmap->map; + struct cmsg_reply_map_op *reply; + struct cmsg_req_map_op *req; + struct sk_buff *skb; + int err; + + /* FW messages have no space for more than 32 bits of flags */ + if (flags >> 32) + return -EOPNOTSUPP; + + skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1); + if (!skb) + return -ENOMEM; + + req = (void *)skb->data; + req->tid = cpu_to_be32(nfp_map->tid); + req->count = cpu_to_be32(1); + req->flags = cpu_to_be32(flags); + + /* Copy inputs */ + if (key) + memcpy(&req->elem[0].key, key, map->key_size); + if (value) + memcpy(&req->elem[0].value, value, map->value_size); + + skb = nfp_bpf_cmsg_communicate(bpf, skb, op, + sizeof(*reply) + sizeof(*reply->elem)); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + reply = (void *)skb->data; + err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr); + if (err) + goto err_free; + + /* Copy outputs */ + if (out_key) + memcpy(out_key, &reply->elem[0].key, map->key_size); + if (out_value) + memcpy(out_value, &reply->elem[0].value, map->value_size); + + dev_consume_skb_any(skb); + + return 0; +err_free: + dev_kfree_skb_any(skb); + return err; +} + +int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap, + void *key, void *value, u64 flags) +{ + return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE, + key, value, flags, NULL, NULL); +} + +int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key) +{ + return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE, + key, NULL, 0, NULL, NULL); +} + +int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap, + void *key, void *value) +{ + return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP, + key, NULL, 0, NULL, value); +} + +int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap, + void *next_key) +{ + return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST, + NULL, NULL, 0, next_key, NULL); +} + +int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap, + void *key, void *next_key) +{ + return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT, + key, NULL, 0, next_key, NULL); +} + +void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb) +{ + struct nfp_app_bpf *bpf = app->priv; + unsigned int tag; + + if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) { + cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len); + goto err_free; + } + + nfp_ctrl_lock(bpf->app->ctrl); + + tag = nfp_bpf_cmsg_get_tag(skb); + if (unlikely(!test_bit(tag, bpf->tag_allocator))) { + cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n", + tag); + goto err_unlock; + } + + __skb_queue_tail(&bpf->cmsg_replies, skb); + wake_up_interruptible_all(&bpf->cmsg_wq); + + nfp_ctrl_unlock(bpf->app->ctrl); + + return; +err_unlock: + nfp_ctrl_unlock(bpf->app->ctrl); +err_free: + dev_kfree_skb_any(skb); +} diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h new file mode 100644 index 000000000000..cfcc7bcb2c67 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h @@ -0,0 +1,157 @@ +/* + * Copyright (C) 2017 Netronome Systems, Inc. 
+ * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef NFP_BPF_FW_H +#define NFP_BPF_FW_H 1 + +#include <linux/bitops.h> +#include <linux/types.h> + +enum bpf_cap_tlv_type { + NFP_BPF_CAP_TYPE_FUNC = 1, + NFP_BPF_CAP_TYPE_ADJUST_HEAD = 2, + NFP_BPF_CAP_TYPE_MAPS = 3, +}; + +struct nfp_bpf_cap_tlv_func { + __le32 func_id; + __le32 func_addr; +}; + +struct nfp_bpf_cap_tlv_adjust_head { + __le32 flags; + __le32 off_min; + __le32 off_max; + __le32 guaranteed_sub; + __le32 guaranteed_add; +}; + +#define NFP_BPF_ADJUST_HEAD_NO_META BIT(0) + +struct nfp_bpf_cap_tlv_maps { + __le32 types; + __le32 max_maps; + __le32 max_elems; + __le32 max_key_sz; + __le32 max_val_sz; + __le32 max_elem_sz; +}; + +/* + * Types defined for map related control messages + */ +#define CMSG_MAP_ABI_VERSION 1 + +enum nfp_bpf_cmsg_type { + CMSG_TYPE_MAP_ALLOC = 1, + CMSG_TYPE_MAP_FREE = 2, + CMSG_TYPE_MAP_LOOKUP = 3, + CMSG_TYPE_MAP_UPDATE = 4, + CMSG_TYPE_MAP_DELETE = 5, + CMSG_TYPE_MAP_GETNEXT = 6, + CMSG_TYPE_MAP_GETFIRST = 7, + __CMSG_TYPE_MAP_MAX, +}; + +#define CMSG_TYPE_MAP_REPLY_BIT 7 +#define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req)) + +#define CMSG_MAP_KEY_LW 16 +#define CMSG_MAP_VALUE_LW 16 + +enum nfp_bpf_cmsg_status { + CMSG_RC_SUCCESS = 0, + CMSG_RC_ERR_MAP_FD = 1, + CMSG_RC_ERR_MAP_NOENT = 2, + CMSG_RC_ERR_MAP_ERR = 3, + CMSG_RC_ERR_MAP_PARSE = 4, + CMSG_RC_ERR_MAP_EXIST = 5, + CMSG_RC_ERR_MAP_NOMEM = 6, + CMSG_RC_ERR_MAP_E2BIG = 7, +}; + +struct cmsg_hdr { + u8 type; + u8 ver; + __be16 tag; +}; + +struct cmsg_reply_map_simple { + struct cmsg_hdr hdr; + __be32 rc; +}; + +struct cmsg_req_map_alloc_tbl { + struct cmsg_hdr hdr; + __be32 key_size; /* in bytes */ + __be32 value_size; /* in bytes */ + __be32 max_entries; + __be32 map_type; + __be32 map_flags; /* reserved */ +}; + +struct cmsg_reply_map_alloc_tbl { + struct cmsg_reply_map_simple reply_hdr; + __be32 tid; +}; + +struct cmsg_req_map_free_tbl { + struct cmsg_hdr hdr; + __be32 tid; +}; + +struct cmsg_reply_map_free_tbl { + struct cmsg_reply_map_simple reply_hdr; + __be32 count; +}; + +struct cmsg_key_value_pair { + __be32 key[CMSG_MAP_KEY_LW]; + __be32 value[CMSG_MAP_VALUE_LW]; +}; + +struct cmsg_req_map_op { + struct cmsg_hdr hdr; 
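The map-op request defined in fw.h ends in a flexible array of key/value pairs, which is why nfp_bpf_cmsg_map_req_alloc() sizes the message as the header plus n pairs. A userspace restatement of that sizing; plain uint32_t stands in for __be32, a C99 flexible array member replaces elem[0], and the layouts are illustrative only:

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define CMSG_MAP_KEY_LW   16
#define CMSG_MAP_VALUE_LW 16

struct cmsg_hdr { uint8_t type; uint8_t ver; uint16_t tag; };

struct cmsg_key_value_pair {
	uint32_t key[CMSG_MAP_KEY_LW];
	uint32_t value[CMSG_MAP_VALUE_LW];
};

struct cmsg_req_map_op {
	struct cmsg_hdr hdr;
	uint32_t tid;
	uint32_t count;
	uint32_t flags;
	struct cmsg_key_value_pair elem[];
};

int main(void)
{
	unsigned int n = 1; /* one key/value pair, as in the entry ops */
	size_t size = sizeof(struct cmsg_req_map_op) +
		      n * sizeof(struct cmsg_key_value_pair);

	/* 16 header bytes + 128 per pair = 144 on typical ABIs */
	printf("request size for n=%u: %zu bytes\n", n, size);
	return 0;
}
```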
+ __be32 tid; + __be32 count; + __be32 flags; + struct cmsg_key_value_pair elem[0]; +}; + +struct cmsg_reply_map_op { + struct cmsg_reply_map_simple reply_hdr; + __be32 count; + __be32 resv; + struct cmsg_key_value_pair elem[0]; +}; +#endif diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 995e95410b11..56451edf01c2 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016 Netronome Systems, Inc. + * Copyright (C) 2016-2017 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -33,6 +33,7 @@ #define pr_fmt(fmt) "NFP net bpf: " fmt +#include <linux/bug.h> #include <linux/kernel.h> #include <linux/bpf.h> #include <linux/filter.h> @@ -66,12 +67,6 @@ next2 = nfp_meta_next(next)) static bool -nfp_meta_has_next(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return meta->l.next != &nfp_prog->insns; -} - -static bool nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { return meta->l.prev != &nfp_prog->insns; @@ -90,19 +85,25 @@ static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn) static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog) { - return nfp_prog->start_off + nfp_prog->prog_len; + return nfp_prog->prog_len; } -static unsigned int -nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset) +static bool +nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off) { - return offset - nfp_prog->start_off; + /* If there is a recorded error we may have dropped instructions; + * that doesn't have to be due to translator bug, and the translation + * will fail anyway, so just return OK. 
+ */ + if (nfp_prog->error) + return true; + return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off); } /* --- Emitters --- */ static void __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, - u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync) + u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync, bool indir) { enum cmd_ctx_swap ctx; u64 insn; @@ -120,14 +121,15 @@ __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, FIELD_PREP(OP_CMD_CNT, size) | FIELD_PREP(OP_CMD_SIG, sync) | FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) | + FIELD_PREP(OP_CMD_INDIR, indir) | FIELD_PREP(OP_CMD_MODE, mode); nfp_prog_push(nfp_prog, insn); } static void -emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, - u8 mode, u8 xfer, swreg lreg, swreg rreg, u8 size, bool sync) +emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, + swreg lreg, swreg rreg, u8 size, bool sync, bool indir) { struct nfp_insn_re_regs reg; int err; @@ -148,7 +150,22 @@ emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, return; } - __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync); + __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync, + indir); +} + +static void +emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, + swreg lreg, swreg rreg, u8 size, bool sync) +{ + emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, false); +} + +static void +emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, + swreg lreg, swreg rreg, u8 size, bool sync) +{ + emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, true); } static void @@ -172,22 +189,28 @@ __emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip, nfp_prog_push(nfp_prog, insn); } -static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer) +static void +emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer, + enum nfp_relo_type relo) { - if (defer > 2) { + if (mask == BR_UNC && defer > 2) { pr_err("BUG: branch defer out of bounds %d\n", defer); nfp_prog->error = -EFAULT; return; } - __emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer); + + __emit_br(nfp_prog, mask, + mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND, + BR_CSS_NONE, addr, defer); + + nfp_prog->prog[nfp_prog->prog_len - 1] |= + FIELD_PREP(OP_RELO_TYPE, relo); } static void emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer) { - __emit_br(nfp_prog, mask, - mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND, - BR_CSS_NONE, addr, defer); + emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL); } static void @@ -230,9 +253,11 @@ emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm, return; } - __emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width, - invert, shift, reg.wr_both, - reg.dst_lmextn, reg.src_lmextn); + /* Use reg.dst when destination is No-Dest. */ + __emit_immed(nfp_prog, + swreg_type(dst) == NN_REG_NONE ? 
reg.dst : reg.areg,
+		     reg.breg, imm >> 8, width, invert, shift,
+		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
 }
 
 static void
@@ -458,6 +483,21 @@ static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
 	}
 }
 
+static void
+wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
+	       enum nfp_relo_type relo)
+{
+	if (imm > 0xffff) {
+		pr_err("relocation of a large immediate!\n");
+		nfp_prog->error = -EFAULT;
+		return;
+	}
+	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
+
+	nfp_prog->prog[nfp_prog->prog_len - 1] |=
+		FIELD_PREP(OP_RELO_TYPE, relo);
+}
+
 /* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
  * If the @imm is small enough encode it directly in operand and return
  * otherwise load @imm to a spare register and return its encoding.
@@ -490,24 +530,179 @@ static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
 		emit_nop(nfp_prog);
 }
 
+static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
+{
+	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
+}
+
+static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
+{
+	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
+}
+
+/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
+ * result to @dst from low end.
+ */
 static void
-wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
-	       enum br_special special)
+wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
+		u8 offset)
 {
-	emit_br(nfp_prog, mask, 0, 0);
+	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
+	u8 mask = (1 << field_len) - 1;
 
-	nfp_prog->prog[nfp_prog->prog_len - 1] |=
-		FIELD_PREP(OP_BR_SPECIAL, special);
+	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
 }
 
-static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
+static void
+addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+	      swreg *rega, swreg *regb)
 {
-	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
+	if (offset == reg_imm(0)) {
+		*rega = reg_a(src_gpr);
+		*regb = reg_b(src_gpr + 1);
+		return;
+	}
+
+	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
+	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
+		 reg_imm(0));
+	*rega = imm_a(nfp_prog);
+	*regb = imm_b(nfp_prog);
 }
 
-static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
+/* NFP has a Command Push Pull bus which supports bulk memory operations. */
+static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
+	bool descending_seq = meta->ldst_gather_len < 0;
+	s16 len = abs(meta->ldst_gather_len);
+	swreg src_base, off;
+	bool src_40bit_addr;
+	unsigned int i;
+	u8 xfer_num;
+
+	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
+	src_base = reg_a(meta->insn.src_reg * 2);
+	xfer_num = round_up(len, 4) / 4;
+
+	if (src_40bit_addr)
+		addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
+			      &off);
+
+	/* Set up PREV_ALU fields to override memory read length. */
+	if (len > 32)
+		wrp_immed(nfp_prog, reg_none(),
+			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
+
+	/* Memory read from source addr into transfer-in registers. */
+	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
+		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
+		     src_base, off, xfer_num - 1, true, len > 32);
+
+	/* Move from transfer-in to transfer-out.
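	 * (A worked example: a 10-byte gather gives
	 * xfer_num = round_up(10, 4) / 4 = 3, so registers xfer_0..xfer_2
	 * are copied here. For a 40-bit map-value source, addr40_offset()
	 * above formed the address with an add/add-with-carry pair,
	 * i.e. a 64-bit add emulated on a 32-bit register pair.)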
 */
+	for (i = 0; i < xfer_num; i++)
+		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));
+
+	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));
+
+	if (len <= 8) {
+		/* Use single direct_ref write8. */
+		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
+			 true);
+	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
+		/* Use single direct_ref write32. */
+		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
+			 true);
+	} else if (len <= 32) {
+		/* Use single indirect_ref write8. */
+		wrp_immed(nfp_prog, reg_none(),
+			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
+		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+			       reg_a(meta->paired_st->dst_reg * 2), off,
+			       len - 1, true);
+	} else if (IS_ALIGNED(len, 4)) {
+		/* Use single indirect_ref write32. */
+		wrp_immed(nfp_prog, reg_none(),
+			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
+		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+			       reg_a(meta->paired_st->dst_reg * 2), off,
+			       xfer_num - 1, true);
+	} else if (len <= 40) {
+		/* Use one direct_ref write32 to write the first 32 bytes, then
+		 * another direct_ref write8 to write the remaining bytes.
+		 */
+		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
+			 true);
+
+		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
+				      imm_b(nfp_prog));
+		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
+			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
+			 true);
+	} else {
+		/* Use one indirect_ref write32 to write the 4-byte aligned
+		 * length, then another direct_ref write8 to write the
+		 * remaining bytes.
+		 */
+		u8 new_off;
+
+		wrp_immed(nfp_prog, reg_none(),
+			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
+		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+			       reg_a(meta->paired_st->dst_reg * 2), off,
+			       xfer_num - 2, true);
+		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
+		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
+		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
+			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
+			 (len & 0x3) - 1, true);
+	}
+
+	/* TODO: The following extra load is to make sure the data flow is
+	 * identical before and after we do the memory copy optimization.
+	 *
+	 * The load destination register is not guaranteed to be dead, so we
+	 * need to make sure it is loaded with the same value as before
+	 * this transformation.
+	 *
+	 * These extra loads could be removed once we have accurate register
+	 * usage information.
+	 */
+	if (descending_seq)
+		xfer_num = 0;
+	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
+		xfer_num = xfer_num - 1;
+	else
+		xfer_num = xfer_num - 2;
+
+	switch (BPF_SIZE(meta->insn.code)) {
+	case BPF_B:
+		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+				reg_xfer(xfer_num), 1,
+				IS_ALIGNED(len, 4) ?
3 : (len & 3) - 1); + break; + case BPF_H: + wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2), + reg_xfer(xfer_num), 2, (len & 3) ^ 2); + break; + case BPF_W: + wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2), + reg_xfer(0)); + break; + case BPF_DW: + wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2), + reg_xfer(xfer_num)); + wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), + reg_xfer(xfer_num + 1)); + break; + } + + if (BPF_SIZE(meta->insn.code) != BPF_DW) + wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); + + return 0; } static int @@ -540,20 +735,20 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size) } static int -data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, - u8 dst_gpr, int size) +data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, + swreg lreg, swreg rreg, int size, enum cmd_mode mode) { unsigned int i; u8 mask, sz; - /* We load the value from the address indicated in @offset and then + /* We load the value from the address indicated in rreg + lreg and then * mask out the data we don't need. Note: this is little endian! */ sz = max(size, 4); mask = size < 4 ? GENMASK(size - 1, 0) : 0; - emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, - reg_a(src_gpr), offset, sz / 4 - 1, true); + emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0, + lreg, rreg, sz / 4 - 1, true); i = 0; if (mask) @@ -570,6 +765,26 @@ data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, } static int +data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, + u8 dst_gpr, u8 size) +{ + return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset, + size, CMD_MODE_32b); +} + +static int +data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, + u8 dst_gpr, u8 size) +{ + swreg rega, regb; + + addr40_offset(nfp_prog, src_gpr, offset, ®a, ®b); + + return data_ld_host_order(nfp_prog, dst_gpr, rega, regb, + size, CMD_MODE_40b_BA); +} + +static int construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size) { swreg tmp_reg; @@ -583,7 +798,7 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size) imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size)); emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog)); - wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT); + emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT); /* Load data */ return data_ld(nfp_prog, imm_b(nfp_prog), 0, size); @@ -596,7 +811,7 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size) /* Check packet length */ tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog)); emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg); - wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT); + emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT); /* Load data */ tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog)); @@ -975,9 +1190,6 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, { const struct bpf_insn *insn = &meta->insn; - if (insn->off < 0) /* TODO */ - return -EOPNOTSUPP; - wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op, insn->src_reg * 2, br_mask, insn->off); wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op, @@ -995,9 +1207,6 @@ wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 reg = insn->dst_reg * 2; swreg tmp_reg; - if (insn->off < 0) /* TODO */ - return -EOPNOTSUPP; - tmp_reg = ur_load_imm_any(nfp_prog, imm & 
~0U, imm_b(nfp_prog)); if (!swap) emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg); @@ -1027,9 +1236,6 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, areg = insn->dst_reg * 2; breg = insn->src_reg * 2; - if (insn->off < 0) /* TODO */ - return -EOPNOTSUPP; - if (swap) { areg ^= breg; breg ^= areg; @@ -1052,6 +1258,136 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out) SHF_SC_R_ROT, 16); } +static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog); + struct nfp_bpf_cap_adjust_head *adjust_head; + u32 ret_einval, end; + + adjust_head = &nfp_prog->bpf->adjust_head; + + /* Optimized version - 5 vs 14 cycles */ + if (nfp_prog->adjust_head_location != UINT_MAX) { + if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n)) + return -EINVAL; + + emit_alu(nfp_prog, pptr_reg(nfp_prog), + reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog)); + emit_alu(nfp_prog, plen_reg(nfp_prog), + plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); + emit_alu(nfp_prog, pv_len(nfp_prog), + pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); + + wrp_immed(nfp_prog, reg_both(0), 0); + wrp_immed(nfp_prog, reg_both(1), 0); + + /* TODO: when adjust head is guaranteed to succeed we can + * also eliminate the following if (r0 == 0) branch. + */ + + return 0; + } + + ret_einval = nfp_prog_current_offset(nfp_prog) + 14; + end = ret_einval + 2; + + /* We need to use a temp because offset is just a part of the pkt ptr */ + emit_alu(nfp_prog, tmp, + reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog)); + + /* Validate result will fit within FW datapath constraints */ + emit_alu(nfp_prog, reg_none(), + tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min)); + emit_br(nfp_prog, BR_BLO, ret_einval, 0); + emit_alu(nfp_prog, reg_none(), + reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp); + emit_br(nfp_prog, BR_BLO, ret_einval, 0); + + /* Validate the length is at least ETH_HLEN */ + emit_alu(nfp_prog, tmp_len, + plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); + emit_alu(nfp_prog, reg_none(), + tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN)); + emit_br(nfp_prog, BR_BMI, ret_einval, 0); + + /* Load the ret code */ + wrp_immed(nfp_prog, reg_both(0), 0); + wrp_immed(nfp_prog, reg_both(1), 0); + + /* Modify the packet metadata */ + emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0); + + /* Skip over the -EINVAL ret code (defer 2) */ + emit_br(nfp_prog, BR_UNC, end, 2); + + emit_alu(nfp_prog, plen_reg(nfp_prog), + plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); + emit_alu(nfp_prog, pv_len(nfp_prog), + pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); + + /* return -EINVAL target */ + if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval)) + return -EINVAL; + + wrp_immed(nfp_prog, reg_both(0), -22); + wrp_immed(nfp_prog, reg_both(1), ~0); + + if (!nfp_prog_confirm_current_offset(nfp_prog, end)) + return -EINVAL; + + return 0; +} + +static int +map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + struct bpf_offloaded_map *offmap; + struct nfp_bpf_map *nfp_map; + bool load_lm_ptr; + u32 ret_tgt; + s64 lm_off; + swreg tid; + + offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr; + nfp_map = offmap->dev_priv; + + /* We only have to reload LM0 if the key is not at start of stack */ + lm_off = nfp_prog->stack_depth; + lm_off += meta->arg2.var_off.value + meta->arg2.off; + load_lm_ptr = meta->arg2_var_off || lm_off; + + /* Set LM0 to start of key */ + if (load_lm_ptr) + emit_csr_wr(nfp_prog, 
reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0); + + /* Load map ID into a register, it should actually fit as an immediate + * but in case it doesn't deal with it here, not in the delay slots. + */ + tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog)); + + emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + BPF_FUNC_map_lookup_elem, + 2, RELO_BR_HELPER); + ret_tgt = nfp_prog_current_offset(nfp_prog) + 2; + + /* Load map ID into A0 */ + wrp_mov(nfp_prog, reg_a(0), tid); + + /* Load the return address into B0 */ + wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL); + + if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt)) + return -EINVAL; + + /* Reset the LM0 pointer */ + if (!load_lm_ptr) + return 0; + + emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0); + wrp_nops(nfp_prog, 3); + + return 0; +} + /* --- Callbacks --- */ static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { @@ -1486,14 +1822,29 @@ mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); - return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg, - meta->insn.dst_reg * 2, size); + return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2, + tmp_reg, meta->insn.dst_reg * 2, size); +} + +static int +mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + unsigned int size) +{ + swreg tmp_reg; + + tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); + + return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2, + tmp_reg, meta->insn.dst_reg * 2, size); } static int mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, unsigned int size) { + if (meta->ldst_gather_len) + return nfp_cpp_memcpy(nfp_prog, meta); + if (meta->ptr.type == PTR_TO_CTX) { if (nfp_prog->type == BPF_PROG_TYPE_XDP) return mem_ldx_xdp(nfp_prog, meta, size); @@ -1508,6 +1859,9 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, return mem_ldx_stack(nfp_prog, meta, size, meta->ptr.off + meta->ptr.var_off.value); + if (meta->ptr.type == PTR_TO_MAP_VALUE) + return mem_ldx_emem(nfp_prog, meta, size); + return -EOPNOTSUPP; } @@ -1630,8 +1984,6 @@ static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - if (meta->insn.off < 0) /* TODO */ - return -EOPNOTSUPP; emit_br(nfp_prog, BR_UNC, meta->insn.off, 0); return 0; @@ -1646,9 +1998,6 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) or1 = reg_a(insn->dst_reg * 2); or2 = reg_b(insn->dst_reg * 2 + 1); - if (insn->off < 0) /* TODO */ - return -EOPNOTSUPP; - if (imm & ~0U) { tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); emit_alu(nfp_prog, imm_a(nfp_prog), @@ -1689,15 +2038,32 @@ static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true); } +static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true); +} + +static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false); +} + +static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false); +} + +static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true); +} + static int jset_imm(struct nfp_prog 
*nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; u64 imm = insn->imm; /* sign extend */ swreg tmp_reg; - if (insn->off < 0) /* TODO */ - return -EOPNOTSUPP; - if (!imm) { meta->skip = true; return 0; @@ -1726,9 +2092,6 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u64 imm = insn->imm; /* sign extend */ swreg tmp_reg; - if (insn->off < 0) /* TODO */ - return -EOPNOTSUPP; - if (!imm) { emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); @@ -1753,9 +2116,6 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; - if (insn->off < 0) /* TODO */ - return -EOPNOTSUPP; - emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), ALU_OP_XOR, reg_b(insn->src_reg * 2)); emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1), @@ -1787,6 +2147,26 @@ static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true); } +static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true); +} + +static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false); +} + +static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false); +} + +static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true); +} + static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); @@ -1797,9 +2177,22 @@ static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE); } +static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + switch (meta->insn.imm) { + case BPF_FUNC_xdp_adjust_head: + return adjust_head(nfp_prog, meta); + case BPF_FUNC_map_lookup_elem: + return map_lookup_stack(nfp_prog, meta); + default: + WARN_ONCE(1, "verifier allowed unsupported function\n"); + return -EOPNOTSUPP; + } +} + static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT); + emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT); return 0; } @@ -1860,6 +2253,10 @@ static const instr_cb_t instr_cb[256] = { [BPF_JMP | BPF_JGE | BPF_K] = jge_imm, [BPF_JMP | BPF_JLT | BPF_K] = jlt_imm, [BPF_JMP | BPF_JLE | BPF_K] = jle_imm, + [BPF_JMP | BPF_JSGT | BPF_K] = jsgt_imm, + [BPF_JMP | BPF_JSGE | BPF_K] = jsge_imm, + [BPF_JMP | BPF_JSLT | BPF_K] = jslt_imm, + [BPF_JMP | BPF_JSLE | BPF_K] = jsle_imm, [BPF_JMP | BPF_JSET | BPF_K] = jset_imm, [BPF_JMP | BPF_JNE | BPF_K] = jne_imm, [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg, @@ -1867,99 +2264,64 @@ static const instr_cb_t instr_cb[256] = { [BPF_JMP | BPF_JGE | BPF_X] = jge_reg, [BPF_JMP | BPF_JLT | BPF_X] = jlt_reg, [BPF_JMP | BPF_JLE | BPF_X] = jle_reg, + [BPF_JMP | BPF_JSGT | BPF_X] = jsgt_reg, + [BPF_JMP | BPF_JSGE | BPF_X] = jsge_reg, + [BPF_JMP | BPF_JSLT | BPF_X] = jslt_reg, + [BPF_JMP | BPF_JSLE | BPF_X] = jsle_reg, [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, + [BPF_JMP | BPF_CALL] = call, [BPF_JMP | BPF_EXIT] = goto_out, }; -/* --- Misc code --- */ -static void br_set_offset(u64 *instr, u16 offset) -{ - u16 addr_lo, addr_hi; - - addr_lo = 
offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO)); - addr_hi = offset != addr_lo; - *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO); - *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi); - *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo); -} - /* --- Assembler logic --- */ static int nfp_fixup_branches(struct nfp_prog *nfp_prog) { - struct nfp_insn_meta *meta, *next; - u32 off, br_idx; - u32 idx; + struct nfp_insn_meta *meta, *jmp_dst; + u32 idx, br_idx; - nfp_for_each_insn_walk2(nfp_prog, meta, next) { + list_for_each_entry(meta, &nfp_prog->insns, l) { if (meta->skip) continue; + if (meta->insn.code == (BPF_JMP | BPF_CALL)) + continue; if (BPF_CLASS(meta->insn.code) != BPF_JMP) continue; - br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1; + if (list_is_last(&meta->l, &nfp_prog->insns)) + br_idx = nfp_prog->last_bpf_off; + else + br_idx = list_next_entry(meta, l)->off - 1; + if (!nfp_is_br(nfp_prog->prog[br_idx])) { pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n", br_idx, meta->insn.code, nfp_prog->prog[br_idx]); return -ELOOP; } /* Leave special branches for later */ - if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx])) + if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) != + RELO_BR_REL) continue; - /* Find the target offset in assembler realm */ - off = meta->insn.off; - if (!off) { - pr_err("Fixup found zero offset!!\n"); + if (!meta->jmp_dst) { + pr_err("Non-exit jump doesn't have destination info recorded!!\n"); return -ELOOP; } - while (off && nfp_meta_has_next(nfp_prog, next)) { - next = nfp_meta_next(next); - off--; - } - if (off) { - pr_err("Fixup found too large jump!! %d\n", off); - return -ELOOP; - } + jmp_dst = meta->jmp_dst; - if (next->skip) { + if (jmp_dst->skip) { pr_err("Branch landing on removed instruction!!\n"); return -ELOOP; } - for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off); - idx <= br_idx; idx++) { + for (idx = meta->off; idx <= br_idx; idx++) { if (!nfp_is_br(nfp_prog->prog[idx])) continue; - br_set_offset(&nfp_prog->prog[idx], next->off); + br_set_offset(&nfp_prog->prog[idx], jmp_dst->off); } } - /* Fixup 'goto out's separately, they can be scattered around */ - for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) { - enum br_special special; - - if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE) - continue; - - special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]); - switch (special) { - case OP_BR_NORMAL: - break; - case OP_BR_GO_OUT: - br_set_offset(&nfp_prog->prog[br_idx], - nfp_prog->tgt_out); - break; - case OP_BR_GO_ABORT: - br_set_offset(&nfp_prog->prog[br_idx], - nfp_prog->tgt_abort); - break; - } - - nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL; - } - return 0; } @@ -1987,7 +2349,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) /* Target for aborts */ nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); - emit_br_def(nfp_prog, nfp_prog->tgt_done, 2); + emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); @@ -2014,7 +2376,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) emit_shf(nfp_prog, reg_b(2), reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0); - emit_br_def(nfp_prog, nfp_prog->tgt_done, 2); + emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT); emit_shf(nfp_prog, reg_b(2), reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4); @@ -2033,7 +2395,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog) /* Target for aborts */ 
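
An aside on the fixup pass above (not part of the patch): a single BPF
conditional can lower to more than one machine branch, e.g. wrp_test_reg()
compares the low and high 32-bit words separately, which is why every
branch between meta->off and br_idx is patched with the same destination.
Branches carrying a relocation type other than RELO_BR_REL are skipped
here and resolved only at vNIC load time:

	case RELO_BR_REL:	/* in nfp_bpf_relo_for_vnic(), below */
		br_add_offset(&prog[i], bv->start_off);
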
 	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
 
-	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
 
 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
@@ -2054,7 +2416,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
 	emit_shf(nfp_prog, reg_b(2), reg_imm(0xff), SHF_OP_AND, reg_b(2),
 		 SHF_SC_R_SHF, 0);
 
-	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
 
 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
@@ -2105,6 +2467,8 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
 		nfp_prog->n_translated++;
 	}
 
+	nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;
+
 	nfp_outro(nfp_prog);
 	if (nfp_prog->error)
 		return nfp_prog->error;
@@ -2173,6 +2537,9 @@ static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
 		if (next.src_reg || next.dst_reg)
 			continue;
 
+		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
+			continue;
+
 		meta2->skip = true;
 	}
 }
@@ -2209,40 +2576,294 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
 		if (next1.imm != 0x20 || next2.imm != 0x20)
 			continue;
 
+		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
+		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
+			continue;
+
 		meta2->skip = true;
 		meta3->skip = true;
 	}
 }
 
+/* A load/store pair that forms a memory copy should look like the following:
+ *
+ *   ld_width R, [addr_src + offset_src]
+ *   st_width [addr_dest + offset_dest], R
+ *
+ * The destination register of the load and the source register of the store
+ * must be the same, and the load and store must operate at the same width.
+ * If either addr_src or addr_dest is the stack pointer, we don't do the
+ * CPP optimization, as the stack is modelled by registers on the NFP.
+ */
+static bool
+curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
+		    struct nfp_insn_meta *st_meta)
+{
+	struct bpf_insn *ld = &ld_meta->insn;
+	struct bpf_insn *st = &st_meta->insn;
+
+	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
+		return false;
+
+	if (ld_meta->ptr.type != PTR_TO_PACKET)
+		return false;
+
+	if (st_meta->ptr.type != PTR_TO_PACKET)
+		return false;
+
+	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
+		return false;
+
+	if (ld->dst_reg != st->src_reg)
+		return false;
+
+	/* There is a jump to the store insn in this pair. */
+	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
+		return false;
+
+	return true;
+}
+
+/* Currently, we only support chaining load/store pairs if:
+ *
+ *  - Their address base registers are the same.
+ *  - Their address offsets are in the same order.
+ *  - They operate at the same memory width.
+ *  - There is no jump into the middle of them.
+ */
+static bool
+curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
+			      struct nfp_insn_meta *st_meta,
+			      struct bpf_insn *prev_ld,
+			      struct bpf_insn *prev_st)
+{
+	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
+	struct bpf_insn *ld = &ld_meta->insn;
+	struct bpf_insn *st = &st_meta->insn;
+	s16 prev_ld_off, prev_st_off;
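	/* (Aside, not part of the patch: is_mbpf_load()/is_mbpf_store()
	 * are main.h helpers which mask the BPF size bits off the opcode,
	 * so one compare covers B/H/W/DW:
	 *
	 *	(code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM)
	 *
	 * e.g. (BPF_LDX | BPF_MEM | BPF_DW) is 0x79 and masking the size
	 * bits (0x18) gives 0x61 == BPF_LDX | BPF_MEM.)
	 */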
+	/* This pair is the start pair. */
+	if (!prev_ld)
+		return true;
+
+	prev_size = BPF_LDST_BYTES(prev_ld);
+	curr_size = BPF_LDST_BYTES(ld);
+	prev_ld_base = prev_ld->src_reg;
+	prev_st_base = prev_st->dst_reg;
+	prev_ld_dst = prev_ld->dst_reg;
+	prev_ld_off = prev_ld->off;
+	prev_st_off = prev_st->off;
+
+	if (ld->dst_reg != prev_ld_dst)
+		return false;
+
+	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
+		return false;
+
+	if (curr_size != prev_size)
+		return false;
+
+	/* There is a jump to the head of this pair. */
+	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
+		return false;
+
+	/* Both in ascending order. */
+	if (prev_ld_off + prev_size == ld->off &&
+	    prev_st_off + prev_size == st->off)
+		return true;
+
+	/* Both in descending order. */
+	if (ld->off + curr_size == prev_ld_off &&
+	    st->off + curr_size == prev_st_off)
+		return true;
+
+	return false;
+}
+
+/* Return TRUE if a cross memory access happens. A cross memory access means
+ * the store area overlaps the load area, so a later load might read back a
+ * value written by a previous store; in that case we can't treat the
+ * sequence as a memory copy.
+ */
+static bool
+cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
+		 struct nfp_insn_meta *head_st_meta)
+{
+	s16 head_ld_off, head_st_off, ld_off;
+
+	/* Different pointer types do not overlap. */
+	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
+		return false;
+
+	/* Load and store are both PTR_TO_PACKET, check ID info. */
+	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
+		return true;
+
+	/* Canonicalize the offsets: express all of them against the original
+	 * base register.
+	 */
+	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
+	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
+	ld_off = ld->off + head_ld_meta->ptr.off;
+
+	/* Ascending order cross. */
+	if (ld_off > head_ld_off &&
+	    head_ld_off < head_st_off && ld_off >= head_st_off)
+		return true;
+
+	/* Descending order cross. */
+	if (ld_off < head_ld_off &&
+	    head_ld_off > head_st_off && ld_off <= head_st_off)
+		return true;
+
+	return false;
+}
+
+/* This pass tries to identify the following instruction sequences:
+ *
+ *   load R, [regA + offA]
+ *   store [regB + offB], R
+ *   load R, [regA + offA + const_imm_A]
+ *   store [regB + offB + const_imm_A], R
+ *   load R, [regA + offA + 2 * const_imm_A]
+ *   store [regB + offB + 2 * const_imm_A], R
+ *   ...
+ *
+ * The above sequence is typically generated by the compiler when lowering
+ * memcpy. The NFP prefers using CPP instructions to accelerate it.
+ */
+static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
+{
+	struct nfp_insn_meta *head_ld_meta = NULL;
+	struct nfp_insn_meta *head_st_meta = NULL;
+	struct nfp_insn_meta *meta1, *meta2;
+	struct bpf_insn *prev_ld = NULL;
+	struct bpf_insn *prev_st = NULL;
+	u8 count = 0;
+
+	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
+		struct bpf_insn *ld = &meta1->insn;
+		struct bpf_insn *st = &meta2->insn;
+
+		/* Reset the record status if any of the following is true:
+		 *   - The current insn pair is not a load/store pair.
+		 *   - The load/store pair doesn't chain with the previous one.
+		 *   - The chained load/store pair crosses the previous pair.
+		 *   - The chained load/store pair has a total memory copy
+		 *     size beyond 128 bytes, which is the maximum length a
+		 *     single NFP CPP command can transfer.
+		 */
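		/* (Illustration, not part of the patch: the shortest chain
		 * the pass rewrites is two pairs, e.g. built with the insn
		 * macros from linux/filter.h:
		 *
		 *	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
		 *	BPF_STX_MEM(BPF_W, BPF_REG_3, BPF_REG_2, 0),
		 *	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
		 *	BPF_STX_MEM(BPF_W, BPF_REG_3, BPF_REG_2, 4),
		 *
		 * with r1 and r3 both pointing into the packet; this 8-byte
		 * copy becomes one CPP read and one direct write8 command in
		 * nfp_cpp_memcpy().)
		 */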
+		if (!curr_pair_is_memcpy(meta1, meta2) ||
+		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
+						   prev_st) ||
+		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
+						       head_st_meta) ||
+				      head_ld_meta->ldst_gather_len >= 128))) {
+			if (!count)
+				continue;
+
+			if (count > 1) {
+				s16 prev_ld_off = prev_ld->off;
+				s16 prev_st_off = prev_st->off;
+				s16 head_ld_off = head_ld_meta->insn.off;
+
+				if (prev_ld_off < head_ld_off) {
+					head_ld_meta->insn.off = prev_ld_off;
+					head_st_meta->insn.off = prev_st_off;
+					head_ld_meta->ldst_gather_len =
+						-head_ld_meta->ldst_gather_len;
+				}
+
+				head_ld_meta->paired_st = &head_st_meta->insn;
+				head_st_meta->skip = true;
+			} else {
+				head_ld_meta->ldst_gather_len = 0;
+			}
+
+			/* If the chain is ended by a load/store pair then this
+			 * could serve as the new head of the next chain.
+			 */
+			if (curr_pair_is_memcpy(meta1, meta2)) {
+				head_ld_meta = meta1;
+				head_st_meta = meta2;
+				head_ld_meta->ldst_gather_len =
+					BPF_LDST_BYTES(ld);
+				meta1 = nfp_meta_next(meta1);
+				meta2 = nfp_meta_next(meta2);
+				prev_ld = ld;
+				prev_st = st;
+				count = 1;
+			} else {
+				head_ld_meta = NULL;
+				head_st_meta = NULL;
+				prev_ld = NULL;
+				prev_st = NULL;
+				count = 0;
+			}
+
+			continue;
+		}
+
+		if (!head_ld_meta) {
+			head_ld_meta = meta1;
+			head_st_meta = meta2;
+		} else {
+			meta1->skip = true;
+			meta2->skip = true;
+		}
+
+		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
+		meta1 = nfp_meta_next(meta1);
+		meta2 = nfp_meta_next(meta2);
+		prev_ld = ld;
+		prev_st = st;
+		count++;
+	}
+}
+
 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
 {
 	nfp_bpf_opt_reg_init(nfp_prog);
 
 	nfp_bpf_opt_ld_mask(nfp_prog);
 	nfp_bpf_opt_ld_shift(nfp_prog);
+	nfp_bpf_opt_ldst_gather(nfp_prog);
 
 	return 0;
 }
 
-static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
+static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
 {
+	__le64 *ustore = (__force __le64 *)prog;
 	int i;
 
-	for (i = 0; i < nfp_prog->prog_len; i++) {
+	for (i = 0; i < len; i++) {
 		int err;
 
-		err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]);
+		err = nfp_ustore_check_valid_no_ecc(prog[i]);
 		if (err)
 			return err;
 
-		nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]);
-
-		ustore[i] = cpu_to_le64(nfp_prog->prog[i]);
+		ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
 	}
 
 	return 0;
 }
 
+static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
+{
+	void *prog;
+
+	prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
+	if (!prog)
+		return;
+
+	nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
+	memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
+	kvfree(nfp_prog->prog);
+	nfp_prog->prog = prog;
+}
+
 int nfp_bpf_jit(struct nfp_prog *nfp_prog)
 {
 	int ret;
@@ -2258,5 +2879,102 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog)
 		return -EINVAL;
 	}
 
-	return nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)nfp_prog->prog);
+	nfp_bpf_prog_trim(nfp_prog);
+
+	return ret;
+}
+
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
+{
+	struct nfp_insn_meta *meta;
+
+	/* Another pass to record jump information.
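	 *
	 * For example, a jump at BPF insn 5 with off == 3 lands on insn
	 * 5 + 1 + 3 == 9 (BPF jump offsets are relative to the next
	 * instruction), so insn 9's meta gets FLAG_INSN_IS_JUMP_DST set
	 * and meta->jmp_dst points at it.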
*/ + list_for_each_entry(meta, &nfp_prog->insns, l) { + u64 code = meta->insn.code; + + if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT && + BPF_OP(code) != BPF_CALL) { + struct nfp_insn_meta *dst_meta; + unsigned short dst_indx; + + dst_indx = meta->n + 1 + meta->insn.off; + dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx, + cnt); + + meta->jmp_dst = dst_meta; + dst_meta->flags |= FLAG_INSN_IS_JUMP_DST; + } + } +} + +bool nfp_bpf_supported_opcode(u8 code) +{ + return !!instr_cb[code]; +} + +void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv) +{ + unsigned int i; + u64 *prog; + int err; + + prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64), + GFP_KERNEL); + if (!prog) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < nfp_prog->prog_len; i++) { + enum nfp_relo_type special; + u32 val; + + special = FIELD_GET(OP_RELO_TYPE, prog[i]); + switch (special) { + case RELO_NONE: + continue; + case RELO_BR_REL: + br_add_offset(&prog[i], bv->start_off); + break; + case RELO_BR_GO_OUT: + br_set_offset(&prog[i], + nfp_prog->tgt_out + bv->start_off); + break; + case RELO_BR_GO_ABORT: + br_set_offset(&prog[i], + nfp_prog->tgt_abort + bv->start_off); + break; + case RELO_BR_NEXT_PKT: + br_set_offset(&prog[i], bv->tgt_done); + break; + case RELO_BR_HELPER: + val = br_get_offset(prog[i]); + val -= BR_OFF_RELO; + switch (val) { + case BPF_FUNC_map_lookup_elem: + val = nfp_prog->bpf->helpers.map_lookup; + break; + default: + pr_err("relocation of unknown helper %d\n", + val); + err = -EINVAL; + goto err_free_prog; + } + br_set_offset(&prog[i], val); + break; + case RELO_IMMED_REL: + immed_add_value(&prog[i], bv->start_off); + break; + } + + prog[i] &= ~OP_RELO_TYPE; + } + + err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len); + if (err) + goto err_free_prog; + + return prog; + +err_free_prog: + kfree(prog); + return ERR_PTR(err); } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 13190aa09faf..322027792fe8 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -34,10 +34,12 @@ #include <net/pkt_cls.h> #include "../nfpcore/nfp_cpp.h" +#include "../nfpcore/nfp_nffw.h" #include "../nfp_app.h" #include "../nfp_main.h" #include "../nfp_net.h" #include "../nfp_port.h" +#include "fw.h" #include "main.h" static bool nfp_net_ebpf_capable(struct nfp_net *nn) @@ -52,7 +54,7 @@ static bool nfp_net_ebpf_capable(struct nfp_net *nn) static int nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn, - struct bpf_prog *prog) + struct bpf_prog *prog, struct netlink_ext_ack *extack) { bool running, xdp_running; int ret; @@ -68,10 +70,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn, if (prog && running && !xdp_running) return -EBUSY; - ret = nfp_net_bpf_offload(nn, prog, running); + ret = nfp_net_bpf_offload(nn, prog, running, extack); /* Stop offload if replace not possible */ if (ret && prog) - nfp_bpf_xdp_offload(app, nn, NULL); + nfp_bpf_xdp_offload(app, nn, NULL, extack); nn->dp.bpf_offload_xdp = prog && !ret; return ret; @@ -85,16 +87,21 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn) static int nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) { + struct nfp_bpf_vnic *bv; int err; - nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL); - if (!nn->app_priv) + bv = kzalloc(sizeof(*bv), GFP_KERNEL); + if (!bv) return -ENOMEM; + nn->app_priv = bv; err = 
nfp_app_nic_vnic_alloc(app, nn, id); if (err) goto err_free_priv; + bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); + bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE); + return 0; err_free_priv: kfree(nn->app_priv); @@ -105,8 +112,6 @@ static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn) { struct nfp_bpf_vnic *bv = nn->app_priv; - if (nn->dp.bpf_offload_xdp) - nfp_bpf_xdp_offload(app, nn, NULL); WARN_ON(bv->tc_prog); kfree(bv); } @@ -120,17 +125,29 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, struct nfp_bpf_vnic *bv; int err; - if (type != TC_SETUP_CLSBPF || - !tc_can_offload(nn->dp.netdev) || - !nfp_net_ebpf_capable(nn) || - cls_bpf->common.protocol != htons(ETH_P_ALL) || - cls_bpf->common.chain_index) + if (type != TC_SETUP_CLSBPF) { + NL_SET_ERR_MSG_MOD(cls_bpf->common.extack, + "only offload of BPF classifiers supported"); + return -EOPNOTSUPP; + } + if (!tc_cls_can_offload_and_chain0(nn->dp.netdev, &cls_bpf->common)) + return -EOPNOTSUPP; + if (!nfp_net_ebpf_capable(nn)) { + NL_SET_ERR_MSG_MOD(cls_bpf->common.extack, + "NFP firmware does not support eBPF offload"); return -EOPNOTSUPP; + } + if (cls_bpf->common.protocol != htons(ETH_P_ALL)) { + NL_SET_ERR_MSG_MOD(cls_bpf->common.extack, + "only ETH_P_ALL supported as filter protocol"); + return -EOPNOTSUPP; + } /* Only support TC direct action */ if (!cls_bpf->exts_integrated || tcf_exts_has_actions(cls_bpf->exts)) { - nn_err(nn, "only direct action with no legacy actions supported\n"); + NL_SET_ERR_MSG_MOD(cls_bpf->common.extack, + "only direct action with no legacy actions supported"); return -EOPNOTSUPP; } @@ -147,7 +164,8 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, return 0; } - err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog); + err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog, + cls_bpf->common.extack); if (err) return err; @@ -191,23 +209,215 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn) { - return nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF; + struct nfp_bpf_vnic *bv = nn->app_priv; + + return !!bv->tc_prog; +} + +static int +nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu) +{ + struct nfp_net *nn = netdev_priv(netdev); + unsigned int max_mtu; + + if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) + return 0; + + max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; + if (new_mtu > max_mtu) { + nn_info(nn, "BPF offload active, MTU over %u not supported\n", + max_mtu); + return -EBUSY; + } + return 0; +} + +static int +nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value, + u32 length) +{ + struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value; + struct nfp_cpp *cpp = bpf->app->pf->cpp; + + if (length < sizeof(*cap)) { + nfp_err(cpp, "truncated adjust_head TLV: %d\n", length); + return -EINVAL; + } + + bpf->adjust_head.flags = readl(&cap->flags); + bpf->adjust_head.off_min = readl(&cap->off_min); + bpf->adjust_head.off_max = readl(&cap->off_max); + bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub); + bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add); + + if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) { + nfp_err(cpp, "invalid adjust_head TLV: min > max\n"); + return -EINVAL; + } + if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) || + !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) { + nfp_warn(cpp, "disabling adjust_head - driver expects min/max to fit in as 
immediates\n"); + memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head)); + return 0; + } + + return 0; +} + +static int +nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length) +{ + struct nfp_bpf_cap_tlv_func __iomem *cap = value; + + if (length < sizeof(*cap)) { + nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length); + return -EINVAL; + } + + switch (readl(&cap->func_id)) { + case BPF_FUNC_map_lookup_elem: + bpf->helpers.map_lookup = readl(&cap->func_addr); + break; + } + + return 0; +} + +static int +nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length) +{ + struct nfp_bpf_cap_tlv_maps __iomem *cap = value; + + if (length < sizeof(*cap)) { + nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length); + return -EINVAL; + } + + bpf->maps.types = readl(&cap->types); + bpf->maps.max_maps = readl(&cap->max_maps); + bpf->maps.max_elems = readl(&cap->max_elems); + bpf->maps.max_key_sz = readl(&cap->max_key_sz); + bpf->maps.max_val_sz = readl(&cap->max_val_sz); + bpf->maps.max_elem_sz = readl(&cap->max_elem_sz); + + return 0; +} + +static int nfp_bpf_parse_capabilities(struct nfp_app *app) +{ + struct nfp_cpp *cpp = app->pf->cpp; + struct nfp_cpp_area *area; + u8 __iomem *mem, *start; + + mem = nfp_rtsym_map(app->pf->rtbl, "_abi_bpf_capabilities", "bpf.cap", + 8, &area); + if (IS_ERR(mem)) + return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem); + + start = mem; + while (mem - start + 8 < nfp_cpp_area_size(area)) { + u8 __iomem *value; + u32 type, length; + + type = readl(mem); + length = readl(mem + 4); + value = mem + 8; + + mem += 8 + length; + if (mem - start > nfp_cpp_area_size(area)) + goto err_release_free; + + switch (type) { + case NFP_BPF_CAP_TYPE_FUNC: + if (nfp_bpf_parse_cap_func(app->priv, value, length)) + goto err_release_free; + break; + case NFP_BPF_CAP_TYPE_ADJUST_HEAD: + if (nfp_bpf_parse_cap_adjust_head(app->priv, value, + length)) + goto err_release_free; + break; + case NFP_BPF_CAP_TYPE_MAPS: + if (nfp_bpf_parse_cap_maps(app->priv, value, length)) + goto err_release_free; + break; + default: + nfp_dbg(cpp, "unknown BPF capability: %d\n", type); + break; + } + } + if (mem - start != nfp_cpp_area_size(area)) { + nfp_err(cpp, "BPF capabilities left after parsing, parsed:%zd total length:%zu\n", + mem - start, nfp_cpp_area_size(area)); + goto err_release_free; + } + + nfp_cpp_area_release_free(area); + + return 0; + +err_release_free: + nfp_err(cpp, "invalid BPF capabilities at offset:%zd\n", mem - start); + nfp_cpp_area_release_free(area); + return -EINVAL; +} + +static int nfp_bpf_init(struct nfp_app *app) +{ + struct nfp_app_bpf *bpf; + int err; + + bpf = kzalloc(sizeof(*bpf), GFP_KERNEL); + if (!bpf) + return -ENOMEM; + bpf->app = app; + app->priv = bpf; + + skb_queue_head_init(&bpf->cmsg_replies); + init_waitqueue_head(&bpf->cmsg_wq); + INIT_LIST_HEAD(&bpf->map_list); + + err = nfp_bpf_parse_capabilities(app); + if (err) + goto err_free_bpf; + + return 0; + +err_free_bpf: + kfree(bpf); + return err; +} + +static void nfp_bpf_clean(struct nfp_app *app) +{ + struct nfp_app_bpf *bpf = app->priv; + + WARN_ON(!skb_queue_empty(&bpf->cmsg_replies)); + WARN_ON(!list_empty(&bpf->map_list)); + WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use); + kfree(bpf); } const struct nfp_app_type app_bpf = { .id = NFP_APP_BPF_NIC, .name = "ebpf", + .ctrl_cap_mask = 0, + + .init = nfp_bpf_init, + .clean = nfp_bpf_clean, + + .change_mtu = nfp_bpf_change_mtu, + .extra_cap = nfp_bpf_extra_cap, .vnic_alloc = nfp_bpf_vnic_alloc, .vnic_free 
= nfp_bpf_vnic_free, + .ctrl_msg_rx = nfp_bpf_ctrl_msg_rx, + .setup_tc = nfp_bpf_setup_tc, .tc_busy = nfp_bpf_tc_busy, + .bpf = nfp_ndo_bpf, .xdp_offload = nfp_bpf_xdp_offload, - - .bpf_verifier_prep = nfp_bpf_verifier_prep, - .bpf_translate = nfp_bpf_translate, - .bpf_destroy = nfp_bpf_destroy, }; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 57b6043177a3..424fe8338105 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016 Netronome Systems, Inc. + * Copyright (C) 2016-2017 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -37,22 +37,40 @@ #include <linux/bitfield.h> #include <linux/bpf.h> #include <linux/bpf_verifier.h> +#include <linux/kernel.h> #include <linux/list.h> +#include <linux/skbuff.h> #include <linux/types.h> +#include <linux/wait.h> #include "../nfp_asm.h" +#include "fw.h" -/* For branch fixup logic use up-most byte of branch instruction as scratch +/* For relocation logic use up-most byte of branch instruction as scratch * area. Remember to clear this before sending instructions to HW! */ -#define OP_BR_SPECIAL 0xff00000000000000ULL - -enum br_special { - OP_BR_NORMAL = 0, - OP_BR_GO_OUT, - OP_BR_GO_ABORT, +#define OP_RELO_TYPE 0xff00000000000000ULL + +enum nfp_relo_type { + RELO_NONE = 0, + /* standard internal jumps */ + RELO_BR_REL, + /* internal jumps to parts of the outro */ + RELO_BR_GO_OUT, + RELO_BR_GO_ABORT, + /* external jumps to fixed addresses */ + RELO_BR_NEXT_PKT, + RELO_BR_HELPER, + /* immediate relocation against load address */ + RELO_IMMED_REL, }; +/* To make absolute relocated branches (branches other than RELO_BR_REL) + * distinguishable in user space dumps from normal jumps, add a large offset + * to them. 
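 *
 * For example, a helper call is emitted as a branch to
 * BR_OFF_RELO + BPF_FUNC_map_lookup_elem; at vNIC load time
 * nfp_bpf_relo_for_vnic() subtracts BR_OFF_RELO again to recover the
 * helper id and patches in the firmware address, so any branch target
 * >= 15000 in a dump is a relocated one.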
+ */
+#define BR_OFF_RELO		15000
+
 enum static_regs {
 	STATIC_REG_IMM		= 21, /* Bank AB */
 	STATIC_REG_STACK	= 22, /* Bank A */
@@ -78,6 +96,89 @@ enum pkt_vec {
 #define NFP_BPF_ABI_FLAGS	reg_imm(0)
 #define NFP_BPF_ABI_FLAG_MARK	1
 
+/**
+ * struct nfp_app_bpf - bpf app priv structure
+ * @app:		backpointer to the app
+ *
+ * @tag_allocator:	bitmap of control message tags in use
+ * @tag_alloc_next:	next tag bit to allocate
+ * @tag_alloc_last:	next tag bit to be freed
+ *
+ * @cmsg_replies:	received cmsg replies waiting to be consumed
+ * @cmsg_wq:		wait queue for waiting for cmsg replies
+ *
+ * @map_list:		list of offloaded maps
+ * @maps_in_use:	number of currently offloaded maps
+ * @map_elems_in_use:	number of elements allocated to offloaded maps
+ *
+ * @adjust_head:	adjust head capability
+ * @flags:		extra flags for adjust head
+ * @off_min:		minimal packet offset within buffer required
+ * @off_max:		maximum packet offset within buffer required
+ * @guaranteed_sub:	amount of negative adjustment guaranteed possible
+ * @guaranteed_add:	amount of positive adjustment guaranteed possible
+ *
+ * @maps:		map capability
+ * @types:		supported map types
+ * @max_maps:		max number of maps supported
+ * @max_elems:		max number of entries in each map
+ * @max_key_sz:		max size of map key
+ * @max_val_sz:		max size of map value
+ * @max_elem_sz:	max size of map entry (key + value)
+ *
+ * @helpers:		helper addresses for various calls
+ * @map_lookup:		map lookup helper address
+ */
+struct nfp_app_bpf {
+	struct nfp_app *app;
+
+	DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
+	u16 tag_alloc_next;
+	u16 tag_alloc_last;
+
+	struct sk_buff_head cmsg_replies;
+	struct wait_queue_head cmsg_wq;
+
+	struct list_head map_list;
+	unsigned int maps_in_use;
+	unsigned int map_elems_in_use;
+
+	struct nfp_bpf_cap_adjust_head {
+		u32 flags;
+		int off_min;
+		int off_max;
+		int guaranteed_sub;
+		int guaranteed_add;
+	} adjust_head;
+
+	struct {
+		u32 types;
+		u32 max_maps;
+		u32 max_elems;
+		u32 max_key_sz;
+		u32 max_val_sz;
+		u32 max_elem_sz;
+	} maps;
+
+	struct {
+		u32 map_lookup;
+	} helpers;
+};
+
+/**
+ * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
+ * @offmap:	pointer to the offloaded BPF map
+ * @bpf:	back pointer to bpf app private structure
+ * @tid:	table id identifying map on datapath
+ * @l:		link on the nfp_app_bpf->map_list list
+ */
+struct nfp_bpf_map {
+	struct bpf_offloaded_map *offmap;
+	struct nfp_app_bpf *bpf;
+	u32 tid;
+	struct list_head l;
+};
+
 struct nfp_prog;
 struct nfp_insn_meta;
 typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
@@ -89,23 +190,47 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
 #define nfp_meta_next(meta)	list_next_entry(meta, l)
 #define nfp_meta_prev(meta)	list_prev_entry(meta, l)
 
+#define FLAG_INSN_IS_JUMP_DST	BIT(0)
+
 /**
  * struct nfp_insn_meta - BPF instruction wrapper
  * @insn: BPF instruction
  * @ptr: pointer type for memory operations
+ * @ldst_gather_len: memcpy length gathered from load/store sequence
+ * @paired_st: the paired store insn at the head of the sequence
 * @ptr_not_const: pointer is not always constant
+ * @jmp_dst: destination info for jump instructions
+ * @func_id: function id for call instructions
+ * @arg1: arg1 for call instructions
+ * @arg2: arg2 for call instructions
+ * @arg2_var_off: arg2 changes stack offset on different paths
 * @off: index of first generated machine instruction (in nfp_prog.prog)
 * @n: eBPF instruction number
+ * @flags: eBPF instruction extra optimization
flags * @skip: skip this instruction (optimized out) * @double_cb: callback for second part of the instruction * @l: link on nfp_prog->insns list */ struct nfp_insn_meta { struct bpf_insn insn; - struct bpf_reg_state ptr; - bool ptr_not_const; + union { + struct { + struct bpf_reg_state ptr; + struct bpf_insn *paired_st; + s16 ldst_gather_len; + bool ptr_not_const; + }; + struct nfp_insn_meta *jmp_dst; + struct { + u32 func_id; + struct bpf_reg_state arg1; + struct bpf_reg_state arg2; + bool arg2_var_off; + }; + }; unsigned int off; unsigned short n; + unsigned short flags; bool skip; instr_cb_t double_cb; @@ -134,23 +259,36 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta) return BPF_MODE(meta->insn.code); } +static inline bool is_mbpf_load(const struct nfp_insn_meta *meta) +{ + return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM); +} + +static inline bool is_mbpf_store(const struct nfp_insn_meta *meta) +{ + return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM); +} + /** * struct nfp_prog - nfp BPF program + * @bpf: backpointer to the bpf app priv structure * @prog: machine code * @prog_len: number of valid instructions in @prog array * @__prog_alloc_len: alloc size of @prog array * @verifier_meta: temporary storage for verifier's insn meta * @type: BPF program type - * @start_off: address of the first instruction in the memory + * @last_bpf_off: address of the last instruction translated from BPF * @tgt_out: jump target for normal exit * @tgt_abort: jump target for abort (e.g. access outside of packet buffer) - * @tgt_done: jump target to get the next packet * @n_translated: number of successfully translated instructions (for errors) * @error: error code if something went wrong * @stack_depth: max stack depth from the verifier + * @adjust_head_location: if program has single adjust head call - the insn no. 
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta) */ struct nfp_prog { + struct nfp_app_bpf *bpf; + u64 *prog; unsigned int prog_len; unsigned int __prog_alloc_len; @@ -159,15 +297,15 @@ struct nfp_prog { enum bpf_prog_type type; - unsigned int start_off; + unsigned int last_bpf_off; unsigned int tgt_out; unsigned int tgt_abort; - unsigned int tgt_done; unsigned int n_translated; int error; unsigned int stack_depth; + unsigned int adjust_head_location; struct list_head insns; }; @@ -175,26 +313,49 @@ struct nfp_prog { /** * struct nfp_bpf_vnic - per-vNIC BPF priv structure * @tc_prog: currently loaded cls_bpf program + * @start_off: address of the first instruction in the memory + * @tgt_done: jump target to get the next packet */ struct nfp_bpf_vnic { struct bpf_prog *tc_prog; + unsigned int start_off; + unsigned int tgt_done; }; +void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt); int nfp_bpf_jit(struct nfp_prog *prog); +bool nfp_bpf_supported_opcode(u8 code); -extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops; +extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops; struct netdev_bpf; struct nfp_app; struct nfp_net; +int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, + struct netdev_bpf *bpf); int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, - bool old_prog); - -int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn, - struct netdev_bpf *bpf); -int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn, - struct bpf_prog *prog); -int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn, - struct bpf_prog *prog); + bool old_prog, struct netlink_ext_ack *extack); + +struct nfp_insn_meta * +nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + unsigned int insn_idx, unsigned int n_insns); + +void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv); + +long long int +nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map); +void +nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map); +int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap, + void *next_key); +int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap, + void *key, void *value, u64 flags); +int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key); +int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap, + void *key, void *value); +int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap, + void *key, void *next_key); + +void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb); #endif diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index bc879aeb62d4..0a7732385469 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016 Netronome Systems, Inc. + * Copyright (C) 2016-2017 Netronome Systems, Inc. 
* * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -36,18 +36,23 @@ * Netronome network device driver: TC offload functions for PF and VF */ +#define pr_fmt(fmt) "NFP net bpf: " fmt + +#include <linux/bpf.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/list.h> +#include <linux/mm.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> #include "main.h" +#include "../nfp_app.h" #include "../nfp_net_ctrl.h" #include "../nfp_net.h" @@ -55,11 +60,10 @@ static int nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, unsigned int cnt) { + struct nfp_insn_meta *meta; unsigned int i; for (i = 0; i < cnt; i++) { - struct nfp_insn_meta *meta; - meta = kzalloc(sizeof(*meta), GFP_KERNEL); if (!meta) return -ENOMEM; @@ -70,6 +74,8 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, list_add_tail(&meta->l, &nfp_prog->insns); } + nfp_bpf_jit_prepare(nfp_prog, cnt); + return 0; } @@ -84,8 +90,9 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog) kfree(nfp_prog); } -int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn, - struct netdev_bpf *bpf) +static int +nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn, + struct netdev_bpf *bpf) { struct bpf_prog *prog = bpf->verifier.prog; struct nfp_prog *nfp_prog; @@ -98,6 +105,7 @@ int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn, INIT_LIST_HEAD(&nfp_prog->insns); nfp_prog->type = prog->type; + nfp_prog->bpf = app->priv; ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len); if (ret) @@ -114,12 +122,12 @@ err_free: return ret; } -int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn, - struct bpf_prog *prog) +static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog) { struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; unsigned int stack_size; unsigned int max_instr; + int err; stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; if (prog->aux->stack_depth > stack_size) { @@ -127,50 +135,179 @@ int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn, prog->aux->stack_depth, stack_size); return -EOPNOTSUPP; } - - nfp_prog->stack_depth = prog->aux->stack_depth; - nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); - nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE); + nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4); max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); nfp_prog->__prog_alloc_len = max_instr * sizeof(u64); - nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL); + nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL); if (!nfp_prog->prog) return -ENOMEM; - return nfp_bpf_jit(nfp_prog); + err = nfp_bpf_jit(nfp_prog); + if (err) + return err; + + prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64); + prog->aux->offload->jited_image = nfp_prog->prog; + + return 0; } -int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn, - struct bpf_prog *prog) +static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog) { struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; - kfree(nfp_prog->prog); + kvfree(nfp_prog->prog); nfp_prog_free(nfp_prog); return 0; } -static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog) +static int +nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap, + void *key, void 
*next_key) +{ + if (!key) + return nfp_bpf_ctrl_getfirst_entry(offmap, next_key); + return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key); +} + +static int +nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key) +{ + if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) + return -EINVAL; + return nfp_bpf_ctrl_del_entry(offmap, key); +} + +static const struct bpf_map_dev_ops nfp_bpf_map_ops = { + .map_get_next_key = nfp_bpf_map_get_next_key, + .map_lookup_elem = nfp_bpf_ctrl_lookup_entry, + .map_update_elem = nfp_bpf_ctrl_update_entry, + .map_delete_elem = nfp_bpf_map_delete_elem, +}; + +static int +nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap) +{ + struct nfp_bpf_map *nfp_map; + long long int res; + + if (!bpf->maps.types) + return -EOPNOTSUPP; + + if (offmap->map.map_flags || + offmap->map.numa_node != NUMA_NO_NODE) { + pr_info("map flags are not supported\n"); + return -EINVAL; + } + + if (!(bpf->maps.types & 1 << offmap->map.map_type)) { + pr_info("map type not supported\n"); + return -EOPNOTSUPP; + } + if (bpf->maps.max_maps == bpf->maps_in_use) { + pr_info("too many maps for a device\n"); + return -ENOMEM; + } + if (bpf->maps.max_elems - bpf->map_elems_in_use < + offmap->map.max_entries) { + pr_info("map with too many elements: %u, left: %u\n", + offmap->map.max_entries, + bpf->maps.max_elems - bpf->map_elems_in_use); + return -ENOMEM; + } + if (offmap->map.key_size > bpf->maps.max_key_sz || + offmap->map.value_size > bpf->maps.max_val_sz || + round_up(offmap->map.key_size, 8) + + round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) { + pr_info("elements don't fit in device constraints\n"); + return -ENOMEM; + } + + nfp_map = kzalloc(sizeof(*nfp_map), GFP_USER); + if (!nfp_map) + return -ENOMEM; + + offmap->dev_priv = nfp_map; + nfp_map->offmap = offmap; + nfp_map->bpf = bpf; + + res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map); + if (res < 0) { + kfree(nfp_map); + return res; + } + + nfp_map->tid = res; + offmap->dev_ops = &nfp_bpf_map_ops; + bpf->maps_in_use++; + bpf->map_elems_in_use += offmap->map.max_entries; + list_add_tail(&nfp_map->l, &bpf->map_list); + + return 0; +} + +static int +nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap) +{ + struct nfp_bpf_map *nfp_map = offmap->dev_priv; + + nfp_bpf_ctrl_free_map(bpf, nfp_map); + list_del_init(&nfp_map->l); + bpf->map_elems_in_use -= offmap->map.max_entries; + bpf->maps_in_use--; + kfree(nfp_map); + + return 0; +} + +int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf) +{ + switch (bpf->command) { + case BPF_OFFLOAD_VERIFIER_PREP: + return nfp_bpf_verifier_prep(app, nn, bpf); + case BPF_OFFLOAD_TRANSLATE: + return nfp_bpf_translate(nn, bpf->offload.prog); + case BPF_OFFLOAD_DESTROY: + return nfp_bpf_destroy(nn, bpf->offload.prog); + case BPF_OFFLOAD_MAP_ALLOC: + return nfp_bpf_map_alloc(app->priv, bpf->offmap); + case BPF_OFFLOAD_MAP_FREE: + return nfp_bpf_map_free(app->priv, bpf->offmap); + default: + return -EINVAL; + } +} + +static int +nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog, + struct netlink_ext_ack *extack) { struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; unsigned int max_mtu; dma_addr_t dma_addr; + void *img; int err; max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; if (max_mtu < nn->dp.netdev->mtu) { - nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n"); + NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split 
boundary"); return -EOPNOTSUPP; } - dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog, + img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv); + if (IS_ERR(img)) + return PTR_ERR(img); + + dma_addr = dma_map_single(nn->dp.dev, img, nfp_prog->prog_len * sizeof(u64), DMA_TO_DEVICE); - if (dma_mapping_error(nn->dp.dev, dma_addr)) + if (dma_mapping_error(nn->dp.dev, dma_addr)) { + kfree(img); return -ENOMEM; + } nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len); nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr); @@ -178,15 +315,18 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog) /* Load up the JITed code */ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF); if (err) - nn_err(nn, "FW command error while loading BPF: %d\n", err); + NL_SET_ERR_MSG_MOD(extack, + "FW command error while loading BPF"); dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64), DMA_TO_DEVICE); + kfree(img); return err; } -static void nfp_net_bpf_start(struct nfp_net *nn) +static void +nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack) { int err; @@ -195,7 +335,8 @@ static void nfp_net_bpf_start(struct nfp_net *nn) nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl); err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); if (err) - nn_err(nn, "FW command error while enabling BPF: %d\n", err); + NL_SET_ERR_MSG_MOD(extack, + "FW command error while enabling BPF"); } static int nfp_net_bpf_stop(struct nfp_net *nn) @@ -210,12 +351,12 @@ static int nfp_net_bpf_stop(struct nfp_net *nn) } int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, - bool old_prog) + bool old_prog, struct netlink_ext_ack *extack) { int err; if (prog) { - struct bpf_dev_offload *offload = prog->aux->offload; + struct bpf_prog_offload *offload = prog->aux->offload; if (!offload) return -EINVAL; @@ -228,7 +369,8 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP); if (!(cap & NFP_NET_BPF_CAP_RELO)) { - nn_err(nn, "FW does not support live reload\n"); + NL_SET_ERR_MSG_MOD(extack, + "FW does not support live reload"); return -EBUSY; } } @@ -240,12 +382,12 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, if (old_prog && !prog) return nfp_net_bpf_stop(nn); - err = nfp_net_bpf_load(nn, prog); + err = nfp_net_bpf_load(nn, prog, extack); if (err) return err; if (!old_prog) - nfp_net_bpf_start(nn); + nfp_net_bpf_start(nn, extack); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 8d43491ddd6b..479f602887e9 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016 Netronome Systems, Inc. + * Copyright (C) 2016-2017 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -31,16 +31,18 @@ * SOFTWARE. */ -#define pr_fmt(fmt) "NFP net bpf: " fmt - #include <linux/bpf.h> #include <linux/bpf_verifier.h> #include <linux/kernel.h> #include <linux/pkt_cls.h> +#include "fw.h" #include "main.h" -static struct nfp_insn_meta * +#define pr_vlog(env, fmt, ...) 
\ + bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__) + +struct nfp_insn_meta * nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, unsigned int insn_idx, unsigned int n_insns) { @@ -68,6 +70,114 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, return meta; } +static void +nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog, + struct nfp_insn_meta *meta, + const struct bpf_reg_state *reg2) +{ + unsigned int location = UINT_MAX; + int imm; + + /* Datapath usually can give us guarantees on how much adjust head + * can be done without the need for any checks. Optimize the simple + * case where there is only one adjust head by a constant. + */ + if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off)) + goto exit_set_location; + imm = reg2->var_off.value; + /* Translator will skip all checks, we need to guarantee min pkt len */ + if (imm > ETH_ZLEN - ETH_HLEN) + goto exit_set_location; + if (imm > (int)bpf->adjust_head.guaranteed_add || + imm < -bpf->adjust_head.guaranteed_sub) + goto exit_set_location; + + if (nfp_prog->adjust_head_location) { + /* Only one call per program allowed */ + if (nfp_prog->adjust_head_location != meta->n) + goto exit_set_location; + + if (meta->arg2.var_off.value != imm) + goto exit_set_location; + } + + location = meta->n; +exit_set_location: + nfp_prog->adjust_head_location = location; +} + +static int +nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env, + struct nfp_insn_meta *meta) +{ + const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1; + const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2; + struct nfp_app_bpf *bpf = nfp_prog->bpf; + u32 func_id = meta->insn.imm; + s64 off, old_off; + + switch (func_id) { + case BPF_FUNC_xdp_adjust_head: + if (!bpf->adjust_head.off_max) { + pr_vlog(env, "adjust_head not supported by FW\n"); + return -EOPNOTSUPP; + } + if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) { + pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n"); + return -EOPNOTSUPP; + } + + nfp_record_adjust_head(bpf, nfp_prog, meta, reg2); + break; + + case BPF_FUNC_map_lookup_elem: + if (!bpf->helpers.map_lookup) { + pr_vlog(env, "map_lookup: not supported by FW\n"); + return -EOPNOTSUPP; + } + if (reg2->type != PTR_TO_STACK) { + pr_vlog(env, + "map_lookup: unsupported key ptr type %d\n", + reg2->type); + return -EOPNOTSUPP; + } + if (!tnum_is_const(reg2->var_off)) { + pr_vlog(env, "map_lookup: variable key pointer\n"); + return -EOPNOTSUPP; + } + + off = reg2->var_off.value + reg2->off; + if (-off % 4) { + pr_vlog(env, + "map_lookup: unaligned stack pointer %lld\n", + -off); + return -EOPNOTSUPP; + } + + /* Rest of the checks is only if we re-parse the same insn */ + if (!meta->func_id) + break; + + old_off = meta->arg2.var_off.value + meta->arg2.off; + meta->arg2_var_off |= off != old_off; + + if (meta->arg1.map_ptr != reg1->map_ptr) { + pr_vlog(env, "map_lookup: called for different map\n"); + return -EOPNOTSUPP; + } + break; + default: + pr_vlog(env, "unsupported function id: %d\n", func_id); + return -EOPNOTSUPP; + } + + meta->func_id = func_id; + meta->arg1 = *reg1; + meta->arg2 = *reg2; + + return 0; +} + static int nfp_bpf_check_exit(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env) @@ -82,7 +192,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off); - pr_info("unsupported exit state: %d, var_off: %s\n", + 
pr_vlog(env, "unsupported exit state: %d, var_off: %s\n", reg0->type, tn_buf); return -EINVAL; } @@ -92,7 +202,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, imm <= TC_ACT_REDIRECT && imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN && imm != TC_ACT_QUEUED) { - pr_info("unsupported exit state: %d, imm: %llx\n", + pr_vlog(env, "unsupported exit state: %d, imm: %llx\n", reg0->type, imm); return -EINVAL; } @@ -103,12 +213,13 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, static int nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - const struct bpf_reg_state *reg) + const struct bpf_reg_state *reg, + struct bpf_verifier_env *env) { s32 old_off, new_off; if (!tnum_is_const(reg->var_off)) { - pr_info("variable ptr stack access\n"); + pr_vlog(env, "variable ptr stack access\n"); return -EINVAL; } @@ -126,7 +237,7 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog, if (old_off % 4 == new_off % 4) return 0; - pr_info("stack access changed location was:%d is:%d\n", + pr_vlog(env, "stack access changed location was:%d is:%d\n", old_off, new_off); return -EINVAL; } @@ -140,19 +251,27 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, if (reg->type != PTR_TO_CTX && reg->type != PTR_TO_STACK && + reg->type != PTR_TO_MAP_VALUE && reg->type != PTR_TO_PACKET) { - pr_info("unsupported ptr type: %d\n", reg->type); + pr_vlog(env, "unsupported ptr type: %d\n", reg->type); return -EINVAL; } if (reg->type == PTR_TO_STACK) { - err = nfp_bpf_check_stack_access(nfp_prog, meta, reg); + err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env); if (err) return err; } + if (reg->type == PTR_TO_MAP_VALUE) { + if (is_mbpf_store(meta)) { + pr_vlog(env, "map writes not supported\n"); + return -EOPNOTSUPP; + } + } + if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) { - pr_info("ptr type changed for instruction %d -> %d\n", + pr_vlog(env, "ptr type changed for instruction %d -> %d\n", meta->ptr.type, reg->type); return -EINVAL; } @@ -171,25 +290,33 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len); nfp_prog->verifier_meta = meta; + if (!nfp_bpf_supported_opcode(meta->insn.code)) { + pr_vlog(env, "instruction %#02x not supported\n", + meta->insn.code); + return -EINVAL; + } + if (meta->insn.src_reg >= MAX_BPF_REG || meta->insn.dst_reg >= MAX_BPF_REG) { - pr_err("program uses extended registers - jit hardening?\n"); + pr_vlog(env, "program uses extended registers - jit hardening?\n"); return -EINVAL; } + if (meta->insn.code == (BPF_JMP | BPF_CALL)) + return nfp_bpf_check_call(nfp_prog, env, meta); if (meta->insn.code == (BPF_JMP | BPF_EXIT)) return nfp_bpf_check_exit(nfp_prog, env); - if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM)) + if (is_mbpf_load(meta)) return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.src_reg); - if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM)) + if (is_mbpf_store(meta)) return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg); return 0; } -const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = { +const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = { .insn_hook = nfp_verify_insn, }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index c1c595f8bb87..b3567a596fc1 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -81,6 +81,9 @@ static bool 
nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev, if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan")) return tun_type == NFP_FL_TUNNEL_VXLAN; + if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve")) + return tun_type == NFP_FL_TUNNEL_GENEVE; + return false; } @@ -93,13 +96,11 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action, size_t act_size = sizeof(struct nfp_fl_output); struct net_device *out_dev; u16 tmp_flags; - int ifindex; output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT; output->head.len_lw = act_size >> NFP_FL_LW_SIZ; - ifindex = tcf_mirred_ifindex(action); - out_dev = __dev_get_by_index(dev_net(in_dev), ifindex); + out_dev = tcf_mirred_dev(action); if (!out_dev) return -EOPNOTSUPP; @@ -138,11 +139,23 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action, return 0; } -static bool nfp_fl_supported_tun_port(const struct tc_action *action) +static enum nfp_flower_tun_type +nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app, + const struct tc_action *action) { struct ip_tunnel_info *tun = tcf_tunnel_info(action); - - return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT); + struct nfp_flower_priv *priv = app->priv; + + switch (tun->key.tp_dst) { + case htons(NFP_FL_VXLAN_PORT): + return NFP_FL_TUNNEL_VXLAN; + case htons(NFP_FL_GENEVE_PORT): + if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE) + return NFP_FL_TUNNEL_GENEVE; + /* FALLTHROUGH */ + default: + return NFP_FL_TUNNEL_NONE; + } } static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len) @@ -167,38 +180,33 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len) } static int -nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan, - const struct tc_action *action, - struct nfp_fl_pre_tunnel *pre_tun) +nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, + const struct tc_action *action, + struct nfp_fl_pre_tunnel *pre_tun, + enum nfp_flower_tun_type tun_type) { - struct ip_tunnel_info *vxlan = tcf_tunnel_info(action); - size_t act_size = sizeof(struct nfp_fl_set_vxlan); - u32 tmp_set_vxlan_type_index = 0; + size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); + struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); + u32 tmp_set_ip_tun_type_index = 0; /* Currently support one pre-tunnel so index is always 0. */ int pretun_idx = 0; - if (vxlan->options_len) { - /* Do not support options e.g. vxlan gpe. */ + if (ip_tun->options_len) return -EOPNOTSUPP; - } - set_vxlan->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL; - set_vxlan->head.len_lw = act_size >> NFP_FL_LW_SIZ; + set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL; + set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ; /* Set tunnel type and pre-tunnel index. */ - tmp_set_vxlan_type_index |= - FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) | + tmp_set_ip_tun_type_index |= + FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) | FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx); - set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index); - - set_vxlan->tun_id = vxlan->key.tun_id; - set_vxlan->tun_flags = vxlan->key.tun_flags; - set_vxlan->ipv4_ttl = vxlan->key.ttl; - set_vxlan->ipv4_tos = vxlan->key.tos; + set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); + set_tun->tun_id = ip_tun->key.tun_id; /* Complete pre_tunnel action. 
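The tun_type_index word built above packs two values into one 32-bit field with FIELD_PREP(), which shifts a value into the bit range described by a GENMASK() constant and keeps the layout self-documenting. A generic sketch of the pattern; the mask positions here are made up for illustration, the real layout is firmware-defined:

#include <linux/bitfield.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define TUN_TYPE	GENMASK(7, 4)	/* hypothetical bit range */
#define PRE_TUN_IDX	GENMASK(2, 0)	/* hypothetical bit range */

static __be32 pack_tun_word(u32 type, u32 idx)
{
	u32 w;

	w = FIELD_PREP(TUN_TYPE, type) | FIELD_PREP(PRE_TUN_IDX, idx);
	return cpu_to_be32(w);	/* firmware expects big endian */
}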
*/ - pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst; + pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; return 0; } @@ -435,8 +443,8 @@ nfp_flower_loop_action(const struct tc_action *a, struct net_device *netdev, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt) { + struct nfp_fl_set_ipv4_udp_tun *set_tun; struct nfp_fl_pre_tunnel *pre_tun; - struct nfp_fl_set_vxlan *s_vxl; struct nfp_fl_push_vlan *psh_v; struct nfp_fl_pop_vlan *pop_v; struct nfp_fl_output *output; @@ -484,26 +492,29 @@ nfp_flower_loop_action(const struct tc_action *a, nfp_fl_push_vlan(psh_v, a); *a_len += sizeof(struct nfp_fl_push_vlan); - } else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) { + } else if (is_tcf_tunnel_set(a)) { + struct nfp_repr *repr = netdev_priv(netdev); + *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a); + if (*tun_type == NFP_FL_TUNNEL_NONE) + return -EOPNOTSUPP; + /* Pre-tunnel action is required for tunnel encap. * This checks for next hop entries on NFP. * If none, the packet falls back before applying other actions. */ if (*a_len + sizeof(struct nfp_fl_pre_tunnel) + - sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ) + sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ) return -EOPNOTSUPP; - *tun_type = NFP_FL_TUNNEL_VXLAN; pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len); nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); *a_len += sizeof(struct nfp_fl_pre_tunnel); - s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len]; - err = nfp_fl_set_vxlan(s_vxl, a, pre_tun); + set_tun = (void *)&nfp_fl->action_data[*a_len]; + err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type); if (err) return err; - - *a_len += sizeof(struct nfp_fl_set_vxlan); + *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun); } else if (is_tcf_tunnel_release(a)) { /* Tunnel decap is handled by default so accept action. 
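Every branch of nfp_flower_loop_action() follows the same discipline: check that the serialized action still fits the firmware's action-list budget, write it at the current offset, then advance *a_len. The tunnel-set branch above merely has to reserve room for two records (pre-tunnel plus set-tunnel) at once. The underlying pattern as a small sketch, with an illustrative size cap standing in for NFP_FL_MAX_A_SIZ:

#include <string.h>
#include <stdint.h>

#define MAX_ACT_SZ 1216		/* illustrative firmware limit */

static int append_act(uint8_t *buf, size_t *a_len,
		      const void *act, size_t act_sz)
{
	if (*a_len + act_sz > MAX_ACT_SZ)
		return -1;	/* reject the flow, never truncate */

	memcpy(buf + *a_len, act, act_sz);
	*a_len += act_sz;
	return 0;
}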
*/ return 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index e98bb9cdb6a3..baaea6f1a9d8 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -125,6 +125,27 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok) return 0; } +int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists) +{ + struct nfp_flower_cmsg_portreify *msg; + struct sk_buff *skb; + + skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg), + NFP_FLOWER_CMSG_TYPE_PORT_REIFY, + GFP_KERNEL); + if (!skb) + return -ENOMEM; + + msg = nfp_flower_cmsg_get_data(skb); + msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id); + msg->reserved = 0; + msg->info = cpu_to_be16(exists); + + nfp_ctrl_tx(repr->app->ctrl, skb); + + return 0; +} + static void nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb) { @@ -161,6 +182,28 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb) } static void +nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_flower_cmsg_portreify *msg; + bool exists; + + msg = nfp_flower_cmsg_get_data(skb); + + rcu_read_lock(); + exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum)); + rcu_read_unlock(); + if (!exists) { + nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n", + be32_to_cpu(msg->portnum)); + return; + } + + atomic_inc(&priv->reify_replies); + wake_up_interruptible(&priv->reify_wait_queue); +} + +static void nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) { struct nfp_flower_cmsg_hdr *cmsg_hdr; @@ -168,20 +211,14 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) cmsg_hdr = nfp_flower_cmsg_get_hdr(skb); - if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) { - nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n", - cmsg_hdr->version); - goto out; - } - type = cmsg_hdr->type; switch (type) { + case NFP_FLOWER_CMSG_TYPE_PORT_REIFY: + nfp_flower_cmsg_portreify_rx(app, skb); + break; case NFP_FLOWER_CMSG_TYPE_PORT_MOD: nfp_flower_cmsg_portmod_rx(app, skb); break; - case NFP_FLOWER_CMSG_TYPE_FLOW_STATS: - nfp_flower_rx_flow_stats(app, skb); - break; case NFP_FLOWER_CMSG_TYPE_NO_NEIGH: nfp_tunnel_request_route(app, skb); break; @@ -217,7 +254,23 @@ void nfp_flower_cmsg_process_rx(struct work_struct *work) void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) { struct nfp_flower_priv *priv = app->priv; + struct nfp_flower_cmsg_hdr *cmsg_hdr; + + cmsg_hdr = nfp_flower_cmsg_get_hdr(skb); - skb_queue_tail(&priv->cmsg_skbs, skb); - schedule_work(&priv->cmsg_work); + if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) { + nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n", + cmsg_hdr->version); + dev_kfree_skb_any(skb); + return; + } + + if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_FLOW_STATS) { + /* We need to deal with stats updates from HW asap */ + nfp_flower_rx_flow_stats(app, skb); + dev_consume_skb_any(skb); + } else { + skb_queue_tail(&priv->cmsg_skbs, skb); + schedule_work(&priv->cmsg_work); + } } diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 66070741d55f..adfe474c2cf0 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -41,7 +41,7 @@ #include "../nfp_app.h" #include 
"../nfpcore/nfp_cpp.h" -#define NFP_FLOWER_LAYER_META BIT(0) +#define NFP_FLOWER_LAYER_EXT_META BIT(0) #define NFP_FLOWER_LAYER_PORT BIT(1) #define NFP_FLOWER_LAYER_MAC BIT(2) #define NFP_FLOWER_LAYER_TP BIT(3) @@ -50,8 +50,7 @@ #define NFP_FLOWER_LAYER_CT BIT(6) #define NFP_FLOWER_LAYER_VXLAN BIT(7) -#define NFP_FLOWER_LAYER_ETHER BIT(3) -#define NFP_FLOWER_LAYER_ARP BIT(4) +#define NFP_FLOWER_LAYER2_GENEVE BIT(5) #define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) #define NFP_FLOWER_MASK_VLAN_CFI BIT(12) @@ -108,6 +107,7 @@ enum nfp_flower_tun_type { NFP_FL_TUNNEL_NONE = 0, NFP_FL_TUNNEL_VXLAN = 2, + NFP_FL_TUNNEL_GENEVE = 4, }; struct nfp_fl_act_head { @@ -165,20 +165,6 @@ struct nfp_fl_pop_vlan { __be16 reserved; }; -/* Metadata without L2 (1W/4B) - * ---------------------------------------------------------------- - * 3 2 1 - * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | key_layers | mask_id | reserved | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - */ -struct nfp_flower_meta_one { - u8 nfp_flow_key_layer; - u8 mask_id; - u16 reserved; -}; - struct nfp_fl_pre_tunnel { struct nfp_fl_act_head head; __be16 reserved; @@ -187,16 +173,13 @@ struct nfp_fl_pre_tunnel { __be32 extra[3]; }; -struct nfp_fl_set_vxlan { +struct nfp_fl_set_ipv4_udp_tun { struct nfp_fl_act_head head; __be16 reserved; - __be64 tun_id; + __be64 tun_id __packed; __be32 tun_type_index; - __be16 tun_flags; - u8 ipv4_ttl; - u8 ipv4_tos; - __be32 extra[2]; -} __packed; + __be32 extra[3]; +}; /* Metadata with L2 (1W/4B) * ---------------------------------------------------------------- @@ -209,12 +192,24 @@ struct nfp_fl_set_vxlan { * NOTE: | TCI | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ -struct nfp_flower_meta_two { +struct nfp_flower_meta_tci { u8 nfp_flow_key_layer; u8 mask_id; __be16 tci; }; +/* Extended metadata for additional key_layers (1W/4B) + * ---------------------------------------------------------------- + * 3 2 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | nfp_flow_key_layer2 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct nfp_flower_ext_meta { + __be32 nfp_flow_key_layer2; +}; + /* Port details (1W/4B) * ---------------------------------------------------------------- * 3 2 1 @@ -313,7 +308,7 @@ struct nfp_flower_ipv6 { struct in6_addr ipv6_dst; }; -/* Flow Frame VXLAN --> Tunnel details (4W/16B) +/* Flow Frame IPv4 UDP TUNNEL --> Tunnel details (4W/16B) * ----------------------------------------------------------------- * 3 2 1 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 @@ -322,22 +317,17 @@ struct nfp_flower_ipv6 { * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ipv4_addr_dst | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | tun_flags | tos | ttl | + * | Reserved | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | gpe_flags | Reserved | Next Protocol | + * | Reserved | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | VNI | Reserved | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ -struct nfp_flower_vxlan { +struct nfp_flower_ipv4_udp_tun { __be32 ip_src; __be32 ip_dst; - __be16 tun_flags; - u8 tos; - u8 ttl; - u8 gpe_flags; - u8 reserved[2]; - u8 nxt_proto; + __be32 reserved[2]; __be32 tun_id; }; @@ -360,6 +350,7 
@@ struct nfp_flower_cmsg_hdr { enum nfp_flower_cmsg_type_port { NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0, NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2, + NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6, NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7, NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8, NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10, @@ -396,6 +387,15 @@ struct nfp_flower_cmsg_portmod { #define NFP_FLOWER_CMSG_PORTMOD_INFO_LINK BIT(0) +/* NFP_FLOWER_CMSG_TYPE_PORT_REIFY */ +struct nfp_flower_cmsg_portreify { + __be32 portnum; + u16 reserved; + __be16 info; +}; + +#define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST BIT(0) + enum nfp_flower_cmsg_port_type { NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0, NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1, @@ -454,6 +454,7 @@ nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx, unsigned int nbi, unsigned int nbi_port, unsigned int phys_port); int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok); +int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists); void nfp_flower_cmsg_process_rx(struct work_struct *work); void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb); struct sk_buff * diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 8fcc90c0d2d3..742d6f1575b5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -32,6 +32,7 @@ */ #include <linux/etherdevice.h> +#include <linux/lockdep.h> #include <linux/pci.h> #include <linux/skbuff.h> #include <linux/vmalloc.h> @@ -98,7 +99,57 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id) if (port >= reprs->num_reprs) return NULL; - return reprs->reprs[port]; + return rcu_dereference(reprs->reprs[port]); +} + +static int +nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type, + bool exists) +{ + struct nfp_reprs *reprs; + int i, err, count = 0; + + reprs = rcu_dereference_protected(app->reprs[type], + lockdep_is_held(&app->pf->lock)); + if (!reprs) + return 0; + + for (i = 0; i < reprs->num_reprs; i++) { + struct net_device *netdev; + + netdev = nfp_repr_get_locked(app, reprs, i); + if (netdev) { + struct nfp_repr *repr = netdev_priv(netdev); + + err = nfp_flower_cmsg_portreify(repr, exists); + if (err) + return err; + count++; + } + } + + return count; +} + +static int +nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl) +{ + struct nfp_flower_priv *priv = app->priv; + int err; + + if (!tot_repl) + return 0; + + lockdep_assert_held(&app->pf->lock); + err = wait_event_interruptible_timeout(priv->reify_wait_queue, + atomic_read(replies) >= tot_repl, + msecs_to_jiffies(10)); + if (err <= 0) { + nfp_warn(app->cpp, "Not all reprs responded to reify\n"); + return -EIO; + } + + return 0; } static int @@ -110,7 +161,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr) if (err) return err; - netif_carrier_on(repr->netdev); netif_tx_wake_all_queues(repr->netdev); return 0; @@ -119,7 +169,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr) static int nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr) { - netif_carrier_off(repr->netdev); netif_tx_disable(repr->netdev); return nfp_flower_cmsg_portmod(repr, false); @@ -140,6 +189,24 @@ nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev) netdev_priv(netdev)); } +static void +nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev) +{ + struct nfp_repr *repr = netdev_priv(netdev); + struct 
nfp_flower_priv *priv = app->priv; + atomic_t *replies = &priv->reify_replies; + int err; + + atomic_set(replies, 0); + err = nfp_flower_cmsg_portreify(repr, false); + if (err) { + nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n"); + return; + } + + nfp_flower_wait_repr_reify(app, replies, 1); +} + static void nfp_flower_sriov_disable(struct nfp_app *app) { struct nfp_flower_priv *priv = app->priv; @@ -157,10 +224,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, { u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp); struct nfp_flower_priv *priv = app->priv; + atomic_t *replies = &priv->reify_replies; enum nfp_port_type port_type; struct nfp_reprs *reprs; + int i, err, reify_cnt; const u8 queue = 0; - int i, err; port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT : NFP_PORT_VF_PORT; @@ -170,19 +238,21 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, return -ENOMEM; for (i = 0; i < cnt; i++) { + struct net_device *repr; struct nfp_port *port; u32 port_id; - reprs->reprs[i] = nfp_repr_alloc(app); - if (!reprs->reprs[i]) { + repr = nfp_repr_alloc(app); + if (!repr) { err = -ENOMEM; goto err_reprs_clean; } + RCU_INIT_POINTER(reprs->reprs[i], repr); /* For now we only support 1 PF */ WARN_ON(repr_type == NFP_REPR_TYPE_PF && i); - port = nfp_port_alloc(app, port_type, reprs->reprs[i]); + port = nfp_port_alloc(app, port_type, repr); if (repr_type == NFP_REPR_TYPE_PF) { port->pf_id = i; port->vnic = priv->nn->dp.ctrl_bar; @@ -193,11 +263,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ; } - eth_hw_addr_random(reprs->reprs[i]); + eth_hw_addr_random(repr); port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type, i, queue); - err = nfp_repr_init(app, reprs->reprs[i], + err = nfp_repr_init(app, repr, port_id, port, priv->nn->dp.netdev); if (err) { nfp_port_free(port); @@ -206,14 +276,28 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, nfp_info(app->cpp, "%s%d Representor(%s) created\n", repr_type == NFP_REPR_TYPE_PF ? 
"PF" : "VF", i, - reprs->reprs[i]->name); + repr->name); } nfp_app_reprs_set(app, repr_type, reprs); + atomic_set(replies, 0); + reify_cnt = nfp_flower_reprs_reify(app, repr_type, true); + if (reify_cnt < 0) { + err = reify_cnt; + nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n"); + goto err_reprs_remove; + } + + err = nfp_flower_wait_repr_reify(app, replies, reify_cnt); + if (err) + goto err_reprs_remove; + return 0; +err_reprs_remove: + reprs = nfp_app_reprs_set(app, repr_type, NULL); err_reprs_clean: - nfp_reprs_clean_and_free(reprs); + nfp_reprs_clean_and_free(app, reprs); return err; } @@ -233,10 +317,11 @@ static int nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) { struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; + atomic_t *replies = &priv->reify_replies; struct sk_buff *ctrl_skb; struct nfp_reprs *reprs; + int err, reify_cnt; unsigned int i; - int err; ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count); if (!ctrl_skb) @@ -250,17 +335,18 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) for (i = 0; i < eth_tbl->count; i++) { unsigned int phys_port = eth_tbl->ports[i].index; + struct net_device *repr; struct nfp_port *port; u32 cmsg_port_id; - reprs->reprs[phys_port] = nfp_repr_alloc(app); - if (!reprs->reprs[phys_port]) { + repr = nfp_repr_alloc(app); + if (!repr) { err = -ENOMEM; goto err_reprs_clean; } + RCU_INIT_POINTER(reprs->reprs[phys_port], repr); - port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, - reprs->reprs[phys_port]); + port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr); if (IS_ERR(port)) { err = PTR_ERR(port); goto err_reprs_clean; @@ -271,11 +357,11 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) goto err_reprs_clean; } - SET_NETDEV_DEV(reprs->reprs[phys_port], &priv->nn->pdev->dev); + SET_NETDEV_DEV(repr, &priv->nn->pdev->dev); nfp_net_get_mac_addr(app->pf, port); cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port); - err = nfp_repr_init(app, reprs->reprs[phys_port], + err = nfp_repr_init(app, repr, cmsg_port_id, port, priv->nn->dp.netdev); if (err) { nfp_port_free(port); @@ -288,23 +374,37 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) phys_port); nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n", - phys_port, reprs->reprs[phys_port]->name); + phys_port, repr->name); } nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs); - /* The MAC_REPR control message should be sent after the MAC + /* The REIFY/MAC_REPR control messages should be sent after the MAC * representors are registered using nfp_app_reprs_set(). This is * because the firmware may respond with control messages for the * MAC representors, f.e. to provide the driver with information * about their state, and without registration the driver will drop * any such messages. 
*/ + atomic_set(replies, 0); + reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true); + if (reify_cnt < 0) { + err = reify_cnt; + nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n"); + goto err_reprs_remove; + } + + err = nfp_flower_wait_repr_reify(app, replies, reify_cnt); + if (err) + goto err_reprs_remove; + nfp_ctrl_tx(app->ctrl, ctrl_skb); return 0; +err_reprs_remove: + reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL); err_reprs_clean: - nfp_reprs_clean_and_free(reprs); + nfp_reprs_clean_and_free(app, reprs); err_free_ctrl_skb: kfree_skb(ctrl_skb); return err; @@ -381,7 +481,7 @@ static int nfp_flower_init(struct nfp_app *app) { const struct nfp_pf *pf = app->pf; struct nfp_flower_priv *app_priv; - u64 version; + u64 version, features; int err; if (!pf->eth_tbl) { @@ -419,11 +519,20 @@ static int nfp_flower_init(struct nfp_app *app) app_priv->app = app; skb_queue_head_init(&app_priv->cmsg_skbs); INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); + init_waitqueue_head(&app_priv->reify_wait_queue); err = nfp_flower_metadata_init(app); if (err) goto err_free_app_priv; + /* Extract the extra features supported by the firmware. */ + features = nfp_rtsym_read_le(app->pf->rtbl, + "_abi_flower_extra_features", &err); + if (err) + app_priv->flower_ext_feats = 0; + else + app_priv->flower_ext_feats = features; + return 0; err_free_app_priv: @@ -456,6 +565,8 @@ static void nfp_flower_stop(struct nfp_app *app) const struct nfp_app_type app_flower = { .id = NFP_APP_FLOWER_NIC, .name = "flower", + + .ctrl_cap_mask = ~0U, .ctrl_has_meta = true, .extra_cap = nfp_flower_extra_cap, @@ -468,6 +579,7 @@ const struct nfp_app_type app_flower = { .vnic_clean = nfp_flower_vnic_clean, .repr_init = nfp_flower_repr_netdev_init, + .repr_preclean = nfp_flower_repr_netdev_preclean, .repr_clean = nfp_flower_repr_netdev_clean, .repr_open = nfp_flower_repr_netdev_open, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index e6b26c5ae6e0..332ff0fdc038 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -34,6 +34,8 @@ #ifndef __NFP_FLOWER_H__ #define __NFP_FLOWER_H__ 1 +#include "cmsg.h" + #include <linux/circ_buf.h> #include <linux/hashtable.h> #include <linux/time64.h> @@ -58,6 +60,10 @@ struct nfp_app; #define NFP_FL_MASK_ID_LOCATION 1 #define NFP_FL_VXLAN_PORT 4789 +#define NFP_FL_GENEVE_PORT 6081 + +/* Extra features bitmap. 
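NFP_FL_FEATS_GENEVE defined just below is the first bit of that bitmap: nfp_flower_init() reads the "_abi_flower_extra_features" firmware symbol once, caches it in flower_ext_feats, and each optional code path tests its bit at the point of use. Pattern sketch:

#include <linux/bits.h>
#include <linux/types.h>

#define FEAT_GENEVE	BIT(0)	/* mirrors NFP_FL_FEATS_GENEVE */

struct flower_priv {
	u64 ext_feats;	/* cached firmware feature bitmap */
};

static bool geneve_supported(const struct flower_priv *priv)
{
	return priv->ext_feats & FEAT_GENEVE;
}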
*/ +#define NFP_FL_FEATS_GENEVE BIT(0) struct nfp_fl_mask_id { struct circ_buf mask_id_free_list; @@ -77,6 +83,7 @@ struct nfp_fl_stats_id { * @nn: Pointer to vNIC * @mask_id_seed: Seed used for mask hash table * @flower_version: HW version of flower + * @flower_ext_feats: Bitmap of extra features the HW supports * @stats_ids: List of free stats ids * @mask_ids: List of free mask ids * @mask_table: Hash table used to store masks @@ -95,12 +102,16 @@ struct nfp_fl_stats_id { * @nfp_mac_off_count: Number of MACs in address list * @nfp_tun_mac_nb: Notifier to monitor link state * @nfp_tun_neigh_nb: Notifier to monitor neighbour state + * @reify_replies: atomically stores the number of replies received + * from firmware for repr reify + * @reify_wait_queue: wait queue for repr reify response counting */ struct nfp_flower_priv { struct nfp_app *app; struct nfp_net *nn; u32 mask_id_seed; u64 flower_version; + u64 flower_ext_feats; struct nfp_fl_stats_id stats_ids; struct nfp_fl_mask_id mask_ids; DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); @@ -119,6 +130,8 @@ struct nfp_flower_priv { int nfp_mac_off_count; struct notifier_block nfp_tun_mac_nb; struct notifier_block nfp_tun_neigh_nb; + atomic_t reify_replies; + wait_queue_head_t reify_wait_queue; }; struct nfp_fl_key_ls { @@ -172,7 +185,8 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, - struct nfp_fl_payload *nfp_flow); + struct nfp_fl_payload *nfp_flow, + enum nfp_flower_tun_type tun_type); int nfp_flower_compile_action(struct tc_cls_flower_offload *flow, struct net_device *netdev, struct nfp_fl_payload *nfp_flow); diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 60614d4f0e22..37c2ecae2a7a 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -38,7 +38,7 @@ #include "main.h" static void -nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame, +nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame, struct tc_cls_flower_offload *flow, u8 key_type, bool mask_version) { @@ -46,7 +46,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame, struct flow_dissector_key_vlan *flow_vlan; u16 tmp_tci; - memset(frame, 0, sizeof(struct nfp_flower_meta_two)); + memset(frame, 0, sizeof(struct nfp_flower_meta_tci)); /* Populate the metadata frame. */ frame->nfp_flow_key_layer = key_type; frame->mask_id = ~0; @@ -68,11 +68,9 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame, } static void -nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type) +nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext) { - frame->nfp_flow_key_layer = key_type; - frame->mask_id = 0; - frame->reserved = 0; + frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext); } static int @@ -224,16 +222,15 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame, } static void -nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame, - struct tc_cls_flower_offload *flow, - bool mask_version, __be32 *tun_dst) +nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame, + struct tc_cls_flower_offload *flow, + bool mask_version) { struct fl_flow_key *target = mask_version ? 
flow->mask : flow->key; - struct flow_dissector_key_ipv4_addrs *vxlan_ips; + struct flow_dissector_key_ipv4_addrs *tun_ips; struct flow_dissector_key_keyid *vni; - /* Wildcard TOS/TTL/GPE_FLAGS/NXT_PROTO for now. */ - memset(frame, 0, sizeof(struct nfp_flower_vxlan)); + memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun)); if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { @@ -248,80 +245,68 @@ nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame, if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { - vxlan_ips = + tun_ips = skb_flow_dissector_target(flow->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, target); - frame->ip_src = vxlan_ips->src; - frame->ip_dst = vxlan_ips->dst; - *tun_dst = vxlan_ips->dst; + frame->ip_src = tun_ips->src; + frame->ip_dst = tun_ips->dst; } } int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, - struct nfp_fl_payload *nfp_flow) + struct nfp_fl_payload *nfp_flow, + enum nfp_flower_tun_type tun_type) { - enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; - __be32 tun_dst, tun_dst_mask = 0; struct nfp_repr *netdev_repr; int err; u8 *ext; u8 *msk; - if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) - tun_type = NFP_FL_TUNNEL_VXLAN; - memset(nfp_flow->unmasked_data, 0, key_ls->key_size); memset(nfp_flow->mask_data, 0, key_ls->key_size); ext = nfp_flow->unmasked_data; msk = nfp_flow->mask_data; - if (NFP_FLOWER_LAYER_PORT & key_ls->key_layer) { - /* Populate Exact Metadata. */ - nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)ext, - flow, key_ls->key_layer, false); - /* Populate Mask Metadata. */ - nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)msk, - flow, key_ls->key_layer, true); - ext += sizeof(struct nfp_flower_meta_two); - msk += sizeof(struct nfp_flower_meta_two); - - /* Populate Exact Port data. */ - err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext, - nfp_repr_get_port_id(netdev), - false, tun_type); - if (err) - return err; - - /* Populate Mask Port Data. */ - err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk, - nfp_repr_get_port_id(netdev), - true, tun_type); - if (err) - return err; - - ext += sizeof(struct nfp_flower_in_port); - msk += sizeof(struct nfp_flower_in_port); - } else { - /* Populate Exact Metadata. */ - nfp_flower_compile_meta((struct nfp_flower_meta_one *)ext, - key_ls->key_layer); - /* Populate Mask Metadata. */ - nfp_flower_compile_meta((struct nfp_flower_meta_one *)msk, - key_ls->key_layer); - ext += sizeof(struct nfp_flower_meta_one); - msk += sizeof(struct nfp_flower_meta_one); - } - if (NFP_FLOWER_LAYER_META & key_ls->key_layer) { - /* Additional Metadata Fields. - * Currently unsupported. - */ - return -EOPNOTSUPP; + /* Populate Exact Metadata. */ + nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext, + flow, key_ls->key_layer, false); + /* Populate Mask Metadata. */ + nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk, + flow, key_ls->key_layer, true); + ext += sizeof(struct nfp_flower_meta_tci); + msk += sizeof(struct nfp_flower_meta_tci); + + /* Populate Extended Metadata if Required. 
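The extended-metadata block handled next exists because the original key-layer field is a single byte and all eight bits are now spoken for. Setting NFP_FLOWER_LAYER_EXT_META in it tells the firmware that a second, 32-bit layer bitmap (struct nfp_flower_ext_meta) follows the meta/TCI word, and GENEVE is its first user. Sketch of the two-level bitmap:

#include <linux/bits.h>
#include <linux/types.h>

#define LAYER_EXT_META	BIT(0)	/* flag in the 8-bit first-level field */
#define LAYER2_GENEVE	BIT(5)	/* flag in the 32-bit second level */

struct key_layers {
	u8 layer1;
	u32 layer2;
};

static void add_geneve(struct key_layers *k)
{
	k->layer1 |= LAYER_EXT_META;	/* announce the extra word */
	k->layer2 |= LAYER2_GENEVE;
}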
*/ + if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) { + nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext, + key_ls->key_layer_two); + nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk, + key_ls->key_layer_two); + ext += sizeof(struct nfp_flower_ext_meta); + msk += sizeof(struct nfp_flower_ext_meta); } + /* Populate Exact Port data. */ + err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext, + nfp_repr_get_port_id(netdev), + false, tun_type); + if (err) + return err; + + /* Populate Mask Port Data. */ + err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk, + nfp_repr_get_port_id(netdev), + true, tun_type); + if (err) + return err; + + ext += sizeof(struct nfp_flower_in_port); + msk += sizeof(struct nfp_flower_in_port); + if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) { /* Populate Exact MAC Data. */ nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext, @@ -366,15 +351,17 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, msk += sizeof(struct nfp_flower_ipv6); } - if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) { + if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN || + key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) { + __be32 tun_dst; + /* Populate Exact VXLAN Data. */ - nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext, - flow, false, &tun_dst); + nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false); /* Populate Mask VXLAN Data. */ - nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk, - flow, true, &tun_dst_mask); - ext += sizeof(struct nfp_flower_vxlan); - msk += sizeof(struct nfp_flower_vxlan); + nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true); + tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst; + ext += sizeof(struct nfp_flower_ipv4_udp_tun); + msk += sizeof(struct nfp_flower_ipv4_udp_tun); /* Configure tunnel end point MAC. 
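Throughout nfp_flower_compile_flow_match() the exact-match and mask buffers are written in lockstep: each layer is emitted twice, and both cursors advance by the same struct size so the two buffers stay structurally identical. The pattern reduced to its core, with a stand-in layer struct:

#include <string.h>
#include <stdint.h>

struct mac_layer {		/* stand-in for one key layer */
	uint8_t dst[6], src[6];
};

static void emit_layer(uint8_t **ext, uint8_t **msk,
		       const struct mac_layer *val,
		       const struct mac_layer *mask)
{
	memcpy(*ext, val, sizeof(*val));
	memcpy(*msk, mask, sizeof(*mask));
	*ext += sizeof(struct mac_layer);
	*msk += sizeof(struct mac_layer);
}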
*/ if (nfp_netdev_is_nfp_repr(netdev)) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 553f94f55dce..08c4c6dc5f7f 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -130,12 +130,15 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f) } static int -nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, +nfp_flower_calculate_key_layers(struct nfp_app *app, + struct nfp_fl_key_ls *ret_key_ls, struct tc_cls_flower_offload *flow, - bool egress) + bool egress, + enum nfp_flower_tun_type *tun_type) { struct flow_dissector_key_basic *mask_basic = NULL; struct flow_dissector_key_basic *key_basic = NULL; + struct nfp_flower_priv *priv = app->priv; u32 key_layer_two; u8 key_layer; int key_size; @@ -150,10 +153,15 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, return -EOPNOTSUPP; key_layer_two = 0; - key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC; - key_size = sizeof(struct nfp_flower_meta_one) + - sizeof(struct nfp_flower_in_port) + - sizeof(struct nfp_flower_mac_mpls); + key_layer = NFP_FLOWER_LAYER_PORT; + key_size = sizeof(struct nfp_flower_meta_tci) + + sizeof(struct nfp_flower_in_port); + + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) || + dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) { + key_layer |= NFP_FLOWER_LAYER_MAC; + key_size += sizeof(struct nfp_flower_mac_mpls); + } if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { @@ -192,12 +200,27 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, FLOW_DISSECTOR_KEY_ENC_PORTS, flow->key); - if (mask_enc_ports->dst != cpu_to_be16(~0) || - enc_ports->dst != htons(NFP_FL_VXLAN_PORT)) + if (mask_enc_ports->dst != cpu_to_be16(~0)) return -EOPNOTSUPP; - key_layer |= NFP_FLOWER_LAYER_VXLAN; - key_size += sizeof(struct nfp_flower_vxlan); + switch (enc_ports->dst) { + case htons(NFP_FL_VXLAN_PORT): + *tun_type = NFP_FL_TUNNEL_VXLAN; + key_layer |= NFP_FLOWER_LAYER_VXLAN; + key_size += sizeof(struct nfp_flower_ipv4_udp_tun); + break; + case htons(NFP_FL_GENEVE_PORT): + if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) + return -EOPNOTSUPP; + *tun_type = NFP_FL_TUNNEL_GENEVE; + key_layer |= NFP_FLOWER_LAYER_EXT_META; + key_size += sizeof(struct nfp_flower_ext_meta); + key_layer_two |= NFP_FLOWER_LAYER2_GENEVE; + key_size += sizeof(struct nfp_flower_ipv4_udp_tun); + break; + default: + return -EOPNOTSUPP; + } } else if (egress) { /* Reject non tunnel matches offloaded to egress repr. 
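The key-layer change above stops forcing the MAC/MPLS layer into every key: a layer is included only when the TC flower dissector actually matched on it, which keeps the key small. The gating test in isolation, using the kernel's flow-dissector API:

#include <net/flow_dissector.h>

static bool wants_mac_layer(struct flow_dissector *dissector)
{
	return dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	       dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_MPLS);
}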
*/ return -EOPNOTSUPP; @@ -325,6 +348,7 @@ static int nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, struct tc_cls_flower_offload *flow, bool egress) { + enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *flow_pay; struct nfp_fl_key_ls *key_layer; @@ -334,7 +358,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (!key_layer) return -ENOMEM; - err = nfp_flower_calculate_key_layers(key_layer, flow, egress); + err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress, + &tun_type); if (err) goto err_free_key_ls; @@ -344,7 +369,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, goto err_free_key_ls; } - err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay); + err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay, + tun_type); if (err) goto err_destroy_flow; @@ -457,8 +483,7 @@ static int nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, struct tc_cls_flower_offload *flower, bool egress) { - if (!eth_proto_is_802_3(flower->common.protocol) || - flower->common.chain_index) + if (!eth_proto_is_802_3(flower->common.protocol)) return -EOPNOTSUPP; switch (flower->command) { @@ -478,7 +503,7 @@ int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, { struct nfp_repr *repr = cb_priv; - if (!tc_can_offload(repr->netdev)) + if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data)) return -EOPNOTSUPP; switch (type) { @@ -495,7 +520,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, { struct nfp_repr *repr = cb_priv; - if (!tc_can_offload(repr->netdev)) + if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data)) return -EOPNOTSUPP; switch (type) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index 955a9f44d244..6aedef0ad433 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -32,6 +32,8 @@ */ #include <linux/bug.h> +#include <linux/lockdep.h> +#include <linux/rcupdate.h> #include <linux/skbuff.h> #include <linux/slab.h> @@ -99,13 +101,19 @@ nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority) } struct nfp_reprs * +nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type) +{ + return rcu_dereference_protected(app->reprs[type], + lockdep_is_held(&app->pf->lock)); +} + +struct nfp_reprs * nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type, struct nfp_reprs *reprs) { struct nfp_reprs *old; - old = rcu_dereference_protected(app->reprs[type], - lockdep_is_held(&app->pf->lock)); + old = nfp_reprs_get_locked(app, type); rcu_assign_pointer(app->reprs[type], reprs); return old; @@ -116,7 +124,7 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id) struct nfp_app *app; if (id >= ARRAY_SIZE(apps) || !apps[id]) { - nfp_err(pf->cpp, "failed to find app with ID 0x%02hhx\n", id); + nfp_err(pf->cpp, "unknown FW app ID 0x%02hhx, driver too old or support for FW not built in\n", id); return ERR_PTR(-EINVAL); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 0e5e0305ad1c..437964afa8ee 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -43,6 +43,7 @@ struct bpf_prog; struct net_device; struct netdev_bpf; +struct netlink_ext_ack; struct pci_dev; struct sk_buff;
@@ -66,6 +67,9 @@ extern const struct nfp_app_type app_flower; * struct nfp_app_type - application definition * @id: application ID * @name: application name + * @ctrl_cap_mask: ctrl vNIC capability mask, allows disabling features like + * IRQMOD which are on by default but counter-productive for + * control messages which are often latency-sensitive * @ctrl_has_meta: control messages have prepend of type:5/port:CTRL * * Callbacks @@ -77,18 +81,20 @@ extern const struct nfp_app_type app_flower; * @vnic_init: vNIC netdev was registered * @vnic_clean: vNIC netdev about to be unregistered * @repr_init: representor about to be registered + * @repr_preclean: representor about to be unregistered, executed before app + * reference to it is removed * @repr_clean: representor about to be unregistered * @repr_open: representor netdev open callback * @repr_stop: representor netdev stop callback + * @change_mtu: MTU change on a netdev has been requested (veto-only, change + * is not guaranteed to be committed) * @start: start application logic * @stop: stop application logic * @ctrl_msg_rx: control message handler * @setup_tc: setup TC ndo * @tc_busy: TC HW offload busy (rules loaded) + * @bpf: BPF ndo offload-related calls * @xdp_offload: offload an XDP program - * @bpf_verifier_prep: verifier prep for dev-specific BPF programs - * @bpf_translate: translate call for dev-specific BPF programs - * @bpf_destroy: destroy for dev-specific BPF programs * @eswitch_mode_get: get SR-IOV eswitch mode * @sriov_enable: app-specific sriov initialisation * @sriov_disable: app-specific sriov clean-up @@ -98,6 +104,7 @@ struct nfp_app_type { enum nfp_app_id id; const char *name; + u32 ctrl_cap_mask; bool ctrl_has_meta; int (*init)(struct nfp_app *app); @@ -112,11 +119,15 @@ struct nfp_app_type { void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn); int (*repr_init)(struct nfp_app *app, struct net_device *netdev); + void (*repr_preclean)(struct nfp_app *app, struct net_device *netdev); void (*repr_clean)(struct nfp_app *app, struct net_device *netdev); int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr); int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr); + int (*change_mtu)(struct nfp_app *app, struct net_device *netdev, + int new_mtu); + int (*start)(struct nfp_app *app); void (*stop)(struct nfp_app *app); @@ -125,14 +136,11 @@ struct nfp_app_type { int (*setup_tc)(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, void *type_data); bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn); + int (*bpf)(struct nfp_app *app, struct nfp_net *nn, + struct netdev_bpf *xdp); int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn, - struct bpf_prog *prog); - int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn, - struct netdev_bpf *bpf); - int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn, - struct bpf_prog *prog); - int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn, - struct bpf_prog *prog); + struct bpf_prog *prog, + struct netlink_ext_ack *extack); int (*sriov_enable)(struct nfp_app *app, int num_vfs); void (*sriov_disable)(struct nfp_app *app); @@ -163,6 +171,7 @@ struct nfp_app { void *priv; }; +bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); static inline int nfp_app_init(struct nfp_app *app) @@ -226,12 +235,27 @@ nfp_app_repr_init(struct nfp_app *app, struct net_device *netdev) } static inline void +nfp_app_repr_preclean(struct nfp_app *app, struct
+ +static inline void nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev) { if (app->type->repr_clean) app->type->repr_clean(app, netdev); } +static inline int +nfp_app_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu) +{ + if (!app || !app->type->change_mtu) + return 0; + return app->type->change_mtu(app, netdev, new_mtu); +} + static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl) { app->ctrl = ctrl; @@ -293,39 +317,29 @@ static inline int nfp_app_setup_tc(struct nfp_app *app, return app->type->setup_tc(app, netdev, type, type_data); } -static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn, - struct bpf_prog *prog) +static inline int nfp_app_bpf(struct nfp_app *app, struct nfp_net *nn, + struct netdev_bpf *bpf) { - if (!app || !app->type->xdp_offload) - return -EOPNOTSUPP; - return app->type->xdp_offload(app, nn, prog); + if (!app || !app->type->bpf) + return -EINVAL; + return app->type->bpf(app, nn, bpf); } -static inline int -nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn, - struct netdev_bpf *bpf) +static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn, + struct bpf_prog *prog, + struct netlink_ext_ack *extack) { - if (!app || !app->type->bpf_verifier_prep) + if (!app || !app->type->xdp_offload) return -EOPNOTSUPP; - return app->type->bpf_verifier_prep(app, nn, bpf); + return app->type->xdp_offload(app, nn, prog, extack); } -static inline int -nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn, - struct bpf_prog *prog) +static inline bool __nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb) { - if (!app || !app->type->bpf_translate) - return -EOPNOTSUPP; - return app->type->bpf_translate(app, nn, prog); -} + trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0, + skb->data, skb->len); -static inline int -nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn, - struct bpf_prog *prog) -{ - if (!app || !app->type->bpf_destroy) - return -EOPNOTSUPP; - return app->type->bpf_destroy(app, nn, prog); + return __nfp_ctrl_tx(app->ctrl, skb); } static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb) @@ -378,6 +392,8 @@ static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id) struct nfp_app *nfp_app_from_netdev(struct net_device *netdev); struct nfp_reprs * +nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type); +struct nfp_reprs * nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type, struct nfp_reprs *reprs); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c index 830f6de25f47..3f6952b66a49 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -41,6 +41,7 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = { [CMD_TGT_WRITE8_SWAP] = { 0x02, 0x42 }, + [CMD_TGT_WRITE32_SWAP] = { 0x02, 0x5f }, [CMD_TGT_READ8] = { 0x01, 0x43 }, [CMD_TGT_READ32] = { 0x00, 0x5c }, [CMD_TGT_READ32_LE] = { 0x01, 0x5c }, @@ -49,6 +50,94 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = { [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 }, }; +static bool unreg_is_imm(u16 reg) +{ + return (reg & UR_REG_IMM) == UR_REG_IMM; +} + +u16 br_get_offset(u64 instr) +{ + u16 addr_lo, addr_hi; + + addr_lo = FIELD_GET(OP_BR_ADDR_LO, instr); + addr_hi = FIELD_GET(OP_BR_ADDR_HI, instr); + + return (addr_hi
* ((OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO)) + 1)) | + addr_lo; +} + +void br_set_offset(u64 *instr, u16 offset) +{ + u16 addr_lo, addr_hi; + + addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO)); + addr_hi = offset != addr_lo; + *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO); + *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi); + *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo); +} + +void br_add_offset(u64 *instr, u16 offset) +{ + u16 addr; + + addr = br_get_offset(*instr); + br_set_offset(instr, addr + offset); +} + +static bool immed_can_modify(u64 instr) +{ + if (FIELD_GET(OP_IMMED_INV, instr) || + FIELD_GET(OP_IMMED_SHIFT, instr) || + FIELD_GET(OP_IMMED_WIDTH, instr) != IMMED_WIDTH_ALL) { + pr_err("Can't decode/encode immed!\n"); + return false; + } + return true; +} + +u16 immed_get_value(u64 instr) +{ + u16 reg; + + if (!immed_can_modify(instr)) + return 0; + + reg = FIELD_GET(OP_IMMED_A_SRC, instr); + if (!unreg_is_imm(reg)) + reg = FIELD_GET(OP_IMMED_B_SRC, instr); + + return (reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr); +} + +void immed_set_value(u64 *instr, u16 immed) +{ + if (!immed_can_modify(*instr)) + return; + + if (unreg_is_imm(FIELD_GET(OP_IMMED_A_SRC, *instr))) { + *instr &= ~FIELD_PREP(OP_IMMED_A_SRC, 0xff); + *instr |= FIELD_PREP(OP_IMMED_A_SRC, immed & 0xff); + } else { + *instr &= ~FIELD_PREP(OP_IMMED_B_SRC, 0xff); + *instr |= FIELD_PREP(OP_IMMED_B_SRC, immed & 0xff); + } + + *instr &= ~OP_IMMED_IMM; + *instr |= FIELD_PREP(OP_IMMED_IMM, immed >> 8); +} + +void immed_add_value(u64 *instr, u16 offset) +{ + u16 val; + + if (!immed_can_modify(*instr)) + return; + + val = immed_get_value(*instr); + immed_set_value(instr, val + offset); +} + static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst) { bool lm_id, lm_dec = false; @@ -120,7 +209,8 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg, reg->dst = nfp_swreg_to_unreg(dst, true); /* Decode source operands */ - if (swreg_type(lreg) == swreg_type(rreg)) + if (swreg_type(lreg) == swreg_type(rreg) && + swreg_type(lreg) != NN_REG_NONE) return -EFAULT; if (swreg_type(lreg) == NN_REG_GPR_B || @@ -200,7 +290,8 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg, reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL); /* Decode source operands */ - if (swreg_type(lreg) == swreg_type(rreg)) + if (swreg_type(lreg) == swreg_type(rreg) && + swreg_type(lreg) != NN_REG_NONE) return -EFAULT; if (swreg_type(lreg) == NN_REG_GPR_B || diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index 74d0c11ab2f9..5f9291db98e0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016 Netronome Systems, Inc. + * Copyright (C) 2016-2017 Netronome Systems, Inc. * * This software is dual licensed under the GNU General Public License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this
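br_set_offset() above splits a branch target across two instruction fields: the low bits land in OP_BR_ADDR_LO, while OP_BR_ADDR_HI records only whether the offset spilled past the low field (addr_hi = offset != addr_lo), and br_get_offset() undoes that by scaling the high part by the size of the low range. The real OP_BR_ADDR_* masks are not visible in this hunk, so the round-trip sketch below uses made-up field positions:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative placement; the driver's OP_BR_ADDR_* layout differs. */
    #define BR_ADDR_LO_SHIFT 5
    #define BR_ADDR_LO_MASK  (0x1fffULL << BR_ADDR_LO_SHIFT) /* 13-bit low field */
    #define BR_ADDR_HI_SHIFT 18
    #define BR_ADDR_HI_MASK  (1ULL << BR_ADDR_HI_SHIFT)      /* one overflow bit */

    #define BR_ADDR_LO_RANGE ((BR_ADDR_LO_MASK >> BR_ADDR_LO_SHIFT) + 1)

    static uint16_t br_get_offset(uint64_t instr)
    {
        uint16_t addr_lo = (instr & BR_ADDR_LO_MASK) >> BR_ADDR_LO_SHIFT;
        uint16_t addr_hi = !!(instr & BR_ADDR_HI_MASK);

        /* The high bit selects the upper "page" of the low field's range. */
        return (uint16_t)(addr_hi * BR_ADDR_LO_RANGE) | addr_lo;
    }

    static void br_set_offset(uint64_t *instr, uint16_t offset)
    {
        uint16_t addr_lo = offset & (BR_ADDR_LO_MASK >> BR_ADDR_LO_SHIFT);
        uint16_t addr_hi = offset != addr_lo; /* anything beyond the low bits */

        *instr &= ~(BR_ADDR_HI_MASK | BR_ADDR_LO_MASK);
        *instr |= (uint64_t)addr_hi << BR_ADDR_HI_SHIFT;
        *instr |= (uint64_t)addr_lo << BR_ADDR_LO_SHIFT;
    }

    int main(void)
    {
        uint64_t instr = 0;

        /* With a single-bit high field, only offsets in the low range or in
         * the page directly above it survive the round trip. */
        br_set_offset(&instr, 0x2010);
        assert(br_get_offset(instr) == 0x2010);
        printf("round-trip ok: 0x%04x\n", br_get_offset(instr));
        return 0;
    }

br_add_offset() is then just this get/set pair composed, which is what makes relocating an already encoded branch a single call.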
@@ -77,9 +77,11 @@ enum br_mask { BR_BEQ = 0x00, BR_BNE = 0x01, + BR_BMI = 0x02, BR_BHS = 0x04, BR_BLO = 0x05, BR_BGE = 0x08, + BR_BLT = 0x09, BR_UNC = 0x18, }; @@ -92,6 +94,10 @@ enum br_ctx_signal_state { BR_CSS_NONE = 2, }; +u16 br_get_offset(u64 instr); +void br_set_offset(u64 *instr, u16 offset); +void br_add_offset(u64 *instr, u16 offset); + #define OP_BBYTE_BASE 0x0c800000000ULL #define OP_BB_A_SRC 0x000000000ffULL #define OP_BB_BYTE 0x00000000300ULL @@ -132,6 +138,10 @@ enum immed_shift { IMMED_SHIFT_2B = 2, }; +u16 immed_get_value(u64 instr); +void immed_set_value(u64 *instr, u16 immed); +void immed_add_value(u64 *instr, u16 offset); + #define OP_SHF_BASE 0x08000000000ULL #define OP_SHF_A_SRC 0x000000000ffULL #define OP_SHF_SC 0x00000000300ULL @@ -175,6 +185,7 @@ enum alu_op { ALU_OP_NONE = 0x00, ALU_OP_ADD = 0x01, ALU_OP_NOT = 0x04, + ALU_OP_ADD_2B = 0x05, ALU_OP_AND = 0x08, ALU_OP_SUB_C = 0x0d, ALU_OP_ADD_C = 0x11, @@ -209,6 +220,7 @@ enum alu_dst_ab { #define OP_CMD_CNT 0x0000e000000ULL #define OP_CMD_SIG 0x000f0000000ULL #define OP_CMD_TGT_CMD 0x07f00000000ULL +#define OP_CMD_INDIR 0x20000000000ULL #define OP_CMD_MODE 0x1c0000000000ULL struct cmd_tgt_act { @@ -219,6 +231,7 @@ struct cmd_tgt_act { enum cmd_tgt_map { CMD_TGT_READ8, CMD_TGT_WRITE8_SWAP, + CMD_TGT_WRITE32_SWAP, CMD_TGT_READ32, CMD_TGT_READ32_LE, CMD_TGT_READ32_SWAP, @@ -240,6 +253,9 @@ enum cmd_ctx_swap { CMD_CTX_NO_SWAP = 3, }; +#define CMD_OVE_LEN BIT(7) +#define CMD_OV_LEN GENMASK(12, 8) + #define OP_LCSR_BASE 0x0fc00000000ULL #define OP_LCSR_A_SRC 0x000000003ffULL #define OP_LCSR_B_SRC 0x000000ffc00ULL @@ -257,6 +273,7 @@ enum lcsr_wr_src { #define OP_CARB_BASE 0x0e000000000ULL #define OP_CARB_OR 0x00000010000ULL +#define NFP_CSR_CTX_PTR 0x20 #define NFP_CSR_ACT_LM_ADDR0 0x64 #define NFP_CSR_ACT_LM_ADDR1 0x6c #define NFP_CSR_ACT_LM_ADDR2 0x94 @@ -377,4 +394,13 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg, int nfp_ustore_check_valid_no_ecc(u64 insn); u64 nfp_ustore_calc_ecc_insn(u64 insn); +#define NFP_IND_ME_REFL_WR_SIG_INIT 3 +#define NFP_IND_ME_CTX_PTR_BASE_MASK GENMASK(9, 0) +#define NFP_IND_NUM_CONTEXTS 8 + +static inline u32 nfp_get_ind_csr_ctx_ptr_offs(u32 read_offset) +{ + return (read_offset & ~NFP_IND_ME_CTX_PTR_BASE_MASK) | NFP_CSR_CTX_PTR; +} + #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 6c9f29c2e975..eb0fc614673d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -152,18 +152,8 @@ out: static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) { struct nfp_pf *pf = devlink_priv(devlink); - int ret; - - mutex_lock(&pf->lock); - if (!pf->app) { - ret = -EBUSY; - goto out; - } - ret = nfp_app_eswitch_mode_get(pf->app, mode); -out: - mutex_unlock(&pf->lock); - return ret; + return nfp_app_eswitch_mode_get(pf->app, mode); } const struct devlink_ops nfp_devlink_ops = { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 35eaccbece36..cc570bb6563c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -45,6 +45,7 @@ #include <linux/pci.h> #include <linux/firmware.h> #include <linux/vermagic.h> +#include <linux/vmalloc.h> #include <net/devlink.h> #include
"nfpcore/nfp.h" @@ -498,17 +499,16 @@ static int nfp_pci_probe(struct pci_dev *pdev, if (err) goto err_hwinfo_free; - err = devlink_register(devlink, &pdev->dev); - if (err) - goto err_hwinfo_free; - err = nfp_nsp_init(pdev, pf); if (err) - goto err_devlink_unreg; + goto err_hwinfo_free; pf->mip = nfp_mip_open(pf->cpp); pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip); + pf->dump_flag = NFP_DUMP_NSP_DIAG; + pf->dumpspec = nfp_net_dump_load_dumpspec(pf->cpp, pf->rtbl); + err = nfp_pcie_sriov_read_nfd_limit(pf); if (err) goto err_fw_unload; @@ -518,6 +518,7 @@ static int nfp_pci_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "Error: %d VFs already enabled, but loaded FW can only support %d\n", pf->num_vfs, pf->limit_vfs); + err = -EINVAL; goto err_fw_unload; } @@ -544,8 +545,7 @@ err_fw_unload: nfp_fw_unload(pf); kfree(pf->eth_tbl); kfree(pf->nspi); -err_devlink_unreg: - devlink_unregister(devlink); + vfree(pf->dumpspec); err_hwinfo_free: kfree(pf->hwinfo); nfp_cpp_free(pf->cpp); @@ -566,19 +566,15 @@ err_pci_disable: static void nfp_pci_remove(struct pci_dev *pdev) { struct nfp_pf *pf = pci_get_drvdata(pdev); - struct devlink *devlink; nfp_hwmon_unregister(pf); - devlink = priv_to_devlink(pf); - - nfp_net_pci_remove(pf); - nfp_pcie_sriov_disable(pdev); pci_sriov_set_totalvfs(pf->pdev, 0); - devlink_unregister(devlink); + nfp_net_pci_remove(pf); + vfree(pf->dumpspec); kfree(pf->rtbl); nfp_mip_close(pf->mip); if (pf->fw_loaded) @@ -592,7 +588,7 @@ static void nfp_pci_remove(struct pci_dev *pdev) kfree(pf->eth_tbl); kfree(pf->nspi); mutex_destroy(&pf->lock); - devlink_free(devlink); + devlink_free(priv_to_devlink(pf)); pci_release_regions(pdev); pci_disable_device(pdev); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index be0ee59f2eb9..add46e28212b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -39,6 +39,7 @@ #ifndef NFP_MAIN_H #define NFP_MAIN_H +#include <linux/ethtool.h> #include <linux/list.h> #include <linux/types.h> #include <linux/msi.h> @@ -62,6 +63,17 @@ struct nfp_port; struct nfp_rtsym_table; /** + * struct nfp_dumpspec - NFP FW dump specification structure + * @size: Size of the data + * @data: Sequence of TLVs, each being an instruction to dump some data + * from FW + */ +struct nfp_dumpspec { + u32 size; + u8 data[0]; +}; + +/** * struct nfp_pf - NFP PF-specific device structure * @pdev: Backpointer to PCI device * @cpp: Pointer to the CPP handle @@ -83,6 +95,9 @@ struct nfp_rtsym_table; * @mip: MIP handle * @rtbl: RTsym table * @hwinfo: HWInfo table + * @dumpspec: Debug dump specification + * @dump_flag: Store dump flag between set_dump and get_dump_flag + * @dump_len: Store dump length between set_dump and get_dump_flag * @eth_tbl: NSP ETH table * @nspi: NSP identification info * @hwmon_dev: pointer to hwmon device @@ -124,6 +139,9 @@ struct nfp_pf { const struct nfp_mip *mip; struct nfp_rtsym_table *rtbl; struct nfp_hwinfo *hwinfo; + struct nfp_dumpspec *dumpspec; + u32 dump_flag; + u32 dump_len; struct nfp_eth_table *eth_tbl; struct nfp_nsp_identify *nspi; @@ -157,4 +175,15 @@ void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port); bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); +enum nfp_dump_diag { + NFP_DUMP_NSP_DIAG = 0, +}; + +struct nfp_dumpspec * +nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl); +s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec, + u32 
flag); +int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec, + struct ethtool_dump *dump_param, void *dest); + #endif /* NFP_MAIN_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 7f9857c276b1..d88eda9707e6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -47,6 +47,7 @@ #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/io-64-nonatomic-hi-lo.h> +#include <net/xdp.h> #include "nfp_net_ctrl.h" @@ -350,6 +351,7 @@ struct nfp_net_rx_buf { * @rxds: Virtual address of FL/RX ring in host memory * @dma: DMA address of the FL/RX ring * @size: Size, in bytes, of the FL/RX ring (needed to free) + * @xdp_rxq: RX-ring info avail for XDP */ struct nfp_net_rx_ring { struct nfp_net_r_vector *r_vec; @@ -361,13 +363,14 @@ struct nfp_net_rx_ring { u32 idx; int fl_qcidx; + unsigned int size; u8 __iomem *qcp_fl; struct nfp_net_rx_buf *rxbufs; struct nfp_net_rx_desc *rxds; dma_addr_t dma; - unsigned int size; + struct xdp_rxq_info xdp_rxq; } ____cacheline_aligned; /** @@ -548,6 +551,8 @@ struct nfp_net_dp { * @max_r_vecs: Number of allocated interrupt vectors for RX/TX * @max_tx_rings: Maximum number of TX rings supported by the Firmware * @max_rx_rings: Maximum number of RX rings supported by the Firmware + * @stride_rx: Queue controller RX queue spacing + * @stride_tx: Queue controller TX queue spacing * @r_vecs: Pre-allocated array of ring vectors * @irq_entries: Pre-allocated array of MSI-X entries * @lsc_handler: Handler for Link State Change interrupt @@ -573,6 +578,7 @@ struct nfp_net_dp { * @qcp_cfg: Pointer to QCP queue used for configuration notification * @tx_bar: Pointer to mapped TX queues * @rx_bar: Pointer to mapped FL/RX queues + * @tlv_caps: Parsed TLV capabilities * @debugfs_dir: Device directory in debugfs * @vnic_list: Entry on device vNIC list * @pdev: Backpointer to PCI device @@ -639,6 +645,8 @@ struct nfp_net { u8 __iomem *tx_bar; u8 __iomem *rx_bar; + struct nfp_net_tlv_caps tlv_caps; + struct dentry *debugfs_dir; struct list_head vnic_list; @@ -834,6 +842,18 @@ static inline const char *nfp_net_name(struct nfp_net *nn) return nn->dp.netdev ? 
nn->dp.netdev->name : "ctrl"; } +static inline void nfp_ctrl_lock(struct nfp_net *nn) + __acquires(&nn->r_vecs[0].lock) +{ + spin_lock_bh(&nn->r_vecs[0].lock); +} + +static inline void nfp_ctrl_unlock(struct nfp_net *nn) + __releases(&nn->r_vecs[0].lock) +{ + spin_unlock_bh(&nn->r_vecs[0].lock); +} + /* Globals */ extern const char nfp_driver_version[]; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 99b0487b6d82..c0fd351c86b1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -293,9 +293,15 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update) */ static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd) { + u32 mbox = nn->tlv_caps.mbox_off; int ret; - nn_writeq(nn, NFP_NET_CFG_MBOX_CMD, mbox_cmd); + if (!nfp_net_has_mbox(&nn->tlv_caps)) { + nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd); + return -EIO; + } + + nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd); ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX); if (ret) { @@ -303,7 +309,7 @@ static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd) return ret; } - return -nn_readl(nn, NFP_NET_CFG_MBOX_RET); + return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET); } /* Interrupt configuration and handling @@ -1610,11 +1616,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) unsigned int true_bufsz; struct sk_buff *skb; int pkts_polled = 0; + struct xdp_buff xdp; int idx; rcu_read_lock(); xdp_prog = READ_ONCE(dp->xdp_prog); true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz; + xdp.rxq = &rx_ring->xdp_rxq; tx_ring = r_vec->xdp_ring; while (pkts_polled < budget) { @@ -1705,7 +1713,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) dp->bpf_offload_xdp) && !meta.portid) { void *orig_data = rxbuf->frag + pkt_off; unsigned int dma_off; - struct xdp_buff xdp; int act; xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM; @@ -1919,6 +1926,13 @@ err_free: return false; } +bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb) +{ + struct nfp_net_r_vector *r_vec = &nn->r_vecs[0]; + + return nfp_ctrl_tx_one(nn, r_vec, skb, false); +} + bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb) { struct nfp_net_r_vector *r_vec = &nn->r_vecs[0]; @@ -2254,6 +2268,8 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) struct nfp_net_r_vector *r_vec = rx_ring->r_vec; struct nfp_net_dp *dp = &r_vec->nfp_net->dp; + if (dp->netdev) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); kfree(rx_ring->rxbufs); if (rx_ring->rxds) @@ -2277,7 +2293,14 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) static int nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) { - int sz; + int sz, err; + + if (dp->netdev) { + err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, + rx_ring->idx); + if (err < 0) + return err; + } rx_ring->cnt = dp->rxd_cnt; rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; @@ -2441,7 +2464,7 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn) * ME timestamp ticks. There are 16 ME clock cycles for each timestamp * count. 
*/ - factor = nn->me_freq_mhz / 16; + factor = nn->tlv_caps.me_freq_mhz / 16; /* copy RX interrupt coalesce parameters */ value = (nn->rx_coalesce_max_frames << 16) | @@ -2852,6 +2875,11 @@ static void nfp_net_set_rx_mode(struct net_device *netdev) new_ctrl = nn->dp.ctrl; + if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI) + new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC; + else + new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC; + if (netdev->flags & IFF_PROMISC) { if (nn->cap & NFP_NET_CFG_CTRL_PROMISC) new_ctrl |= NFP_NET_CFG_CTRL_PROMISC; @@ -3036,6 +3064,11 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) { struct nfp_net *nn = netdev_priv(netdev); struct nfp_net_dp *dp; + int err; + + err = nfp_app_change_mtu(nn->app, netdev, new_mtu); + if (err) + return err; dp = nfp_net_clone_dp(nn); if (!dp) @@ -3057,8 +3090,9 @@ nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) if (!vid) return 0; - nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid); - nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q); + nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid); + nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO, + ETH_P_8021Q); return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD); } @@ -3074,8 +3108,9 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) if (!vid) return 0; - nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid); - nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q); + nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid); + nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO, + ETH_P_8021Q); return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); } @@ -3368,7 +3403,7 @@ nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags, if (err) return err; - err = nfp_app_xdp_offload(nn->app, nn, offload_prog); + err = nfp_app_xdp_offload(nn->app, nn, offload_prog, extack); if (err && flags & XDP_FLAGS_HW_MODE) return err; @@ -3394,17 +3429,10 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) if (nn->dp.bpf_offload_xdp) xdp->prog_attached = XDP_ATTACHED_HW; xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0; + xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0; return 0; - case BPF_OFFLOAD_VERIFIER_PREP: - return nfp_app_bpf_verifier_prep(nn->app, nn, xdp); - case BPF_OFFLOAD_TRANSLATE: - return nfp_app_bpf_translate(nn->app, nn, - xdp->offload.prog); - case BPF_OFFLOAD_DESTROY: - return nfp_app_bpf_destroy(nn->app, nn, - xdp->offload.prog); default: - return -EINVAL; + return nfp_app_bpf(nn->app, nn, xdp); } } @@ -3563,9 +3591,6 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, */ void nfp_net_free(struct nfp_net *nn) { - if (nn->xdp_prog) - bpf_prog_put(nn->xdp_prog); - if (nn->dp.netdev) free_netdev(nn->dp.netdev); else @@ -3731,18 +3756,8 @@ static void nfp_net_netdev_init(struct nfp_net *nn) nfp_net_set_ethtool_ops(netdev); } -/** - * nfp_net_init() - Initialise/finalise the nfp_net structure - * @nn: NFP Net device structure - * - * Return: 0 on success or negative errno on error. 
- */ -int nfp_net_init(struct nfp_net *nn) +static int nfp_net_read_caps(struct nfp_net *nn) { - int err; - - nn->dp.rx_dma_dir = DMA_FROM_DEVICE; - /* Get some of the read-only fields from the BAR */ nn->cap = nn_readl(nn, NFP_NET_CFG_CAP); nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU); @@ -3775,6 +3790,29 @@ int nfp_net_init(struct nfp_net *nn) nn->dp.rx_offset = NFP_NET_RX_OFFSET; } + /* For control vNICs mask out the capabilities app doesn't want. */ + if (!nn->dp.netdev) + nn->cap &= nn->app->type->ctrl_cap_mask; + + return 0; +} + +/** + * nfp_net_init() - Initialise/finalise the nfp_net structure + * @nn: NFP Net device structure + * + * Return: 0 on success or negative errno on error. + */ +int nfp_net_init(struct nfp_net *nn) +{ + int err; + + nn->dp.rx_dma_dir = DMA_FROM_DEVICE; + + err = nfp_net_read_caps(nn); + if (err) + return err; + /* Set default MTU and Freelist buffer size */ if (nn->max_mtu < NFP_NET_DEFAULT_MTU) nn->dp.mtu = nn->max_mtu; @@ -3791,8 +3829,6 @@ int nfp_net_init(struct nfp_net *nn) /* Allow L2 Broadcast and Multicast through by default, if supported */ if (nn->cap & NFP_NET_CFG_CTRL_L2BC) nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC; - if (nn->cap & NFP_NET_CFG_CTRL_L2MC) - nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC; /* Allow IRQ moderation, if supported */ if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) { @@ -3800,6 +3836,11 @@ int nfp_net_init(struct nfp_net *nn) nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD; } + err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar, + &nn->tlv_caps); + if (err) + return err; + if (nn->dp.netdev) nfp_net_netdev_init(nn); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c new file mode 100644 index 000000000000..ffb402746ad4 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2018 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General Public License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ + +#include <linux/bitfield.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/types.h> + +#include "nfp_net_ctrl.h" +#include "nfp_net.h" + +static void nfp_net_tlv_caps_reset(struct nfp_net_tlv_caps *caps) +{ + memset(caps, 0, sizeof(*caps)); + caps->me_freq_mhz = 1200; + caps->mbox_off = NFP_NET_CFG_MBOX_BASE; + caps->mbox_len = NFP_NET_CFG_MBOX_VAL_MAX_SZ; +} + +int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem, + struct nfp_net_tlv_caps *caps) +{ + u8 __iomem *data = ctrl_mem + NFP_NET_CFG_TLV_BASE; + u8 __iomem *end = ctrl_mem + NFP_NET_CFG_BAR_SZ; + u32 hdr; + + nfp_net_tlv_caps_reset(caps); + + hdr = readl(data); + if (!hdr) + return 0; + + while (true) { + unsigned int length, offset; + u32 hdr = readl(data); + + length = FIELD_GET(NFP_NET_CFG_TLV_HEADER_LENGTH, hdr); + offset = data - ctrl_mem + NFP_NET_CFG_TLV_BASE; + + /* Advance past the header */ + data += 4; + + if (length % NFP_NET_CFG_TLV_LENGTH_INC) { + dev_err(dev, "TLV size not multiple of %u len:%u\n", + NFP_NET_CFG_TLV_LENGTH_INC, length); + return -EINVAL; + } + if (data + length > end) { + dev_err(dev, "oversized TLV offset:%u len:%u\n", + offset, length); + return -EINVAL; + } + + switch (FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr)) { + case NFP_NET_CFG_TLV_TYPE_UNKNOWN: + dev_err(dev, "NULL TLV at offset:%u\n", offset); + return -EINVAL; + case NFP_NET_CFG_TLV_TYPE_RESERVED: + break; + case NFP_NET_CFG_TLV_TYPE_END: + if (!length) + return 0; + + dev_err(dev, "END TLV should be empty, has len:%d\n", + length); + return -EINVAL; + case NFP_NET_CFG_TLV_TYPE_ME_FREQ: + if (length != 4) { + dev_err(dev, + "ME FREQ TLV should be 4B, is %dB\n", + length); + return -EINVAL; + } + + caps->me_freq_mhz = readl(data); + break; + case NFP_NET_CFG_TLV_TYPE_MBOX: + if (!length) { + caps->mbox_off = 0; + caps->mbox_len = 0; + } else { + caps->mbox_off = data - ctrl_mem; + caps->mbox_len = length; + } + break; + default: + if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr)) + break; + + dev_err(dev, "unknown TLV type:%u offset:%u len:%u\n", + FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr), + offset, length); + return -EINVAL; + } + + data += length; + if (data + 4 > end) { + dev_err(dev, "reached end of BAR without END TLV\n"); + return -EINVAL; + } + } + + /* Not reached */ + return -EINVAL; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 782d452e0fc2..eeecef2caac6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -43,9 +43,7 @@ #ifndef _NFP_NET_CTRL_H_ #define _NFP_NET_CTRL_H_ -/* IMPORTANT: This header file is shared with the FW, - * no OS specific constructs, please! - */ +#include <linux/types.h> /** * Configuration BAR size. 
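nfp_net_tlv_caps_parse() above walks the control BAR as a flat chain of 4-byte headers, each carrying a required bit, a 15-bit type and a 16-bit byte length, followed by the value. A host-side sketch of the same walk over a plain memory buffer, reusing the NFP_NET_CFG_TLV_HEADER_* layout defined later in this patch (native-endian loads stand in for readl(), and only the END and ME_FREQ types are handled):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TLV_HDR_REQUIRED 0x80000000u /* NFP_NET_CFG_TLV_HEADER_REQUIRED */
    #define TLV_HDR_TYPE     0x7fff0000u /* NFP_NET_CFG_TLV_HEADER_TYPE */
    #define TLV_HDR_LENGTH   0x0000ffffu /* NFP_NET_CFG_TLV_HEADER_LENGTH */

    #define TLV_TYPE_END     2
    #define TLV_TYPE_ME_FREQ 3

    static int tlv_walk(const uint8_t *bar, size_t bar_sz)
    {
        size_t off = 0;

        while (off + 4 <= bar_sz) {
            uint32_t hdr, type, len;

            memcpy(&hdr, bar + off, 4); /* the driver uses readl() here */
            type = (hdr & TLV_HDR_TYPE) >> 16;
            len = hdr & TLV_HDR_LENGTH;

            if (len % 4 || off + 4 + len > bar_sz)
                return -1; /* corresponds to the driver's -EINVAL paths */

            if (type == TLV_TYPE_END)
                return len ? -1 : 0; /* END must be empty */

            if (type == TLV_TYPE_ME_FREQ && len == 4) {
                uint32_t mhz;

                memcpy(&mhz, bar + off + 4, 4);
                printf("ME freq: %u MHz\n", mhz);
            } else if (hdr & TLV_HDR_REQUIRED) {
                return -1; /* unknown TLV that FW marked as mandatory */
            }
            off += 4 + len; /* the header is not counted in the length */
        }
        return -1; /* ran off the buffer without seeing an END TLV */
    }

    int main(void)
    {
        uint8_t bar[12] = { 0 };
        uint32_t me = (TLV_TYPE_ME_FREQ << 16) | 4, mhz = 1200;
        uint32_t end = TLV_TYPE_END << 16;

        memcpy(bar, &me, 4);
        memcpy(bar + 4, &mhz, 4);
        memcpy(bar + 8, &end, 4);
        return tlv_walk(bar, sizeof(bar)) ? 1 : 0;
    }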
@@ -91,23 +89,24 @@ #define NFP_NET_RSS_IPV6_EX_UDP 9 /** - * @NFP_NET_TXR_MAX: Maximum number of TX rings - * @NFP_NET_RXR_MAX: Maximum number of RX rings + * Ring counts + * %NFP_NET_TXR_MAX: Maximum number of TX rings + * %NFP_NET_RXR_MAX: Maximum number of RX rings */ #define NFP_NET_TXR_MAX 64 #define NFP_NET_RXR_MAX 64 /** * Read/Write config words (0x0000 - 0x002c) - * @NFP_NET_CFG_CTRL: Global control - * @NFP_NET_CFG_UPDATE: Indicate which fields are updated - * @NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings - * @NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings - * @NFP_NET_CFG_MTU: Set MTU size - * @NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU) - * @NFP_NET_CFG_EXN: MSI-X table entry for exceptions - * @NFP_NET_CFG_LSC: MSI-X table entry for link state changes - * @NFP_NET_CFG_MACADDR: MAC address + * %NFP_NET_CFG_CTRL: Global control + * %NFP_NET_CFG_UPDATE: Indicate which fields are updated + * %NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings + * %NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings + * %NFP_NET_CFG_MTU: Set MTU size + * %NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU) + * %NFP_NET_CFG_EXN: MSI-X table entry for exceptions + * %NFP_NET_CFG_LSC: MSI-X table entry for link state changes + * %NFP_NET_CFG_MACADDR: MAC address * * TODO: * - define Error details in UPDATE @@ -176,14 +175,14 @@ /** * Read-only words (0x0030 - 0x0050): - * @NFP_NET_CFG_VERSION: Firmware version number - * @NFP_NET_CFG_STS: Status - * @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL) - * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings - * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings - * @NFP_NET_CFG_MAX_MTU: Maximum support MTU - * @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only) - * @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only) + * %NFP_NET_CFG_VERSION: Firmware version number + * %NFP_NET_CFG_STS: Status + * %NFP_NET_CFG_CAP: Capabilities (same bits as %NFP_NET_CFG_CTRL) + * %NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings + * %NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings + * %NFP_NET_CFG_MAX_MTU: Maximum support MTU + * %NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only) + * %NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only) * * TODO: * - define more STS bits @@ -228,31 +227,37 @@ /** * RSS capabilities - * @NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as - * @NFP_NET_CFG_RSS_HFUNC) + * %NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as + * %NFP_NET_CFG_RSS_HFUNC) */ #define NFP_NET_CFG_RSS_CAP 0x0054 #define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000 /** + * TLV area start + * %NFP_NET_CFG_TLV_BASE: start anchor of the TLV area + */ +#define NFP_NET_CFG_TLV_BASE 0x0058 + +/** * VXLAN/UDP encap configuration - * @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports - * @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes + * %NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports + * %NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes */ #define NFP_NET_CFG_VXLAN_PORT 0x0060 #define NFP_NET_CFG_VXLAN_SZ 0x0008 /** * BPF section - * @NFP_NET_CFG_BPF_ABI: BPF ABI version - * @NFP_NET_CFG_BPF_CAP: BPF capabilities - * @NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes - * @NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded - * @NFP_NET_CFG_BPF_DONE: Offset to jump to on exit - * 
@NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks - * @NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks - * @NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions - * @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code + * %NFP_NET_CFG_BPF_ABI: BPF ABI version + * %NFP_NET_CFG_BPF_CAP: BPF capabilities + * %NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes + * %NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded + * %NFP_NET_CFG_BPF_DONE: Offset to jump to on exit + * %NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks + * %NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks + * %NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions + * %NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code */ #define NFP_NET_CFG_BPF_ABI 0x0080 #define NFP_NET_BPF_ABI 2 @@ -278,9 +283,9 @@ /** * RSS configuration (0x0100 - 0x01ac): * Used only when NFP_NET_CFG_CTRL_RSS is enabled - * @NFP_NET_CFG_RSS_CFG: RSS configuration word - * @NFP_NET_CFG_RSS_KEY: RSS "secret" key - * @NFP_NET_CFG_RSS_ITBL: RSS indirection table + * %NFP_NET_CFG_RSS_CFG: RSS configuration word + * %NFP_NET_CFG_RSS_KEY: RSS "secret" key + * %NFP_NET_CFG_RSS_ITBL: RSS indirection table */ #define NFP_NET_CFG_RSS_BASE 0x0100 #define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE @@ -305,13 +310,13 @@ /** * TX ring configuration (0x200 - 0x800) - * @NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration - * @NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries) - * @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries) - * @NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries) - * @NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries) - * @NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries) - * @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet + * %NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration + * %NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries) + * %NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries) + * %NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries) + * %NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries) + * %NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries) + * %NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet */ #define NFP_NET_CFG_TXR_BASE 0x0200 #define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8)) @@ -325,12 +330,12 @@ /** * RX ring configuration (0x0800 - 0x0c00) - * @NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration - * @NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries) - * @NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries) - * @NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries) - * @NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries) - * @NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries) + * %NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration + * %NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries) + * %NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries) + * %NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries) + * %NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries) + * %NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries) */ #define NFP_NET_CFG_RXR_BASE 0x0800 #define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8)) @@ -343,7 +348,7 @@ /** * Interrupt Control/Cause registers (0x0c00 - 0x0d00) * These registers 
are only used when MSI-X auto-masking is not - * enabled (@NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index + * enabled (%NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is indexed * by MSI-X entry and are 1B in size. If an entry is zero, the * corresponding entry is enabled. If the FW generates an interrupt, * it writes a cause into the corresponding field. This also masks @@ -393,8 +398,8 @@ /** * Per ring stats (0x1000 - 0x1800) * options, 64bit per entry - * @NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count) - * @NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count) + * %NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count) + * %NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count) */ #define NFP_NET_CFG_TXR_STATS_BASE 0x1000 #define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \ @@ -408,24 +413,105 @@ * 4B used for update command and 4B return code * followed by a max of 504B of variable length value */ -#define NFP_NET_CFG_MBOX_CMD 0x1800 -#define NFP_NET_CFG_MBOX_RET 0x1804 -#define NFP_NET_CFG_MBOX_VAL 0x1808 +#define NFP_NET_CFG_MBOX_BASE 0x1800 #define NFP_NET_CFG_MBOX_VAL_MAX_SZ 0x1F8 +#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0 +#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4 +#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8 +#define NFP_NET_CFG_MBOX_SIMPLE_LEN 0x12 + #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1 #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2 /** * VLAN filtering using general use mailbox - * @NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox - * @NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter - * @NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter - * @NFP_NET_CFG_VXLAN_SZ: Size of the VLAN filter mailbox in bytes + * %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox + * %NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter + * %NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter + * %NFP_NET_CFG_VLAN_FILTER_SZ: Size of the VLAN filter mailbox in bytes */ -#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_VAL +#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_SIMPLE_VAL #define NFP_NET_CFG_VLAN_FILTER_VID NFP_NET_CFG_VLAN_FILTER #define NFP_NET_CFG_VLAN_FILTER_PROTO (NFP_NET_CFG_VLAN_FILTER + 2) #define NFP_NET_CFG_VLAN_FILTER_SZ 0x0004 +/** + * TLV capabilities + * %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV + * %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV + * %NFP_NET_CFG_TLV_LENGTH: Offset of length within the TLV + * %NFP_NET_CFG_TLV_LENGTH_INC: TLV length increments + * %NFP_NET_CFG_TLV_VALUE: Offset of value within the TLV + * + * List of simple TLV structures, first one starts at %NFP_NET_CFG_TLV_BASE. + * Last structure must be of type %NFP_NET_CFG_TLV_TYPE_END. Presence of TLVs + * is indicated by %NFP_NET_CFG_TLV_BASE being non-zero. TLV structures may + * fill the entire remainder of the BAR or be shorter. FW must make sure TLVs + * don't conflict with other features which allocate space beyond + * %NFP_NET_CFG_TLV_BASE. %NFP_NET_CFG_TLV_TYPE_RESERVED should be used to wrap + * space used by such features. + * Note that the 4 byte TLV header is not counted in %NFP_NET_CFG_TLV_LENGTH. + */
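The comment block above pins down the writer side of the contract: TLVs start at %NFP_NET_CFG_TLV_BASE, every length is a multiple of the 4-byte increment and excludes the header, and the chain is terminated by an empty END TLV. A small sketch of an emitter obeying those rules, roughly what firmware would do when populating the BAR (helper names and buffer handling are invented; a type above 0x7fff would not fit the 15-bit type field):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define TLV_HDR_TYPE     0x7fff0000u
    #define TLV_LENGTH_INC   4 /* NFP_NET_CFG_TLV_LENGTH_INC */
    #define TLV_TYPE_END     2
    #define TLV_TYPE_ME_FREQ 3

    /* Append one TLV at @off; returns the next write offset, or 0 when the
     * value (padded up to the 4-byte increment) would overflow the BAR. */
    static size_t tlv_put(uint8_t *bar, size_t bar_sz, size_t off,
                          uint16_t type, const void *val, uint16_t len)
    {
        uint16_t padded = (len + TLV_LENGTH_INC - 1) & ~(TLV_LENGTH_INC - 1);
        uint32_t hdr = (((uint32_t)type << 16) & TLV_HDR_TYPE) | padded;

        if (off + 4 + padded > bar_sz)
            return 0;
        memcpy(bar + off, &hdr, 4);
        memset(bar + off + 4, 0, padded); /* zero any padding bytes */
        if (len)
            memcpy(bar + off + 4, val, len);
        return off + 4 + padded;
    }

    int main(void)
    {
        uint8_t bar[64] = { 0 };
        uint32_t mhz = 1200;
        size_t off;

        off = tlv_put(bar, sizeof(bar), 0, TLV_TYPE_ME_FREQ, &mhz, 4);
        /* Close the chain with an empty END TLV, as the parser requires. */
        if (!off || !tlv_put(bar, sizeof(bar), off, TLV_TYPE_END, NULL, 0))
            return 1;
        return 0;
    }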
+#define NFP_NET_CFG_TLV_TYPE 0x00 +#define NFP_NET_CFG_TLV_TYPE_REQUIRED 0x8000 +#define NFP_NET_CFG_TLV_LENGTH 0x02 +#define NFP_NET_CFG_TLV_LENGTH_INC 4 +#define NFP_NET_CFG_TLV_VALUE 0x04 + +#define NFP_NET_CFG_TLV_HEADER_REQUIRED 0x80000000 +#define NFP_NET_CFG_TLV_HEADER_TYPE 0x7fff0000 +#define NFP_NET_CFG_TLV_HEADER_LENGTH 0x0000ffff + +/** + * Capability TLV types + * + * %NFP_NET_CFG_TLV_TYPE_UNKNOWN: + * Special TLV type to catch bugs, should never be encountered. Drivers should + * treat encountering this type as an error and refuse to probe. + * + * %NFP_NET_CFG_TLV_TYPE_RESERVED: + * Reserved space, may contain legacy fixed-offset fields, or be used for + * padding. The use of this type should be otherwise avoided. + * + * %NFP_NET_CFG_TLV_TYPE_END: + * Empty, end of TLV list. Must be the last TLV. Drivers will stop processing + * further TLVs when encountered. + * + * %NFP_NET_CFG_TLV_TYPE_ME_FREQ: + * Single word, ME frequency in MHz as used in calculation for + * %NFP_NET_CFG_RXR_IRQ_MOD and %NFP_NET_CFG_TXR_IRQ_MOD. + * + * %NFP_NET_CFG_TLV_TYPE_MBOX: + * Variable, mailbox area. Overwrites the default location which is + * %NFP_NET_CFG_MBOX_BASE and length %NFP_NET_CFG_MBOX_VAL_MAX_SZ. + */ +#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0 +#define NFP_NET_CFG_TLV_TYPE_RESERVED 1 +#define NFP_NET_CFG_TLV_TYPE_END 2 +#define NFP_NET_CFG_TLV_TYPE_ME_FREQ 3 +#define NFP_NET_CFG_TLV_TYPE_MBOX 4 + +struct device; + +/** + * struct nfp_net_tlv_caps - parsed control BAR TLV capabilities + * @me_freq_mhz: ME clock_freq (MHz) + * @mbox_off: vNIC mailbox area offset + * @mbox_len: vNIC mailbox area length + */ +struct nfp_net_tlv_caps { + u32 me_freq_mhz; + unsigned int mbox_off; + unsigned int mbox_len; +}; + +int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem, + struct nfp_net_tlv_caps *caps); + +static inline bool nfp_net_has_mbox(struct nfp_net_tlv_caps *caps) +{ + return caps->mbox_len >= NFP_NET_CFG_MBOX_SIMPLE_LEN; +} + #endif /* _NFP_NET_CTRL_H_ */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c new file mode 100644 index 000000000000..bb8ed460086e --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c @@ -0,0 +1,811 @@ +/* + * Copyright (C) 2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General Public License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/ethtool.h> +#include <linux/vmalloc.h> + +#include "nfp_asm.h" +#include "nfp_main.h" +#include "nfpcore/nfp.h" +#include "nfpcore/nfp_nffw.h" +#include "nfpcore/nfp6000/nfp6000.h" + +#define NFP_DUMP_SPEC_RTSYM "_abi_dump_spec" + +#define ALIGN8(x) ALIGN(x, 8) + +enum nfp_dumpspec_type { + NFP_DUMPSPEC_TYPE_CPP_CSR = 0, + NFP_DUMPSPEC_TYPE_XPB_CSR = 1, + NFP_DUMPSPEC_TYPE_ME_CSR = 2, + NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR = 3, + NFP_DUMPSPEC_TYPE_RTSYM = 4, + NFP_DUMPSPEC_TYPE_HWINFO = 5, + NFP_DUMPSPEC_TYPE_FWNAME = 6, + NFP_DUMPSPEC_TYPE_HWINFO_FIELD = 7, + NFP_DUMPSPEC_TYPE_PROLOG = 10000, + NFP_DUMPSPEC_TYPE_ERROR = 10001, +}; + +/* The following structs must be carefully aligned so that they can be used to + * interpret the binary dumpspec and populate the dump data in a deterministic + * way. + */ + +/* generic type plus length */ +struct nfp_dump_tl { + __be32 type; + __be32 length; /* chunk length to follow, aligned to 8 bytes */ + char data[0]; +}; + +/* NFP CPP parameters */ +struct nfp_dumpspec_cpp_isl_id { + u8 target; + u8 action; + u8 token; + u8 island; +}; + +struct nfp_dump_common_cpp { + struct nfp_dumpspec_cpp_isl_id cpp_id; + __be32 offset; /* address to start dump */ + __be32 dump_length; /* total bytes to dump, aligned to reg size */ +}; + +/* CSR dumpables */ +struct nfp_dumpspec_csr { + struct nfp_dump_tl tl; + struct nfp_dump_common_cpp cpp; + __be32 register_width; /* in bits */ +}; + +struct nfp_dumpspec_rtsym { + struct nfp_dump_tl tl; + char rtsym[0]; +}; + +/* header for register dumpable */ +struct nfp_dump_csr { + struct nfp_dump_tl tl; + struct nfp_dump_common_cpp cpp; + __be32 register_width; /* in bits */ + __be32 error; /* error code encountered while reading */ + __be32 error_offset; /* offset being read when error occurred */ +}; + +struct nfp_dump_rtsym { + struct nfp_dump_tl tl; + struct nfp_dump_common_cpp cpp; + __be32 error; /* error code encountered while reading */ + u8 padded_name_length; /* pad so data starts at 8 byte boundary */ + char rtsym[0]; + /* after padded_name_length, there is dump_length data */ +}; + +struct nfp_dump_prolog { + struct nfp_dump_tl tl; + __be32 dump_level; +}; + +struct nfp_dump_error { + struct nfp_dump_tl tl; + __be32 error; + char padding[4]; + char spec[0]; +}; + +/* to track state through debug size calculation TLV traversal */ +struct nfp_level_size { + __be32 requested_level; /* input */ + u32 total_size; /* output */ +}; + +/* to track state during debug dump creation TLV traversal */ +struct nfp_dump_state { + __be32 requested_level; /* input param */ + u32 dumped_size; /* adds up to size of dumped data */ + u32 buf_size; /* size of buffer pointer to by p */ + void *p; /* current point in dump buffer */ +}; + +typedef int (*nfp_tlv_visit)(struct nfp_pf *pf, struct nfp_dump_tl *tl, + void *param); + +static int +nfp_traverse_tlvs(struct nfp_pf *pf, void *data, u32 data_length, void *param, + nfp_tlv_visit tlv_visit) +{ + long long remaining = data_length; + struct nfp_dump_tl *tl; + u32 total_tlv_size; + void *p = data; + int err; + + while (remaining >= sizeof(*tl)) { + tl = p; + if (!tl->type && !tl->length) + break; + + if (be32_to_cpu(tl->length) > remaining - sizeof(*tl)) + return -EINVAL; + + total_tlv_size = 
sizeof(*tl) + be32_to_cpu(tl->length); + + /* Spec TLVs should be aligned to 4 bytes. */ + if (total_tlv_size % 4 != 0) + return -EINVAL; + + p += total_tlv_size; + remaining -= total_tlv_size; + err = tlv_visit(pf, tl, param); + if (err) + return err; + } + + return 0; +} + +static u32 nfp_get_numeric_cpp_id(struct nfp_dumpspec_cpp_isl_id *cpp_id) +{ + return NFP_CPP_ISLAND_ID(cpp_id->target, cpp_id->action, cpp_id->token, + cpp_id->island); +} + +struct nfp_dumpspec * +nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl) +{ + const struct nfp_rtsym *specsym; + struct nfp_dumpspec *dumpspec; + int bytes_read; + u32 cpp_id; + + specsym = nfp_rtsym_lookup(rtbl, NFP_DUMP_SPEC_RTSYM); + if (!specsym) + return NULL; + + /* expected size of this buffer is in the order of tens of kilobytes */ + dumpspec = vmalloc(sizeof(*dumpspec) + specsym->size); + if (!dumpspec) + return NULL; + + dumpspec->size = specsym->size; + + cpp_id = NFP_CPP_ISLAND_ID(specsym->target, NFP_CPP_ACTION_RW, 0, + specsym->domain); + + bytes_read = nfp_cpp_read(cpp, cpp_id, specsym->addr, dumpspec->data, + specsym->size); + if (bytes_read != specsym->size) { + vfree(dumpspec); + nfp_warn(cpp, "Debug dump specification read failed.\n"); + return NULL; + } + + return dumpspec; +} + +static int nfp_dump_error_tlv_size(struct nfp_dump_tl *spec) +{ + return ALIGN8(sizeof(struct nfp_dump_error) + sizeof(*spec) + + be32_to_cpu(spec->length)); +} + +static int nfp_calc_fwname_tlv_size(struct nfp_pf *pf) +{ + u32 fwname_len = strlen(nfp_mip_name(pf->mip)); + + return sizeof(struct nfp_dump_tl) + ALIGN8(fwname_len + 1); +} + +static int nfp_calc_hwinfo_field_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec) +{ + u32 tl_len, key_len; + const char *value; + + tl_len = be32_to_cpu(spec->length); + key_len = strnlen(spec->data, tl_len); + if (key_len == tl_len) + return nfp_dump_error_tlv_size(spec); + + value = nfp_hwinfo_lookup(pf->hwinfo, spec->data); + if (!value) + return nfp_dump_error_tlv_size(spec); + + return sizeof(struct nfp_dump_tl) + ALIGN8(key_len + strlen(value) + 2); +} + +static bool nfp_csr_spec_valid(struct nfp_dumpspec_csr *spec_csr) +{ + u32 required_read_sz = sizeof(*spec_csr) - sizeof(spec_csr->tl); + u32 available_sz = be32_to_cpu(spec_csr->tl.length); + u32 reg_width; + + if (available_sz < required_read_sz) + return false; + + reg_width = be32_to_cpu(spec_csr->register_width); + + return reg_width == 32 || reg_width == 64; +} + +static int +nfp_calc_rtsym_dump_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec) +{ + struct nfp_rtsym_table *rtbl = pf->rtbl; + struct nfp_dumpspec_rtsym *spec_rtsym; + const struct nfp_rtsym *sym; + u32 tl_len, key_len; + u32 size; + + spec_rtsym = (struct nfp_dumpspec_rtsym *)spec; + tl_len = be32_to_cpu(spec->length); + key_len = strnlen(spec_rtsym->rtsym, tl_len); + if (key_len == tl_len) + return nfp_dump_error_tlv_size(spec); + + sym = nfp_rtsym_lookup(rtbl, spec_rtsym->rtsym); + if (!sym) + return nfp_dump_error_tlv_size(spec); + + if (sym->type == NFP_RTSYM_TYPE_ABS) + size = sizeof(sym->addr); + else + size = sym->size; + + return ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1) + + ALIGN8(size); +} + +static int +nfp_add_tlv_size(struct nfp_pf *pf, struct nfp_dump_tl *tl, void *param) +{ + struct nfp_dumpspec_csr *spec_csr; + u32 *size = param; + u32 hwinfo_size; + + switch (be32_to_cpu(tl->type)) { + case NFP_DUMPSPEC_TYPE_FWNAME: + *size += nfp_calc_fwname_tlv_size(pf); + break; + case NFP_DUMPSPEC_TYPE_CPP_CSR: + case 
NFP_DUMPSPEC_TYPE_XPB_CSR: + case NFP_DUMPSPEC_TYPE_ME_CSR: + spec_csr = (struct nfp_dumpspec_csr *)tl; + if (!nfp_csr_spec_valid(spec_csr)) + *size += nfp_dump_error_tlv_size(tl); + else + *size += ALIGN8(sizeof(struct nfp_dump_csr)) + + ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length)); + break; + case NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR: + spec_csr = (struct nfp_dumpspec_csr *)tl; + if (!nfp_csr_spec_valid(spec_csr)) + *size += nfp_dump_error_tlv_size(tl); + else + *size += ALIGN8(sizeof(struct nfp_dump_csr)) + + ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length) * + NFP_IND_NUM_CONTEXTS); + break; + case NFP_DUMPSPEC_TYPE_RTSYM: + *size += nfp_calc_rtsym_dump_sz(pf, tl); + break; + case NFP_DUMPSPEC_TYPE_HWINFO: + hwinfo_size = nfp_hwinfo_get_packed_str_size(pf->hwinfo); + *size += sizeof(struct nfp_dump_tl) + ALIGN8(hwinfo_size); + break; + case NFP_DUMPSPEC_TYPE_HWINFO_FIELD: + *size += nfp_calc_hwinfo_field_sz(pf, tl); + break; + default: + *size += nfp_dump_error_tlv_size(tl); + break; + } + + return 0; +} + +static int +nfp_calc_specific_level_size(struct nfp_pf *pf, struct nfp_dump_tl *dump_level, + void *param) +{ + struct nfp_level_size *lev_sz = param; + + if (dump_level->type != lev_sz->requested_level) + return 0; + + return nfp_traverse_tlvs(pf, dump_level->data, + be32_to_cpu(dump_level->length), + &lev_sz->total_size, nfp_add_tlv_size); +} + +s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec, + u32 flag) +{ + struct nfp_level_size lev_sz; + int err; + + lev_sz.requested_level = cpu_to_be32(flag); + lev_sz.total_size = ALIGN8(sizeof(struct nfp_dump_prolog)); + + err = nfp_traverse_tlvs(pf, spec->data, spec->size, &lev_sz, + nfp_calc_specific_level_size); + if (err) + return err; + + return lev_sz.total_size; +} + +static int nfp_add_tlv(u32 type, u32 total_tlv_sz, struct nfp_dump_state *dump) +{ + struct nfp_dump_tl *tl = dump->p; + + if (total_tlv_sz > dump->buf_size) + return -ENOSPC; + + if (dump->buf_size - total_tlv_sz < dump->dumped_size) + return -ENOSPC; + + tl->type = cpu_to_be32(type); + tl->length = cpu_to_be32(total_tlv_sz - sizeof(*tl)); + + dump->dumped_size += total_tlv_sz; + dump->p += total_tlv_sz; + + return 0; +} + +static int +nfp_dump_error_tlv(struct nfp_dump_tl *spec, int error, + struct nfp_dump_state *dump) +{ + struct nfp_dump_error *dump_header = dump->p; + u32 total_spec_size, total_size; + int err; + + total_spec_size = sizeof(*spec) + be32_to_cpu(spec->length); + total_size = ALIGN8(sizeof(*dump_header) + total_spec_size); + + err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_ERROR, total_size, dump); + if (err) + return err; + + dump_header->error = cpu_to_be32(error); + memcpy(dump_header->spec, spec, total_spec_size); + + return 0; +} + +static int nfp_dump_fwname(struct nfp_pf *pf, struct nfp_dump_state *dump) +{ + struct nfp_dump_tl *dump_header = dump->p; + u32 fwname_len, total_size; + const char *fwname; + int err; + + fwname = nfp_mip_name(pf->mip); + fwname_len = strlen(fwname); + total_size = sizeof(*dump_header) + ALIGN8(fwname_len + 1); + + err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_FWNAME, total_size, dump); + if (err) + return err; + + memcpy(dump_header->data, fwname, fwname_len); + + return 0; +} + +static int +nfp_dump_hwinfo(struct nfp_pf *pf, struct nfp_dump_tl *spec, + struct nfp_dump_state *dump) +{ + struct nfp_dump_tl *dump_header = dump->p; + u32 hwinfo_size, total_size; + char *hwinfo; + int err; + + hwinfo = nfp_hwinfo_get_packed_strings(pf->hwinfo); + hwinfo_size = nfp_hwinfo_get_packed_str_size(pf->hwinfo); + 
total_size = sizeof(*dump_header) + ALIGN8(hwinfo_size); + + err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_HWINFO, total_size, dump); + if (err) + return err; + + memcpy(dump_header->data, hwinfo, hwinfo_size); + + return 0; +} + +static int nfp_dump_hwinfo_field(struct nfp_pf *pf, struct nfp_dump_tl *spec, + struct nfp_dump_state *dump) +{ + struct nfp_dump_tl *dump_header = dump->p; + u32 tl_len, key_len, val_len; + const char *key, *value; + u32 total_size; + int err; + + tl_len = be32_to_cpu(spec->length); + key_len = strnlen(spec->data, tl_len); + if (key_len == tl_len) + return nfp_dump_error_tlv(spec, -EINVAL, dump); + + key = spec->data; + value = nfp_hwinfo_lookup(pf->hwinfo, key); + if (!value) + return nfp_dump_error_tlv(spec, -ENOENT, dump); + + val_len = strlen(value); + total_size = sizeof(*dump_header) + ALIGN8(key_len + val_len + 2); + err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_HWINFO_FIELD, total_size, dump); + if (err) + return err; + + memcpy(dump_header->data, key, key_len + 1); + memcpy(dump_header->data + key_len + 1, value, val_len + 1); + + return 0; +} + +static bool is_xpb_read(struct nfp_dumpspec_cpp_isl_id *cpp_id) +{ + return cpp_id->target == NFP_CPP_TARGET_ISLAND_XPB && + cpp_id->action == 0 && cpp_id->token == 0; +} + +static int +nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr, + struct nfp_dump_state *dump) +{ + struct nfp_dump_csr *dump_header = dump->p; + u32 reg_sz, header_size, total_size; + u32 cpp_rd_addr, max_rd_addr; + int bytes_read; + void *dest; + u32 cpp_id; + int err; + + if (!nfp_csr_spec_valid(spec_csr)) + return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump); + + reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE; + header_size = ALIGN8(sizeof(*dump_header)); + total_size = header_size + + ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length)); + dest = dump->p + header_size; + + err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump); + if (err) + return err; + + dump_header->cpp = spec_csr->cpp; + dump_header->register_width = spec_csr->register_width; + + cpp_id = nfp_get_numeric_cpp_id(&spec_csr->cpp.cpp_id); + cpp_rd_addr = be32_to_cpu(spec_csr->cpp.offset); + max_rd_addr = cpp_rd_addr + be32_to_cpu(spec_csr->cpp.dump_length); + + while (cpp_rd_addr < max_rd_addr) { + if (is_xpb_read(&spec_csr->cpp.cpp_id)) { + err = nfp_xpb_readl(pf->cpp, cpp_rd_addr, (u32 *)dest); + } else { + bytes_read = nfp_cpp_read(pf->cpp, cpp_id, cpp_rd_addr, + dest, reg_sz); + err = bytes_read == reg_sz ? 0 : -EIO; + } + if (err) { + dump_header->error = cpu_to_be32(err); + dump_header->error_offset = cpu_to_be32(cpp_rd_addr); + break; + } + cpp_rd_addr += reg_sz; + dest += reg_sz; + } + + return 0; +} + +/* Write context to CSRCtxPtr, then read from it. Then the value can be read + * from IndCtxStatus. + */ +static int +nfp_read_indirect_csr(struct nfp_cpp *cpp, + struct nfp_dumpspec_cpp_isl_id cpp_params, u32 offset, + u32 reg_sz, u32 context, void *dest) +{ + u32 csr_ctx_ptr_offs; + u32 cpp_id; + int result; + + csr_ctx_ptr_offs = nfp_get_ind_csr_ctx_ptr_offs(offset); + cpp_id = NFP_CPP_ISLAND_ID(cpp_params.target, + NFP_IND_ME_REFL_WR_SIG_INIT, + cpp_params.token, cpp_params.island); + result = nfp_cpp_writel(cpp, cpp_id, csr_ctx_ptr_offs, context); + if (result) + return result; + + cpp_id = nfp_get_numeric_cpp_id(&cpp_params); + result = nfp_cpp_read(cpp, cpp_id, csr_ctx_ptr_offs, dest, reg_sz); + if (result != reg_sz) + return result < 0 ? 
result : -EIO; + + result = nfp_cpp_read(cpp, cpp_id, offset, dest, reg_sz); + if (result != reg_sz) + return result < 0 ? result : -EIO; + + return 0; +} + +static int +nfp_read_all_indirect_csr_ctx(struct nfp_cpp *cpp, + struct nfp_dumpspec_csr *spec_csr, u32 address, + u32 reg_sz, void *dest) +{ + u32 ctx; + int err; + + for (ctx = 0; ctx < NFP_IND_NUM_CONTEXTS; ctx++) { + err = nfp_read_indirect_csr(cpp, spec_csr->cpp.cpp_id, address, + reg_sz, ctx, dest + ctx * reg_sz); + if (err) + return err; + } + + return 0; +} + +static int +nfp_dump_indirect_csr_range(struct nfp_pf *pf, + struct nfp_dumpspec_csr *spec_csr, + struct nfp_dump_state *dump) +{ + struct nfp_dump_csr *dump_header = dump->p; + u32 reg_sz, header_size, total_size; + u32 cpp_rd_addr, max_rd_addr; + u32 reg_data_length; + void *dest; + int err; + + if (!nfp_csr_spec_valid(spec_csr)) + return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump); + + reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE; + header_size = ALIGN8(sizeof(*dump_header)); + reg_data_length = be32_to_cpu(spec_csr->cpp.dump_length) * + NFP_IND_NUM_CONTEXTS; + total_size = header_size + ALIGN8(reg_data_length); + dest = dump->p + header_size; + + err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump); + if (err) + return err; + + dump_header->cpp = spec_csr->cpp; + dump_header->register_width = spec_csr->register_width; + + cpp_rd_addr = be32_to_cpu(spec_csr->cpp.offset); + max_rd_addr = cpp_rd_addr + be32_to_cpu(spec_csr->cpp.dump_length); + while (cpp_rd_addr < max_rd_addr) { + err = nfp_read_all_indirect_csr_ctx(pf->cpp, spec_csr, + cpp_rd_addr, reg_sz, dest); + if (err) { + dump_header->error = cpu_to_be32(err); + dump_header->error_offset = cpu_to_be32(cpp_rd_addr); + break; + } + cpp_rd_addr += reg_sz; + dest += reg_sz * NFP_IND_NUM_CONTEXTS; + } + + return 0; +} + +static int +nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec, + struct nfp_dump_state *dump) +{ + struct nfp_dump_rtsym *dump_header = dump->p; + struct nfp_dumpspec_cpp_isl_id cpp_params; + struct nfp_rtsym_table *rtbl = pf->rtbl; + u32 header_size, total_size, sym_size; + const struct nfp_rtsym *sym; + u32 tl_len, key_len; + int bytes_read; + u32 cpp_id; + void *dest; + int err; + + tl_len = be32_to_cpu(spec->tl.length); + key_len = strnlen(spec->rtsym, tl_len); + if (key_len == tl_len) + return nfp_dump_error_tlv(&spec->tl, -EINVAL, dump); + + sym = nfp_rtsym_lookup(rtbl, spec->rtsym); + if (!sym) + return nfp_dump_error_tlv(&spec->tl, -ENOENT, dump); + + if (sym->type == NFP_RTSYM_TYPE_ABS) + sym_size = sizeof(sym->addr); + else + sym_size = sym->size; + + header_size = + ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1); + total_size = header_size + ALIGN8(sym_size); + dest = dump->p + header_size; + + err = nfp_add_tlv(be32_to_cpu(spec->tl.type), total_size, dump); + if (err) + return err; + + dump_header->padded_name_length = + header_size - offsetof(struct nfp_dump_rtsym, rtsym); + memcpy(dump_header->rtsym, spec->rtsym, key_len + 1); + dump_header->cpp.dump_length = cpu_to_be32(sym_size); + + if (sym->type == NFP_RTSYM_TYPE_ABS) { + *(u64 *)dest = sym->addr; + } else { + cpp_params.target = sym->target; + cpp_params.action = NFP_CPP_ACTION_RW; + cpp_params.token = 0; + cpp_params.island = sym->domain; + cpp_id = nfp_get_numeric_cpp_id(&cpp_params); + dump_header->cpp.cpp_id = cpp_params; + dump_header->cpp.offset = cpu_to_be32(sym->addr); + bytes_read = nfp_cpp_read(pf->cpp, cpp_id, sym->addr, dest, + sym_size); + if 
(bytes_read != sym_size) { + if (bytes_read >= 0) + bytes_read = -EIO; + dump_header->error = cpu_to_be32(bytes_read); + } + } + + return 0; +} + +static int +nfp_dump_for_tlv(struct nfp_pf *pf, struct nfp_dump_tl *tl, void *param) +{ + struct nfp_dumpspec_rtsym *spec_rtsym; + struct nfp_dump_state *dump = param; + struct nfp_dumpspec_csr *spec_csr; + int err; + + switch (be32_to_cpu(tl->type)) { + case NFP_DUMPSPEC_TYPE_FWNAME: + err = nfp_dump_fwname(pf, dump); + if (err) + return err; + break; + case NFP_DUMPSPEC_TYPE_CPP_CSR: + case NFP_DUMPSPEC_TYPE_XPB_CSR: + case NFP_DUMPSPEC_TYPE_ME_CSR: + spec_csr = (struct nfp_dumpspec_csr *)tl; + err = nfp_dump_csr_range(pf, spec_csr, dump); + if (err) + return err; + break; + case NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR: + spec_csr = (struct nfp_dumpspec_csr *)tl; + err = nfp_dump_indirect_csr_range(pf, spec_csr, dump); + if (err) + return err; + break; + case NFP_DUMPSPEC_TYPE_RTSYM: + spec_rtsym = (struct nfp_dumpspec_rtsym *)tl; + err = nfp_dump_single_rtsym(pf, spec_rtsym, dump); + if (err) + return err; + break; + case NFP_DUMPSPEC_TYPE_HWINFO: + err = nfp_dump_hwinfo(pf, tl, dump); + if (err) + return err; + break; + case NFP_DUMPSPEC_TYPE_HWINFO_FIELD: + err = nfp_dump_hwinfo_field(pf, tl, dump); + if (err) + return err; + break; + default: + err = nfp_dump_error_tlv(tl, -EOPNOTSUPP, dump); + if (err) + return err; + } + + return 0; +} + +static int +nfp_dump_specific_level(struct nfp_pf *pf, struct nfp_dump_tl *dump_level, + void *param) +{ + struct nfp_dump_state *dump = param; + + if (dump_level->type != dump->requested_level) + return 0; + + return nfp_traverse_tlvs(pf, dump_level->data, + be32_to_cpu(dump_level->length), dump, + nfp_dump_for_tlv); +} + +static int nfp_dump_populate_prolog(struct nfp_dump_state *dump) +{ + struct nfp_dump_prolog *prolog = dump->p; + u32 total_size; + int err; + + total_size = ALIGN8(sizeof(*prolog)); + + err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_PROLOG, total_size, dump); + if (err) + return err; + + prolog->dump_level = dump->requested_level; + + return 0; +} + +int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec, + struct ethtool_dump *dump_param, void *dest) +{ + struct nfp_dump_state dump; + int err; + + dump.requested_level = cpu_to_be32(dump_param->flag); + dump.dumped_size = 0; + dump.p = dest; + dump.buf_size = dump_param->len; + + err = nfp_dump_populate_prolog(&dump); + if (err) + return err; + + err = nfp_traverse_tlvs(pf, spec->data, spec->size, &dump, + nfp_dump_specific_level); + if (err) + return err; + + /* Set size of actual dump, to trigger warning if different from + * calculated size. + */ + dump_param->len = dump.dumped_size; + + return 0; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 6c02b2d6ba06..e1dae0616f52 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -47,18 +47,16 @@ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/ethtool.h> +#include <linux/firmware.h> #include "nfpcore/nfp.h" #include "nfpcore/nfp_nsp.h" #include "nfp_app.h" +#include "nfp_main.h" #include "nfp_net_ctrl.h" #include "nfp_net.h" #include "nfp_port.h" -enum nfp_dump_diag { - NFP_DUMP_NSP_DIAG = 0, -}; - struct nfp_et_stat { char name[ETH_GSTRING_LEN]; int off; @@ -1066,15 +1064,34 @@ exit_release: return ret; } +/* Set the dump flag/level. 
Calculate the dump length for flag > 0 only (new TLV + * based dumps), since flag 0 (default) calculates the length in + * nfp_app_get_dump_flag(), and we need to support triggering a level 0 dump + * without setting the flag first, for backward compatibility. + */ static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val) { struct nfp_app *app = nfp_app_from_netdev(netdev); + s64 len; if (!app) return -EOPNOTSUPP; - if (val->flag != NFP_DUMP_NSP_DIAG) - return -EINVAL; + if (val->flag == NFP_DUMP_NSP_DIAG) { + app->pf->dump_flag = val->flag; + return 0; + } + + if (!app->pf->dumpspec) + return -EOPNOTSUPP; + + len = nfp_net_dump_calculate_size(app->pf, app->pf->dumpspec, + val->flag); + if (len < 0) + return len; + + app->pf->dump_flag = val->flag; + app->pf->dump_len = len; return 0; } @@ -1082,14 +1099,37 @@ static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val) static int nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) { - return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, NULL); + struct nfp_app *app = nfp_app_from_netdev(netdev); + + if (!app) + return -EOPNOTSUPP; + + if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG) + return nfp_dump_nsp_diag(app, dump, NULL); + + dump->flag = app->pf->dump_flag; + dump->len = app->pf->dump_len; + + return 0; } static int nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, void *buffer) { - return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, buffer); + struct nfp_app *app = nfp_app_from_netdev(netdev); + + if (!app) + return -EOPNOTSUPP; + + if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG) + return nfp_dump_nsp_diag(app, dump, buffer); + + dump->flag = app->pf->dump_flag; + dump->len = app->pf->dump_len; + + return nfp_net_dump_populate_buffer(app->pf, app->pf->dumpspec, dump, + buffer); } static int nfp_net_set_coalesce(struct net_device *netdev, @@ -1230,6 +1270,57 @@ static int nfp_net_set_channels(struct net_device *netdev, return nfp_net_set_num_rings(nn, total_rx, total_tx); } +static int +nfp_net_flash_device(struct net_device *netdev, struct ethtool_flash *flash) +{ + const struct firmware *fw; + struct nfp_app *app; + struct nfp_nsp *nsp; + struct device *dev; + int err; + + if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) + return -EOPNOTSUPP; + + app = nfp_app_from_netdev(netdev); + if (!app) + return -EOPNOTSUPP; + + dev = &app->pdev->dev; + + nsp = nfp_nsp_open(app->cpp); + if (IS_ERR(nsp)) { + err = PTR_ERR(nsp); + dev_err(dev, "Failed to access the NSP: %d\n", err); + return err; + } + + err = request_firmware_direct(&fw, flash->data, dev); + if (err) + goto exit_close_nsp; + + dev_info(dev, "Please be patient while writing flash image: %s\n", + flash->data); + dev_hold(netdev); + rtnl_unlock(); + + err = nfp_nsp_write_flash(nsp, fw); + if (err < 0) { + dev_err(dev, "Flash write failed: %d\n", err); + goto exit_rtnl_lock; + } + dev_info(dev, "Finished writing flash image\n"); + +exit_rtnl_lock: + rtnl_lock(); + dev_put(netdev); + release_firmware(fw); + +exit_close_nsp: + nfp_nsp_close(nsp); + return err; +} + static const struct ethtool_ops nfp_net_ethtool_ops = { .get_drvinfo = nfp_net_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1240,6 +1331,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { .get_sset_count = nfp_net_get_sset_count, .get_rxnfc = nfp_net_get_rxnfc, .set_rxnfc = nfp_net_set_rxnfc, + .flash_device = nfp_net_flash_device, .get_rxfh_indir_size = nfp_net_get_rxfh_indir_size, .get_rxfh_key_size = 
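/* For orientation, not part of the patch: assuming the stock ethtool
 * CLI, the callbacks wired up in these ops tables are reached as
 *
 *	ethtool -W <dev> <level>	-> nfp_app_set_dump()
 *	ethtool -w <dev>		-> nfp_app_get_dump_flag()
 *	ethtool -w <dev> data <file>	-> nfp_app_get_dump_data()
 *	ethtool -f <dev> <fw-image>	-> nfp_net_flash_device()
 */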
nfp_net_get_rxfh_key_size, .get_rxfh = nfp_net_get_rxfh, @@ -1265,6 +1357,7 @@ const struct ethtool_ops nfp_port_ethtool_ops = { .get_strings = nfp_port_get_strings, .get_ethtool_stats = nfp_port_get_stats, .get_sset_count = nfp_port_get_sset_count, + .flash_device = nfp_net_flash_device, .set_dump = nfp_app_set_dump, .get_dump_flag = nfp_app_get_dump_flag, .get_dump_data = nfp_app_get_dump_data, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index c505014121c4..15fa47f622aa 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -208,12 +208,6 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id) { int err; - /* Get ME clock frequency from ctrl BAR - * XXX for now frequency is hardcoded until we figure out how - * to get the value from nfp-hwinfo into ctrl bar - */ - nn->me_freq_mhz = 1200; - err = nfp_net_init(nn); if (err) return err; @@ -373,7 +367,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride) if (IS_ERR(pf->app)) return PTR_ERR(pf->app); + mutex_lock(&pf->lock); err = nfp_app_init(pf->app); + mutex_unlock(&pf->lock); if (err) goto err_free; @@ -401,7 +397,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride) err_unmap: nfp_cpp_area_release_free(pf->ctrl_vnic_bar); err_app_clean: + mutex_lock(&pf->lock); nfp_app_clean(pf->app); + mutex_unlock(&pf->lock); err_free: nfp_app_free(pf->app); pf->app = NULL; @@ -414,7 +412,11 @@ static void nfp_net_pf_app_clean(struct nfp_pf *pf) nfp_net_pf_free_vnic(pf, pf->ctrl_vnic); nfp_cpp_area_release_free(pf->ctrl_vnic_bar); } + + mutex_lock(&pf->lock); nfp_app_clean(pf->app); + mutex_unlock(&pf->lock); + nfp_app_free(pf->app); pf->app = NULL; } @@ -570,17 +572,6 @@ err_unmap_ctrl: return err; } -static void nfp_net_pci_remove_finish(struct nfp_pf *pf) -{ - nfp_net_pf_app_stop(pf); - /* stop app first, to avoid double free of ctrl vNIC's ddir */ - nfp_net_debugfs_dir_clean(&pf->ddir); - - nfp_net_pf_free_irqs(pf); - nfp_net_pf_app_clean(pf); - nfp_net_pci_unmap_mem(pf); -} - static int nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port, struct nfp_eth_table *eth_table) @@ -655,9 +646,6 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf) nfp_net_pf_free_vnic(pf, nn); } - if (list_empty(&pf->vnics)) - nfp_net_pci_remove_finish(pf); - return 0; } @@ -707,6 +695,7 @@ int nfp_net_refresh_eth_port(struct nfp_port *port) */ int nfp_net_pci_probe(struct nfp_pf *pf) { + struct devlink *devlink = priv_to_devlink(pf); struct nfp_net_fw_version fw_ver; u8 __iomem *ctrl_bar, *qc_bar; int stride; @@ -720,16 +709,13 @@ int nfp_net_pci_probe(struct nfp_pf *pf) return -EINVAL; } - mutex_lock(&pf->lock); pf->max_data_vnics = nfp_net_pf_get_num_ports(pf); - if ((int)pf->max_data_vnics < 0) { - err = pf->max_data_vnics; - goto err_unlock; - } + if ((int)pf->max_data_vnics < 0) + return pf->max_data_vnics; err = nfp_net_pci_map_mem(pf); if (err) - goto err_unlock; + return err; ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar); qc_bar = nfp_cpp_area_iomem(pf->qc_area); @@ -768,6 +754,11 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_unmap; + err = devlink_register(devlink, &pf->pdev->dev); + if (err) + goto err_app_clean; + + mutex_lock(&pf->lock); pf->ddir = nfp_net_debugfs_device_add(pf->pdev); /* Allocate the vnics and do basic init */ @@ -799,32 +790,39 @@ err_free_vnics: nfp_net_pf_free_vnics(pf); 
err_clean_ddir: nfp_net_debugfs_dir_clean(&pf->ddir); + mutex_unlock(&pf->lock); + cancel_work_sync(&pf->port_refresh_work); + devlink_unregister(devlink); +err_app_clean: nfp_net_pf_app_clean(pf); err_unmap: nfp_net_pci_unmap_mem(pf); -err_unlock: - mutex_unlock(&pf->lock); - cancel_work_sync(&pf->port_refresh_work); return err; } void nfp_net_pci_remove(struct nfp_pf *pf) { - struct nfp_net *nn; + struct nfp_net *nn, *next; mutex_lock(&pf->lock); - if (list_empty(&pf->vnics)) - goto out; - - list_for_each_entry(nn, &pf->vnics, vnic_list) - if (nfp_net_is_data_vnic(nn)) - nfp_net_pf_clean_vnic(pf, nn); + list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) { + if (!nfp_net_is_data_vnic(nn)) + continue; + nfp_net_pf_clean_vnic(pf, nn); + nfp_net_pf_free_vnic(pf, nn); + } - nfp_net_pf_free_vnics(pf); + nfp_net_pf_app_stop(pf); + /* stop app first, to avoid double free of ctrl vNIC's ddir */ + nfp_net_debugfs_dir_clean(&pf->ddir); - nfp_net_pci_remove_finish(pf); -out: mutex_unlock(&pf->lock); + devlink_unregister(priv_to_devlink(pf)); + + nfp_net_pf_free_irqs(pf); + nfp_net_pf_app_clean(pf); + nfp_net_pci_unmap_mem(pf); + cancel_work_sync(&pf->port_refresh_work); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 78b36c67c232..f67da6bde9da 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -46,6 +46,13 @@ #include "nfp_net_sriov.h" #include "nfp_port.h" +struct net_device * +nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id) +{ + return rcu_dereference_protected(set->reprs[id], + lockdep_is_held(&app->pf->lock)); +} + static void nfp_repr_inc_tx_stats(struct net_device *netdev, unsigned int len, int tx_status) @@ -186,6 +193,13 @@ nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev, return -EINVAL; } +static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct nfp_repr *repr = netdev_priv(netdev); + + return nfp_app_change_mtu(repr->app, netdev, new_mtu); +} + static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev) { struct nfp_repr *repr = netdev_priv(netdev); @@ -240,6 +254,7 @@ const struct net_device_ops nfp_repr_netdev_ops = { .ndo_open = nfp_repr_open, .ndo_stop = nfp_repr_stop, .ndo_start_xmit = nfp_repr_xmit, + .ndo_change_mtu = nfp_repr_change_mtu, .ndo_get_stats64 = nfp_repr_get_stats64, .ndo_has_offload_stats = nfp_repr_has_offload_stats, .ndo_get_offload_stats = nfp_repr_get_offload_stats, @@ -336,6 +351,8 @@ struct net_device *nfp_repr_alloc(struct nfp_app *app) if (!netdev) return NULL; + netif_carrier_off(netdev); + repr = netdev_priv(netdev); repr->netdev = netdev; repr->app = app; @@ -359,29 +376,45 @@ static void nfp_repr_clean_and_free(struct nfp_repr *repr) nfp_repr_free(repr); } -void nfp_reprs_clean_and_free(struct nfp_reprs *reprs) +void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs) { + struct net_device *netdev; unsigned int i; - for (i = 0; i < reprs->num_reprs; i++) - if (reprs->reprs[i]) - nfp_repr_clean_and_free(netdev_priv(reprs->reprs[i])); + for (i = 0; i < reprs->num_reprs; i++) { + netdev = nfp_repr_get_locked(app, reprs, i); + if (netdev) + nfp_repr_clean_and_free(netdev_priv(netdev)); + } kfree(reprs); } void -nfp_reprs_clean_and_free_by_type(struct nfp_app *app, - enum nfp_repr_type type) +nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type) { + struct net_device 
*netdev; struct nfp_reprs *reprs; + int i; - reprs = nfp_app_reprs_set(app, type, NULL); + reprs = rcu_dereference_protected(app->reprs[type], + lockdep_is_held(&app->pf->lock)); if (!reprs) return; + /* Preclean must happen before we remove the reprs reference from the + * app below. + */ + for (i = 0; i < reprs->num_reprs; i++) { + netdev = nfp_repr_get_locked(app, reprs, i); + if (netdev) + nfp_app_repr_preclean(app, netdev); + } + + reprs = nfp_app_reprs_set(app, type, NULL); + synchronize_rcu(); - nfp_reprs_clean_and_free(reprs); + nfp_reprs_clean_and_free(app, reprs); } struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs) @@ -399,47 +432,29 @@ struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs) int nfp_reprs_resync_phys_ports(struct nfp_app *app) { - struct nfp_reprs *reprs, *old_reprs; + struct net_device *netdev; + struct nfp_reprs *reprs; struct nfp_repr *repr; int i; - old_reprs = - rcu_dereference_protected(app->reprs[NFP_REPR_TYPE_PHYS_PORT], - lockdep_is_held(&app->pf->lock)); - if (!old_reprs) - return 0; - - reprs = nfp_reprs_alloc(old_reprs->num_reprs); + reprs = nfp_reprs_get_locked(app, NFP_REPR_TYPE_PHYS_PORT); if (!reprs) - return -ENOMEM; - - for (i = 0; i < old_reprs->num_reprs; i++) { - if (!old_reprs->reprs[i]) - continue; - - repr = netdev_priv(old_reprs->reprs[i]); - if (repr->port->type == NFP_PORT_INVALID) - continue; - - reprs->reprs[i] = old_reprs->reprs[i]; - } - - old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs); - synchronize_rcu(); + return 0; - /* Now we free up removed representors */ - for (i = 0; i < old_reprs->num_reprs; i++) { - if (!old_reprs->reprs[i]) + for (i = 0; i < reprs->num_reprs; i++) { + netdev = nfp_repr_get_locked(app, reprs, i); + if (!netdev) continue; - repr = netdev_priv(old_reprs->reprs[i]); + repr = netdev_priv(netdev); if (repr->port->type != NFP_PORT_INVALID) continue; - nfp_app_repr_stop(app, repr); + nfp_app_repr_preclean(app, netdev); + rcu_assign_pointer(reprs->reprs[i], NULL); + synchronize_rcu(); nfp_repr_clean(repr); } - kfree(old_reprs); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h index 5d4d897bc9c6..a621e8ff528e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h @@ -35,6 +35,7 @@ #define NFP_NET_REPR_H struct metadata_dst; +struct nfp_app; struct nfp_net; struct nfp_port; @@ -47,7 +48,7 @@ struct nfp_port; */ struct nfp_reprs { unsigned int num_reprs; - struct net_device *reprs[0]; + struct net_device __rcu *reprs[0]; }; /** @@ -89,6 +90,7 @@ struct nfp_repr { * @NFP_REPR_TYPE_PHYS_PORT: external NIC port * @NFP_REPR_TYPE_PF: physical function * @NFP_REPR_TYPE_VF: virtual function + * @__NFP_REPR_TYPE_MAX: number of representor types */ enum nfp_repr_type { NFP_REPR_TYPE_PHYS_PORT, @@ -113,16 +115,18 @@ static inline int nfp_repr_get_port_id(struct net_device *netdev) return priv->dst->u.port_info.port_id; } +struct net_device * +nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, + unsigned int id); + void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len); int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, u32 cmsg_port_id, struct nfp_port *port, struct net_device *pf_netdev); struct net_device *nfp_repr_alloc(struct nfp_app *app); -void -nfp_reprs_clean_and_free(struct nfp_reprs *reprs); -void -nfp_reprs_clean_and_free_by_type(struct nfp_app *app, - enum nfp_repr_type type); +void 
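/* Illustrative only, not part of the patch: with reprs[] annotated
 * __rcu, a datapath reader is expected to look roughly like
 *
 *	rcu_read_lock();
 *	netdev = rcu_dereference(reprs->reprs[id]);
 *	if (netdev)
 *		nfp_repr_inc_rx_stats(netdev, len);
 *	rcu_read_unlock();
 *
 * while control-path code holding pf->lock uses nfp_repr_get_locked(),
 * i.e. rcu_dereference_protected(), as introduced above.
 */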
nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs); +void nfp_reprs_clean_and_free_by_type(struct nfp_app *app, + enum nfp_repr_type type); struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs); int nfp_reprs_resync_phys_ports(struct nfp_app *app); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index c879626e035b..b802a1d55449 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -277,12 +277,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, } nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs); - /* Get ME clock frequency from ctrl BAR - * XXX for now frequency is hardcoded until we figure out how - * to get the value from nfp-hwinfo into ctrl bar - */ - nn->me_freq_mhz = 1200; - err = nfp_net_init(nn); if (err) goto err_irqs_disable; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index 3ce51f03126f..ced62d112aa2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -49,6 +49,8 @@ struct nfp_hwinfo; struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp); const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup); +char *nfp_hwinfo_get_packed_strings(struct nfp_hwinfo *hwinfo); +u32 nfp_hwinfo_get_packed_str_size(struct nfp_hwinfo *hwinfo); /* Implemented in nfp_nsp.c, low level functions */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index 5798adc57cbc..c8f2c064cce3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -242,6 +242,7 @@ int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset, void *buffer, size_t length); int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset, const void *buffer, size_t length); +size_t nfp_cpp_area_size(struct nfp_cpp_area *area); const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area); void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area); struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index 04dd5758ecf5..ef30597aa319 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -372,8 +372,7 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest, * that it can be accessed directly. * * NOTE: @address and @size must be 32-bit aligned values. - * - * NOTE: The area must also be 'released' when the structure is freed. + * The area must also be 'released' when the structure is freed. * * Return: NFP CPP Area handle, or NULL */ @@ -536,8 +535,7 @@ void nfp_cpp_area_release_free(struct nfp_cpp_area *area) * Read data from indicated CPP region. * * NOTE: @offset and @length must be 32-bit aligned values. - * - * NOTE: Area must have been locked down with an 'acquire'. + * Area must have been locked down with an 'acquire'. * * Return: length of io, or -ERRNO */ @@ -558,8 +556,7 @@ int nfp_cpp_area_read(struct nfp_cpp_area *area, * Write data to indicated CPP region. * * NOTE: @offset and @length must be 32-bit aligned values. - * - * NOTE: Area must have been locked down with an 'acquire'. + * Area must have been locked down with an 'acquire'. 
* * Return: length of io, or -ERRNO */ @@ -571,6 +568,17 @@ int nfp_cpp_area_write(struct nfp_cpp_area *area, } /** + * nfp_cpp_area_size() - return size of a CPP area + * @cpp_area: CPP area handle + * + * Return: Size of the area + */ +size_t nfp_cpp_area_size(struct nfp_cpp_area *cpp_area) +{ + return cpp_area->size; +} + +/** * nfp_cpp_area_name() - return name of a CPP area * @cpp_area: CPP area handle * @@ -666,18 +674,20 @@ void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area) * @offset: Offset into area * @value: Pointer to read buffer * - * Return: length of the io, or -ERRNO + * Return: 0 on success, or -ERRNO */ int nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset, u32 *value) { u8 tmp[4]; - int err; + int n; - err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); - *value = get_unaligned_le32(tmp); + n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + if (n != sizeof(tmp)) + return n < 0 ? n : -EIO; - return err; + *value = get_unaligned_le32(tmp); + return 0; } /** @@ -686,16 +696,18 @@ int nfp_cpp_area_readl(struct nfp_cpp_area *area, * @offset: Offset into area * @value: Value to write * - * Return: length of the io, or -ERRNO + * Return: 0 on success, or -ERRNO */ int nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset, u32 value) { u8 tmp[4]; + int n; put_unaligned_le32(value, tmp); + n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp)); - return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp)); + return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO; } /** @@ -704,18 +716,20 @@ int nfp_cpp_area_writel(struct nfp_cpp_area *area, * @offset: Offset into area * @value: Pointer to read buffer * - * Return: length of the io, or -ERRNO + * Return: 0 on success, or -ERRNO */ int nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset, u64 *value) { u8 tmp[8]; - int err; + int n; - err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); - *value = get_unaligned_le64(tmp); + n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + if (n != sizeof(tmp)) + return n < 0 ? n : -EIO; - return err; + *value = get_unaligned_le64(tmp); + return 0; } /** @@ -724,16 +738,18 @@ int nfp_cpp_area_readq(struct nfp_cpp_area *area, * @offset: Offset into area * @value: Value to write * - * Return: length of the io, or -ERRNO + * Return: 0 on success, or -ERRNO */ int nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset, u64 value) { u8 tmp[8]; + int n; put_unaligned_le64(value, tmp); + n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp)); - return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp)); + return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO; } /** @@ -1072,7 +1088,7 @@ static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr) * @xpb_addr: Address for operation * @value: Pointer to read buffer * - * Return: length of the io, or -ERRNO + * Return: 0 on success, or -ERRNO */ int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value) { @@ -1087,7 +1103,7 @@ int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value) * @xpb_addr: Address for operation * @value: Value to write * - * Return: length of the io, or -ERRNO + * Return: 0 on success, or -ERRNO */ int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value) { @@ -1105,7 +1121,7 @@ int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value) * * KERNEL: This operation is safe to call in interrupt or softirq context. 
* - * Return: length of the io, or -ERRNO + * Return: 0 on success, or -ERRNO */ int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt, u32 mask, u32 value) diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c index ab86bceb93f2..20bad05e2e92 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c @@ -64,18 +64,20 @@ * @address: Address for operation * @value: Pointer to read buffer * - * Return: length of the io, or -ERRNO + * Return: 0 on success, or -ERRNO */ int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id, unsigned long long address, u32 *value) { u8 tmp[4]; - int err; + int n; - err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp)); - *value = get_unaligned_le32(tmp); + n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp)); + if (n != sizeof(tmp)) + return n < 0 ? n : -EIO; - return err; + *value = get_unaligned_le32(tmp); + return 0; } /** @@ -85,15 +87,18 @@ int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id, * @address: Address for operation * @value: Value to write * - * Return: length of the io, or -ERRNO + * Return: 0 on success, or -ERRNO */ int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id, unsigned long long address, u32 value) { u8 tmp[4]; + int n; put_unaligned_le32(value, tmp); - return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp)); + n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp)); + + return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO; } /** @@ -103,18 +108,20 @@ int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id, * @address: Address for operation * @value: Pointer to read buffer * - * Return: length of the io, or -ERRNO + * Return: 0 on success, or -ERRNO */ int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id, unsigned long long address, u64 *value) { u8 tmp[8]; - int err; + int n; - err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp)); - *value = get_unaligned_le64(tmp); + n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp)); + if (n != sizeof(tmp)) + return n < 0 ? n : -EIO; - return err; + *value = get_unaligned_le64(tmp); + return 0; } /** @@ -124,15 +131,18 @@ int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id, * @address: Address for operation * @value: Value to write * - * Return: length of the io, or -ERRNO + * Return: 0 on success, or -ERRNO */ int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id, unsigned long long address, u64 value) { u8 tmp[8]; + int n; put_unaligned_le64(value, tmp); - return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp)); + n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp)); + + return n == sizeof(tmp) ? 0 : n < 0 ? 
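/* Aside, not part of the patch: with the word-sized helpers now
 * returning 0 on success instead of a byte count, a caller only checks
 * for an error code, e.g. (hypothetical):
 *
 *	u32 val;
 *	int err;
 *
 *	err = nfp_cpp_readl(cpp, cpp_id, addr, &val);
 *	if (err)
 *		return err;
 */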
n : -EIO; } /* NOTE: This code should not use nfp_xpb_* functions, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c index 4f24aff1e772..063a9a6243d6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c @@ -302,3 +302,13 @@ const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup) return NULL; } + +char *nfp_hwinfo_get_packed_strings(struct nfp_hwinfo *hwinfo) +{ + return hwinfo->data; +} + +u32 nfp_hwinfo_get_packed_str_size(struct nfp_hwinfo *hwinfo) +{ + return le32_to_cpu(hwinfo->size) - sizeof(u32); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 14a6d1ba51a9..39abac678b71 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -51,6 +51,9 @@ #include "nfp_cpp.h" #include "nfp_nsp.h" +#define NFP_NSP_TIMEOUT_DEFAULT 30 +#define NFP_NSP_TIMEOUT_BOOT 30 + /* Offsets relative to the CSR base */ #define NSP_STATUS 0x00 #define NSP_STATUS_MAGIC GENMASK_ULL(63, 48) @@ -93,6 +96,7 @@ enum nfp_nsp_cmd { SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */ SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */ SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */ + SPCODE_NSP_WRITE_FLASH = 11, /* Load and flash image from buffer */ SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */ SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */ }; @@ -260,10 +264,10 @@ u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state) } static int -nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, - u32 nsp_cpp, u64 addr, u64 mask, u64 val) +nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr, + u64 mask, u64 val, u32 timeout_sec) { - const unsigned long wait_until = jiffies + 30 * HZ; + const unsigned long wait_until = jiffies + timeout_sec * HZ; int err; for (;;) { @@ -285,12 +289,13 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, } /** - * nfp_nsp_command() - Execute a command on the NFP Service Processor + * __nfp_nsp_command() - Execute a command on the NFP Service Processor * @state: NFP SP state * @code: NFP SP Command Code * @option: NFP SP Command Argument * @buff_cpp: NFP SP Buffer CPP Address info * @buff_addr: NFP SP Buffer Host address + * @timeout_sec:Timeout value to wait for completion in seconds * * Return: 0 for success with no result * * -ENODEV if the NSP is not a supported model * -EBUSY if the NSP is stuck * -EINTR if interrupted while waiting for completion - * -ETIMEDOUT if the NSP took longer than 30 seconds to complete + * -ETIMEDOUT if the NSP took longer than @timeout_sec seconds to complete */ -static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, - u32 buff_cpp, u64 buff_addr) +static int +__nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, + u64 buff_addr, u32 timeout_sec) { u64 reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command; struct nfp_cpp *cpp = state->cpp; @@ -341,8 +347,8 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, return err; /* Wait for NSP_COMMAND_START to go to 0 */ - err = nfp_nsp_wait_reg(cpp, &reg, - nsp_cpp, nsp_command, NSP_COMMAND_START, 0); + err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_command, + NSP_COMMAND_START, 0, NFP_NSP_TIMEOUT_DEFAULT); if (err) { 
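/* Aside, not part of the patch: the seconds-based timeout parameter is
 * converted to jiffies once at the top of nfp_nsp_wait_reg(),
 *
 *	const unsigned long wait_until = jiffies + timeout_sec * HZ;
 *
 * so the polling loop can keep comparing against jiffies and return
 * -ETIMEDOUT once the deadline passes.
 */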
nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n", err, code); @@ -350,8 +356,8 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, return err; } /* Wait for NSP_STATUS_BUSY to go to 0 */ - err = nfp_nsp_wait_reg(cpp, &reg, - nsp_cpp, nsp_status, NSP_STATUS_BUSY, 0); + err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status, NSP_STATUS_BUSY, + 0, timeout_sec); if (err) { nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n", err, code); @@ -374,9 +380,18 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, return ret_val; } -static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, - const void *in_buf, unsigned int in_size, - void *out_buf, unsigned int out_size) +static int +nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, + u64 buff_addr) +{ + return __nfp_nsp_command(state, code, option, buff_cpp, buff_addr, + NFP_NSP_TIMEOUT_DEFAULT); +} + +static int +__nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, + const void *in_buf, unsigned int in_size, void *out_buf, + unsigned int out_size, u32 timeout_sec) { struct nfp_cpp *cpp = nsp->cpp; unsigned int max_size; @@ -429,7 +444,8 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, return err; } - ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf); + ret = __nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf, + timeout_sec); if (ret < 0) return ret; @@ -442,12 +458,23 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, return ret; } +static int +nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, + const void *in_buf, unsigned int in_size, void *out_buf, + unsigned int out_size) +{ + return __nfp_nsp_command_buf(nsp, code, option, in_buf, in_size, + out_buf, out_size, + NFP_NSP_TIMEOUT_DEFAULT); +} + int nfp_nsp_wait(struct nfp_nsp *state) { - const unsigned long wait_until = jiffies + 30 * HZ; + const unsigned long wait_until = jiffies + NFP_NSP_TIMEOUT_BOOT * HZ; int err; - nfp_dbg(state->cpp, "Waiting for NSP to respond (30 sec max).\n"); + nfp_dbg(state->cpp, "Waiting for NSP to respond (%u sec max).\n", + NFP_NSP_TIMEOUT_BOOT); for (;;) { const unsigned long start_time = jiffies; @@ -488,6 +515,17 @@ int nfp_nsp_wait(struct nfp_nsp *state) fw->size, NULL, 0); } +int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw) +{ + /* The flash time is specified to take a maximum of 70s so we add an + * additional factor to this spec time. 
+ */ + u32 timeout_sec = 2.5 * 70; + + return __nfp_nsp_command_buf(state, SPCODE_NSP_WRITE_FLASH, fw->size, + fw->data, fw->size, NULL, 0, timeout_sec); +} + int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size) { return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index 650ca1a5bd21..e983c9d7f86c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -48,6 +48,7 @@ u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state); int nfp_nsp_wait(struct nfp_nsp *state); int nfp_nsp_device_soft_reset(struct nfp_nsp *state); int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); +int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_mac_reinit(struct nfp_nsp *state); static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state) diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c index ecda474ac7c3..46107aefad1c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -277,10 +277,6 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, break; } - if (err == sym->size) - err = 0; - else if (err >= 0) - err = -EIO; exit: if (error) *error = err; diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 481876b5424c..66c665d0b926 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -793,9 +793,9 @@ struct fe_priv { /* rx specific fields. * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); */ - union ring_type get_rx, put_rx, first_rx, last_rx; + union ring_type get_rx, put_rx, last_rx; struct nv_skb_map *get_rx_ctx, *put_rx_ctx; - struct nv_skb_map *first_rx_ctx, *last_rx_ctx; + struct nv_skb_map *last_rx_ctx; struct nv_skb_map *rx_skb; union ring_type rx_ring; @@ -822,9 +822,9 @@ struct fe_priv { /* * tx specific fields. 
*/ - union ring_type get_tx, put_tx, first_tx, last_tx; + union ring_type get_tx, put_tx, last_tx; struct nv_skb_map *get_tx_ctx, *put_tx_ctx; - struct nv_skb_map *first_tx_ctx, *last_tx_ctx; + struct nv_skb_map *last_tx_ctx; struct nv_skb_map *tx_skb; union ring_type tx_ring; @@ -1812,12 +1812,12 @@ static int nv_alloc_rx(struct net_device *dev) struct ring_desc *less_rx; less_rx = np->get_rx.orig; - if (less_rx-- == np->first_rx.orig) + if (less_rx-- == np->rx_ring.orig) less_rx = np->last_rx.orig; while (np->put_rx.orig != less_rx) { struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); - if (skb) { + if (likely(skb)) { np->put_rx_ctx->skb = skb; np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, skb->data, @@ -1833,9 +1833,9 @@ static int nv_alloc_rx(struct net_device *dev) wmb(); np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) - np->put_rx.orig = np->first_rx.orig; + np->put_rx.orig = np->rx_ring.orig; if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) - np->put_rx_ctx = np->first_rx_ctx; + np->put_rx_ctx = np->rx_skb; } else { packet_dropped: u64_stats_update_begin(&np->swstats_rx_syncp); @@ -1853,12 +1853,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev) struct ring_desc_ex *less_rx; less_rx = np->get_rx.ex; - if (less_rx-- == np->first_rx.ex) + if (less_rx-- == np->rx_ring.ex) less_rx = np->last_rx.ex; while (np->put_rx.ex != less_rx) { struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); - if (skb) { + if (likely(skb)) { np->put_rx_ctx->skb = skb; np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, skb->data, @@ -1875,9 +1875,9 @@ static int nv_alloc_rx_optimized(struct net_device *dev) wmb(); np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) - np->put_rx.ex = np->first_rx.ex; + np->put_rx.ex = np->rx_ring.ex; if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) - np->put_rx_ctx = np->first_rx_ctx; + np->put_rx_ctx = np->rx_skb; } else { packet_dropped: u64_stats_update_begin(&np->swstats_rx_syncp); @@ -1903,13 +1903,15 @@ static void nv_init_rx(struct net_device *dev) struct fe_priv *np = netdev_priv(dev); int i; - np->get_rx = np->put_rx = np->first_rx = np->rx_ring; + np->get_rx = np->rx_ring; + np->put_rx = np->rx_ring; if (!nv_optimized(np)) np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; else np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; - np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb; + np->get_rx_ctx = np->rx_skb; + np->put_rx_ctx = np->rx_skb; np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; for (i = 0; i < np->rx_ring_size; i++) { @@ -1932,13 +1934,15 @@ static void nv_init_tx(struct net_device *dev) struct fe_priv *np = netdev_priv(dev); int i; - np->get_tx = np->put_tx = np->first_tx = np->tx_ring; + np->get_tx = np->tx_ring; + np->put_tx = np->tx_ring; if (!nv_optimized(np)) np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; else np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; - np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb; + np->get_tx_ctx = np->tx_skb; + np->put_tx_ctx = np->tx_skb; np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; netdev_reset_queue(np->dev); np->tx_pkts_in_progress = 0; @@ -2248,9 +2252,9 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) offset += bcnt; size -= bcnt; if (unlikely(put_tx++ == np->last_tx.orig)) - put_tx = np->first_tx.orig; + 
put_tx = np->tx_ring.orig; if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) - np->put_tx_ctx = np->first_tx_ctx; + np->put_tx_ctx = np->tx_skb; } while (size); /* setup the fragments */ @@ -2276,7 +2280,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) do { nv_unmap_txskb(np, start_tx_ctx); if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) - tmp_tx_ctx = np->first_tx_ctx; + tmp_tx_ctx = np->tx_skb; } while (tmp_tx_ctx != np->put_tx_ctx); dev_kfree_skb_any(skb); np->put_tx_ctx = start_tx_ctx; @@ -2294,18 +2298,18 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) offset += bcnt; frag_size -= bcnt; if (unlikely(put_tx++ == np->last_tx.orig)) - put_tx = np->first_tx.orig; + put_tx = np->tx_ring.orig; if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) - np->put_tx_ctx = np->first_tx_ctx; + np->put_tx_ctx = np->tx_skb; } while (frag_size); } - if (unlikely(put_tx == np->first_tx.orig)) + if (unlikely(put_tx == np->tx_ring.orig)) prev_tx = np->last_tx.orig; else prev_tx = put_tx - 1; - if (unlikely(np->put_tx_ctx == np->first_tx_ctx)) + if (unlikely(np->put_tx_ctx == np->tx_skb)) prev_tx_ctx = np->last_tx_ctx; else prev_tx_ctx = np->put_tx_ctx - 1; @@ -2406,9 +2410,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, offset += bcnt; size -= bcnt; if (unlikely(put_tx++ == np->last_tx.ex)) - put_tx = np->first_tx.ex; + put_tx = np->tx_ring.ex; if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) - np->put_tx_ctx = np->first_tx_ctx; + np->put_tx_ctx = np->tx_skb; } while (size); /* setup the fragments */ @@ -2434,7 +2438,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, do { nv_unmap_txskb(np, start_tx_ctx); if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) - tmp_tx_ctx = np->first_tx_ctx; + tmp_tx_ctx = np->tx_skb; } while (tmp_tx_ctx != np->put_tx_ctx); dev_kfree_skb_any(skb); np->put_tx_ctx = start_tx_ctx; @@ -2452,18 +2456,18 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, offset += bcnt; frag_size -= bcnt; if (unlikely(put_tx++ == np->last_tx.ex)) - put_tx = np->first_tx.ex; + put_tx = np->tx_ring.ex; if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) - np->put_tx_ctx = np->first_tx_ctx; + np->put_tx_ctx = np->tx_skb; } while (frag_size); } - if (unlikely(put_tx == np->first_tx.ex)) + if (unlikely(put_tx == np->tx_ring.ex)) prev_tx = np->last_tx.ex; else prev_tx = put_tx - 1; - if (unlikely(np->put_tx_ctx == np->first_tx_ctx)) + if (unlikely(np->put_tx_ctx == np->tx_skb)) prev_tx_ctx = np->last_tx_ctx; else prev_tx_ctx = np->put_tx_ctx - 1; @@ -2563,7 +2567,7 @@ static int nv_tx_done(struct net_device *dev, int limit) if (np->desc_ver == DESC_VER_1) { if (flags & NV_TX_LASTPACKET) { - if (flags & NV_TX_ERROR) { + if (unlikely(flags & NV_TX_ERROR)) { if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) nv_legacybackoff_reseed(dev); @@ -2580,7 +2584,7 @@ static int nv_tx_done(struct net_device *dev, int limit) } } else { if (flags & NV_TX2_LASTPACKET) { - if (flags & NV_TX2_ERROR) { + if (unlikely(flags & NV_TX2_ERROR)) { if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) nv_legacybackoff_reseed(dev); @@ -2597,9 +2601,9 @@ static int nv_tx_done(struct net_device *dev, int limit) } } if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) - np->get_tx.orig = np->first_tx.orig; + np->get_tx.orig = np->tx_ring.orig; if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) - np->get_tx_ctx = np->first_tx_ctx; + np->get_tx_ctx = np->tx_skb; } 
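/* Reviewer note, not part of the patch: first_rx/first_tx and
 * first_rx_ctx/first_tx_ctx were constant aliases for the ring and
 * context-array bases, so every wrap site reduces to the same pattern:
 *
 *	if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
 *		np->get_tx.orig = np->tx_ring.orig;  (was np->first_tx.orig)
 */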
netdev_completed_queue(np->dev, tx_work, bytes_compl); @@ -2626,7 +2630,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit) nv_unmap_txskb(np, np->get_tx_ctx); if (flags & NV_TX2_LASTPACKET) { - if (flags & NV_TX2_ERROR) { + if (unlikely(flags & NV_TX2_ERROR)) { if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) { if (np->driver_data & DEV_HAS_GEAR_MODE) @@ -2651,9 +2655,9 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit) } if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) - np->get_tx.ex = np->first_tx.ex; + np->get_tx.ex = np->tx_ring.ex; if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) - np->get_tx_ctx = np->first_tx_ctx; + np->get_tx_ctx = np->tx_skb; } netdev_completed_queue(np->dev, tx_work, bytes_cleaned); @@ -2909,9 +2913,9 @@ static int nv_rx_process(struct net_device *dev, int limit) u64_stats_update_end(&np->swstats_rx_syncp); next_pkt: if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) - np->get_rx.orig = np->first_rx.orig; + np->get_rx.orig = np->rx_ring.orig; if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) - np->get_rx_ctx = np->first_rx_ctx; + np->get_rx_ctx = np->rx_skb; rx_work++; } @@ -2998,9 +3002,9 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) } next_pkt: if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) - np->get_rx.ex = np->first_rx.ex; + np->get_rx.ex = np->rx_ring.ex; if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) - np->get_rx_ctx = np->first_rx_ctx; + np->get_rx_ctx = np->rx_skb; rx_work++; } @@ -5507,11 +5511,9 @@ static int nv_open(struct net_device *dev) /* One manual link speed update: Interrupts are enabled, future link * speed changes cause interrupts and are handled by nv_link_irq(). */ - { - u32 miistat; - miistat = readl(base + NvRegMIIStatus); - writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); - } + readl(base + NvRegMIIStatus); + writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); + /* set linkspeed to invalid value, thus force nv_update_linkspeed * to init hw */ np->linkspeed = 0; diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index c9a55b774935..07a2eb3781b1 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -212,9 +212,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac) return -ENOENT; } - if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", - &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) - != ETH_ALEN) { + if (!mac_pton(maddr, addr)) { dev_warn(&pdev->dev, "can't parse mac address, not configuring\n"); return -EINVAL; diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 26ddf092e3ec..0ee2490db729 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -85,6 +85,7 @@ config QED tristate "QLogic QED 25/40/100Gb core driver" depends on PCI select ZLIB_INFLATE + select CRC8 ---help--- This enables the support for ... 
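For reference, a minimal sketch (not part of the patch; the address string is made up) of the mac_pton() helper from lib/net_utils.c that replaces the open-coded sscanf() MAC parsing in pasemi_get_mac_addr() above:

	u8 addr[ETH_ALEN];

	if (!mac_pton("00:15:17:ab:cd:ef", addr))
		return -EINVAL;	/* not a valid xx:xx:xx:xx:xx:xx string */
	/* addr[] now holds the six parsed octets */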
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 0a66389c06c2..1cd39c9a0345 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -2502,12 +2502,10 @@ netxen_collect_minidump(struct netxen_adapter *adapter) { int ret = 0; struct netxen_minidump_template_hdr *hdr; - struct timespec val; hdr = (struct netxen_minidump_template_hdr *) adapter->mdump.md_template; hdr->driver_capture_mask = adapter->mdump.md_capture_mask; - jiffies_to_timespec(jiffies, &val); - hdr->driver_timestamp = (u32) val.tv_sec; + hdr->driver_timestamp = ktime_get_seconds(); hdr->driver_info_word2 = adapter->fw_version; hdr->driver_info_word3 = NXRD32(adapter, CRB_DRIVER_VERSION); ret = netxen_parse_md_template(adapter); diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 91003bc6f00b..69488554f4b9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -52,10 +52,10 @@ extern const struct qed_common_ops qed_common_ops_pass; -#define QED_MAJOR_VERSION 8 -#define QED_MINOR_VERSION 10 -#define QED_REVISION_VERSION 11 -#define QED_ENGINEERING_VERSION 21 +#define QED_MAJOR_VERSION 8 +#define QED_MINOR_VERSION 33 +#define QED_REVISION_VERSION 0 +#define QED_ENGINEERING_VERSION 20 #define QED_VERSION \ ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \ @@ -778,8 +778,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, return sw_fid; } -#define PURE_LB_TC 8 -#define PKT_LB_TC 9 +#define PKT_LB_TC 9 +#define MAX_NUM_VOQS_E4 20 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate); void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index afd07ad91631..6f546e869d8d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -86,22 +86,22 @@ /* connection context union */ union conn_context { - struct core_conn_context core_ctx; - struct eth_conn_context eth_ctx; - struct iscsi_conn_context iscsi_ctx; - struct fcoe_conn_context fcoe_ctx; - struct roce_conn_context roce_ctx; + struct e4_core_conn_context core_ctx; + struct e4_eth_conn_context eth_ctx; + struct e4_iscsi_conn_context iscsi_ctx; + struct e4_fcoe_conn_context fcoe_ctx; + struct e4_roce_conn_context roce_ctx; }; /* TYPE-0 task context - iSCSI, FCOE */ union type0_task_context { - struct iscsi_task_context iscsi_ctx; - struct fcoe_task_context fcoe_ctx; + struct e4_iscsi_task_context iscsi_ctx; + struct e4_fcoe_task_context fcoe_ctx; }; /* TYPE-1 task context - ROCE */ union type1_task_context { - struct rdma_task_context roce_ctx; + struct e4_rdma_task_context roce_ctx; }; struct src_ent { @@ -109,8 +109,8 @@ struct src_ent { u64 next; }; -#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */ -#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12)) +#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */ +#define CDUT_SEG_ALIGNMET_IN_BYTES BIT(CDUT_SEG_ALIGNMET + 12) #define CONN_CXT_SIZE(p_hwfn) \ ALIGNED_TYPE_SIZE(union conn_context, p_hwfn) @@ -742,7 +742,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count) p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]); qed_cxt_qm_iids(p_hwfn, &qm_iids); - total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, + total = qed_qm_pf_mem_size(qm_iids.cids, qm_iids.vf_cids, qm_iids.tids, 
p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs); @@ -1055,11 +1055,10 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, u32 size; size = min_t(u32, sz_left, p_blk->real_size_in_page); - p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - size, &p_phys, GFP_KERNEL); + p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size, + &p_phys, GFP_KERNEL); if (!p_virt) return -ENOMEM; - memset(p_virt, 0, size); ilt_shadow[line].p_phys = p_phys; ilt_shadow[line].p_virt = p_virt; @@ -1496,20 +1495,24 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn) } } -void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +void qed_qm_init_pf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool is_pf_loading) { - struct qed_qm_pf_rt_init_params params; struct qed_qm_info *qm_info = &p_hwfn->qm_info; + struct qed_qm_pf_rt_init_params params; + struct qed_mcp_link_state *p_link; struct qed_qm_iids iids; memset(&iids, 0, sizeof(iids)); qed_cxt_qm_iids(p_hwfn, &iids); + p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output; + memset(&params, 0, sizeof(params)); params.port_id = p_hwfn->port_id; params.pf_id = p_hwfn->rel_pf_id; params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; - params.is_first_pf = p_hwfn->first_on_engine; + params.is_pf_loading = is_pf_loading; params.num_pf_cids = iids.cids; params.num_vf_cids = iids.vf_cids; params.num_tids = iids.tids; @@ -1520,6 +1523,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) params.num_vports = qm_info->num_vports; params.pf_wfq = qm_info->pf_wfq; params.pf_rl = qm_info->pf_rl; + params.link_speed = p_link->speed; params.pq_params = qm_info->qm_pq_params; params.vport_params = qm_info->qm_vport_params; @@ -1883,7 +1887,7 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn) void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - qed_qm_init_pf(p_hwfn, p_ptt); + qed_qm_init_pf(p_hwfn, p_ptt, true); qed_cm_init_pf(p_hwfn); qed_dq_init_pf(p_hwfn); qed_cdu_init_pf(p_hwfn); @@ -2303,14 +2307,13 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, goto out0; } - p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - p_blk->real_size_in_page, - &p_phys, GFP_KERNEL); + p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, + p_blk->real_size_in_page, &p_phys, + GFP_KERNEL); if (!p_virt) { rc = -ENOMEM; goto out1; } - memset(p_virt, 0, p_blk->real_size_in_page); /* configuration of refTagMask to 0xF is required for RoCE DIF MR only, * to compensate for a HW bug, but it is configured even if DIF is not @@ -2326,7 +2329,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, for (elem_i = 0; elem_i < elems_per_p; elem_i++) { elem = (union type1_task_context *)elem_start; SET_FIELD(elem->roce_ctx.tdif_context.flags1, - TDIF_TASK_CONTEXT_REFTAGMASK, 0xf); + TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf); elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 17836349a274..a4e95869889f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -169,8 +169,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); * * @param p_hwfn * @param p_ptt + * @param is_pf_loading */ -void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +void qed_qm_init_pf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool is_pf_loading); /** * @brief Reconfigures QM pf on the fly diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c 
b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index fe7c1f230028..449777f21237 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -954,9 +954,7 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, struct pf_update_ramrod_data *p_dest) { struct protocol_dcb_data *p_dcb_data; - bool update_flag = false; - - p_dest->pf_id = p_src->pf_id; + u8 update_flag; update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update; p_dest->update_fcoe_dcb_data_mode = update_flag; diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 03c3cf77aaff..f2633ec87a6a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -21,25 +21,26 @@ enum mem_groups { MEM_GROUP_DMAE_MEM, MEM_GROUP_CM_MEM, MEM_GROUP_QM_MEM, - MEM_GROUP_TM_MEM, + MEM_GROUP_DORQ_MEM, MEM_GROUP_BRB_RAM, MEM_GROUP_BRB_MEM, MEM_GROUP_PRS_MEM, - MEM_GROUP_SDM_MEM, MEM_GROUP_IOR, - MEM_GROUP_RAM, MEM_GROUP_BTB_RAM, - MEM_GROUP_RDIF_CTX, - MEM_GROUP_TDIF_CTX, - MEM_GROUP_CFC_MEM, MEM_GROUP_CONN_CFC_MEM, MEM_GROUP_TASK_CFC_MEM, MEM_GROUP_CAU_PI, MEM_GROUP_CAU_MEM, MEM_GROUP_PXP_ILT, + MEM_GROUP_TM_MEM, + MEM_GROUP_SDM_MEM, MEM_GROUP_PBUF, + MEM_GROUP_RAM, MEM_GROUP_MULD_MEM, MEM_GROUP_BTB_MEM, + MEM_GROUP_RDIF_CTX, + MEM_GROUP_TDIF_CTX, + MEM_GROUP_CFC_MEM, MEM_GROUP_IGU_MEM, MEM_GROUP_IGU_MSIX, MEM_GROUP_CAU_SB, @@ -54,25 +55,26 @@ static const char * const s_mem_group_names[] = { "DMAE_MEM", "CM_MEM", "QM_MEM", - "TM_MEM", + "DORQ_MEM", "BRB_RAM", "BRB_MEM", "PRS_MEM", - "SDM_MEM", "IOR", - "RAM", "BTB_RAM", - "RDIF_CTX", - "TDIF_CTX", - "CFC_MEM", "CONN_CFC_MEM", "TASK_CFC_MEM", "CAU_PI", "CAU_MEM", "PXP_ILT", + "TM_MEM", + "SDM_MEM", "PBUF", + "RAM", "MULD_MEM", "BTB_MEM", + "RDIF_CTX", + "TDIF_CTX", + "CFC_MEM", "IGU_MEM", "IGU_MSIX", "CAU_SB", @@ -92,11 +94,6 @@ static u32 cond7(const u32 *r, const u32 *imm) return ((r[0] >> imm[0]) & imm[1]) != imm[2]; } -static u32 cond14(const u32 *r, const u32 *imm) -{ - return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]); -} - static u32 cond6(const u32 *r, const u32 *imm) { return (r[0] & imm[0]) != imm[1]; @@ -174,7 +171,6 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = { cond11, cond12, cond13, - cond14, }; /******************************* Data Types **********************************/ @@ -203,6 +199,8 @@ struct chip_defs { struct platform_defs { const char *name; u32 delay_factor; + u32 dmae_thresh; + u32 log_thresh; }; /* Storm constant definitions. 
@@ -234,7 +232,7 @@ struct storm_defs { /* Block constant definitions */ struct block_defs { const char *name; - bool has_dbg_bus[MAX_CHIP_IDS]; + bool exists[MAX_CHIP_IDS]; bool associated_to_storm; /* Valid only if associated_to_storm is true */ @@ -258,8 +256,8 @@ struct block_defs { /* Reset register definitions */ struct reset_reg_defs { u32 addr; - u32 unreset_val; bool exists[MAX_CHIP_IDS]; + u32 unreset_val[MAX_CHIP_IDS]; }; struct grc_param_defs { @@ -276,8 +274,8 @@ struct rss_mem_defs { const char *mem_name; const char *type_name; u32 addr; + u32 entry_width; u32 num_entries[MAX_CHIP_IDS]; - u32 entry_width[MAX_CHIP_IDS]; }; struct vfc_ram_defs { @@ -294,7 +292,9 @@ struct big_ram_defs { enum dbg_grc_params grc_param; u32 addr_reg_addr; u32 data_reg_addr; - u32 num_of_blocks[MAX_CHIP_IDS]; + u32 is_256b_reg_addr; + u32 is_256b_bit_offset[MAX_CHIP_IDS]; + u32 ram_size[MAX_CHIP_IDS]; /* In dwords */ }; struct phy_defs { @@ -358,20 +358,14 @@ struct phy_defs { (arr)[i] = qed_rd(dev, ptt, addr); \ } while (0) -#ifndef DWORDS_TO_BYTES #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD) -#endif -#ifndef BYTES_TO_DWORDS #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD) -#endif -/* extra lines include a signature line + optional latency events line */ -#ifndef NUM_DBG_LINES +/* Extra lines include a signature line + optional latency events line */ #define NUM_EXTRA_DBG_LINES(block_desc) \ (1 + ((block_desc)->has_latency_events ? 1 : 0)) #define NUM_DBG_LINES(block_desc) \ ((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc)) -#endif #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2) #define RAM_LINES_TO_BYTES(lines) \ @@ -424,9 +418,6 @@ struct phy_defs { #define NUM_RSS_MEM_TYPES 5 #define NUM_BIG_RAM_TYPES 3 -#define BIG_RAM_BLOCK_SIZE_BYTES 128 -#define BIG_RAM_BLOCK_SIZE_DWORDS \ - BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES) #define NUM_PHY_TBUS_ADDRESSES 2048 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2) @@ -441,23 +432,17 @@ struct phy_defs { #define FW_IMG_MAIN 1 -#ifndef REG_FIFO_ELEMENT_DWORDS #define REG_FIFO_ELEMENT_DWORDS 2 -#endif #define REG_FIFO_DEPTH_ELEMENTS 32 #define REG_FIFO_DEPTH_DWORDS \ (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS) -#ifndef IGU_FIFO_ELEMENT_DWORDS #define IGU_FIFO_ELEMENT_DWORDS 4 -#endif #define IGU_FIFO_DEPTH_ELEMENTS 64 #define IGU_FIFO_DEPTH_DWORDS \ (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS) -#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2 -#endif #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \ (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \ @@ -491,6 +476,11 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0}, {0, 0, 0}, + {0, 0, 0} } }, + { "reserved", + {{0, 0, 0}, + {0, 0, 0}, + {0, 0, 0}, {0, 0, 0} } } }; @@ -498,7 +488,8 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { static struct storm_defs s_storm_defs[] = { /* Tstorm */ {'T', BLOCK_TSEM, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, + DBG_BUS_CLIENT_RBCT}, true, TSEM_REG_FAST_MEMORY, TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2, TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2, @@ -511,7 +502,8 @@ static struct storm_defs s_storm_defs[] = { /* Mstorm */ {'M', BLOCK_MSEM, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, + DBG_BUS_CLIENT_RBCM}, false, 
MSEM_REG_FAST_MEMORY, MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2, MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2, @@ -524,7 +516,8 @@ static struct storm_defs s_storm_defs[] = { /* Ustorm */ {'U', BLOCK_USEM, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, + DBG_BUS_CLIENT_RBCU}, false, USEM_REG_FAST_MEMORY, USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2, USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2, @@ -537,7 +530,8 @@ static struct storm_defs s_storm_defs[] = { /* Xstorm */ {'X', BLOCK_XSEM, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, + DBG_BUS_CLIENT_RBCX}, false, XSEM_REG_FAST_MEMORY, XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2, XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2, @@ -550,7 +544,8 @@ static struct storm_defs s_storm_defs[] = { /* Ystorm */ {'Y', BLOCK_YSEM, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, + DBG_BUS_CLIENT_RBCY}, false, YSEM_REG_FAST_MEMORY, YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2, YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2, @@ -563,7 +558,8 @@ static struct storm_defs s_storm_defs[] = { /* Pstorm */ {'P', BLOCK_PSEM, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, + DBG_BUS_CLIENT_RBCS}, true, PSEM_REG_FAST_MEMORY, PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2, PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2, @@ -579,8 +575,8 @@ static struct storm_defs s_storm_defs[] = { static struct block_defs block_grc_defs = { "grc", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN}, GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE, GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID, GRC_REG_DBG_FORCE_FRAME, @@ -588,30 +584,30 @@ static struct block_defs block_grc_defs = { }; static struct block_defs block_miscs_defs = { - "miscs", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "miscs", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_misc_defs = { - "misc", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "misc", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_dbu_defs = { - "dbu", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "dbu", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_pglue_b_defs = { "pglue_b", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH}, PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE, PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID, PGLUE_B_REG_DBG_FORCE_FRAME, @@ -620,25 +616,26 @@ static struct block_defs block_pglue_b_defs = { static struct block_defs block_cnig_defs = { "cnig", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW}, - CNIG_REG_DBG_SELECT_K2, 
CNIG_REG_DBG_DWORD_ENABLE_K2, - CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2, - CNIG_REG_DBG_FORCE_FRAME_K2, + {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, + DBG_BUS_CLIENT_RBCW}, + CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5, + CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5, + CNIG_REG_DBG_FORCE_FRAME_K2_E5, true, false, DBG_RESET_REG_MISCS_PL_HV, 0 }; static struct block_defs block_cpmu_defs = { - "cpmu", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "cpmu", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISCS_PL_HV, 8 }; static struct block_defs block_ncsi_defs = { "ncsi", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE, NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID, NCSI_REG_DBG_FORCE_FRAME, @@ -646,16 +643,16 @@ static struct block_defs block_ncsi_defs = { }; static struct block_defs block_opte_defs = { - "opte", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "opte", {true, true, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISCS_PL_HV, 4 }; static struct block_defs block_bmb_defs = { "bmb", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB}, BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE, BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID, BMB_REG_DBG_FORCE_FRAME, @@ -664,27 +661,28 @@ static struct block_defs block_bmb_defs = { static struct block_defs block_pcie_defs = { "pcie", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, - PCIE_REG_DBG_COMMON_SELECT_K2, - PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2, - PCIE_REG_DBG_COMMON_SHIFT_K2, - PCIE_REG_DBG_COMMON_FORCE_VALID_K2, - PCIE_REG_DBG_COMMON_FORCE_FRAME_K2, + {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, + DBG_BUS_CLIENT_RBCH}, + PCIE_REG_DBG_COMMON_SELECT_K2_E5, + PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5, + PCIE_REG_DBG_COMMON_SHIFT_K2_E5, + PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5, + PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_mcp_defs = { - "mcp", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "mcp", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_mcp2_defs = { "mcp2", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE, MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID, MCP2_REG_DBG_FORCE_FRAME, @@ -693,8 +691,8 @@ static struct block_defs block_mcp2_defs = { static struct block_defs block_pswhst_defs = { "pswhst", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE, PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID, PSWHST_REG_DBG_FORCE_FRAME, @@ -703,8 +701,8 @@ static struct 
block_defs block_pswhst_defs = { static struct block_defs block_pswhst2_defs = { "pswhst2", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE, PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID, PSWHST2_REG_DBG_FORCE_FRAME, @@ -713,8 +711,8 @@ static struct block_defs block_pswhst2_defs = { static struct block_defs block_pswrd_defs = { "pswrd", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE, PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID, PSWRD_REG_DBG_FORCE_FRAME, @@ -723,8 +721,8 @@ static struct block_defs block_pswrd_defs = { static struct block_defs block_pswrd2_defs = { "pswrd2", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE, PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID, PSWRD2_REG_DBG_FORCE_FRAME, @@ -733,8 +731,8 @@ static struct block_defs block_pswrd2_defs = { static struct block_defs block_pswwr_defs = { "pswwr", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE, PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID, PSWWR_REG_DBG_FORCE_FRAME, @@ -742,16 +740,16 @@ static struct block_defs block_pswwr_defs = { }; static struct block_defs block_pswwr2_defs = { - "pswwr2", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "pswwr2", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISC_PL_HV, 3 }; static struct block_defs block_pswrq_defs = { "pswrq", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE, PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID, PSWRQ_REG_DBG_FORCE_FRAME, @@ -760,8 +758,8 @@ static struct block_defs block_pswrq_defs = { static struct block_defs block_pswrq2_defs = { "pswrq2", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE, PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID, PSWRQ2_REG_DBG_FORCE_FRAME, @@ -770,18 +768,19 @@ static struct block_defs block_pswrq2_defs = { static struct block_defs block_pglcs_defs = { "pglcs", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, - PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2, - PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2, - PGLCS_REG_DBG_FORCE_FRAME_K2, + {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, + DBG_BUS_CLIENT_RBCH}, + PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5, + PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5, + PGLCS_REG_DBG_FORCE_FRAME_K2_E5, true, false, DBG_RESET_REG_MISCS_PL_HV, 2 }; static struct block_defs block_ptu_defs = { "ptu", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, 
DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE, PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID, PTU_REG_DBG_FORCE_FRAME, @@ -790,8 +789,8 @@ static struct block_defs block_ptu_defs = { static struct block_defs block_dmae_defs = { "dmae", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE, DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID, DMAE_REG_DBG_FORCE_FRAME, @@ -800,8 +799,8 @@ static struct block_defs block_dmae_defs = { static struct block_defs block_tcm_defs = { "tcm", - {true, true}, true, DBG_TSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, + {true, true, true}, true, DBG_TSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE, TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID, TCM_REG_DBG_FORCE_FRAME, @@ -810,8 +809,8 @@ static struct block_defs block_tcm_defs = { static struct block_defs block_mcm_defs = { "mcm", - {true, true}, true, DBG_MSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + {true, true, true}, true, DBG_MSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM}, MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE, MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID, MCM_REG_DBG_FORCE_FRAME, @@ -820,8 +819,8 @@ static struct block_defs block_mcm_defs = { static struct block_defs block_ucm_defs = { "ucm", - {true, true}, true, DBG_USTORM_ID, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + {true, true, true}, true, DBG_USTORM_ID, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE, UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID, UCM_REG_DBG_FORCE_FRAME, @@ -830,8 +829,8 @@ static struct block_defs block_ucm_defs = { static struct block_defs block_xcm_defs = { "xcm", - {true, true}, true, DBG_XSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, + {true, true, true}, true, DBG_XSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE, XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID, XCM_REG_DBG_FORCE_FRAME, @@ -840,8 +839,8 @@ static struct block_defs block_xcm_defs = { static struct block_defs block_ycm_defs = { "ycm", - {true, true}, true, DBG_YSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, + {true, true, true}, true, DBG_YSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY}, YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE, YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID, YCM_REG_DBG_FORCE_FRAME, @@ -850,8 +849,8 @@ static struct block_defs block_ycm_defs = { static struct block_defs block_pcm_defs = { "pcm", - {true, true}, true, DBG_PSTORM_ID, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + {true, true, true}, true, DBG_PSTORM_ID, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE, PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID, PCM_REG_DBG_FORCE_FRAME, @@ -860,8 +859,8 @@ static struct block_defs block_pcm_defs = { static struct block_defs block_qm_defs = { "qm", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ}, QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE, QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID, 
QM_REG_DBG_FORCE_FRAME, @@ -870,8 +869,8 @@ static struct block_defs block_qm_defs = { static struct block_defs block_tm_defs = { "tm", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE, TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID, TM_REG_DBG_FORCE_FRAME, @@ -880,8 +879,8 @@ static struct block_defs block_tm_defs = { static struct block_defs block_dorq_defs = { "dorq", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY}, DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE, DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID, DORQ_REG_DBG_FORCE_FRAME, @@ -890,8 +889,8 @@ static struct block_defs block_dorq_defs = { static struct block_defs block_brb_defs = { "brb", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR}, BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE, BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID, BRB_REG_DBG_FORCE_FRAME, @@ -900,8 +899,8 @@ static struct block_defs block_brb_defs = { static struct block_defs block_src_defs = { "src", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE, SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID, SRC_REG_DBG_FORCE_FRAME, @@ -910,8 +909,8 @@ static struct block_defs block_src_defs = { static struct block_defs block_prs_defs = { "prs", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR}, PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE, PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID, PRS_REG_DBG_FORCE_FRAME, @@ -920,8 +919,8 @@ static struct block_defs block_prs_defs = { static struct block_defs block_tsdm_defs = { "tsdm", - {true, true}, true, DBG_TSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, + {true, true, true}, true, DBG_TSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE, TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID, TSDM_REG_DBG_FORCE_FRAME, @@ -930,8 +929,8 @@ static struct block_defs block_tsdm_defs = { static struct block_defs block_msdm_defs = { "msdm", - {true, true}, true, DBG_MSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + {true, true, true}, true, DBG_MSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM}, MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE, MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID, MSDM_REG_DBG_FORCE_FRAME, @@ -940,8 +939,8 @@ static struct block_defs block_msdm_defs = { static struct block_defs block_usdm_defs = { "usdm", - {true, true}, true, DBG_USTORM_ID, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + {true, true, true}, true, DBG_USTORM_ID, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE, USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID, USDM_REG_DBG_FORCE_FRAME, @@ -950,8 +949,8 @@ static struct block_defs block_usdm_defs = { static struct block_defs block_xsdm_defs = { "xsdm", - {true, true}, true, DBG_XSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, + {true, true, true}, true, 
DBG_XSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE, XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID, XSDM_REG_DBG_FORCE_FRAME, @@ -960,8 +959,8 @@ static struct block_defs block_xsdm_defs = { static struct block_defs block_ysdm_defs = { "ysdm", - {true, true}, true, DBG_YSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, + {true, true, true}, true, DBG_YSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY}, YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE, YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID, YSDM_REG_DBG_FORCE_FRAME, @@ -970,8 +969,8 @@ static struct block_defs block_ysdm_defs = { static struct block_defs block_psdm_defs = { "psdm", - {true, true}, true, DBG_PSTORM_ID, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + {true, true, true}, true, DBG_PSTORM_ID, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE, PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID, PSDM_REG_DBG_FORCE_FRAME, @@ -980,8 +979,8 @@ static struct block_defs block_psdm_defs = { static struct block_defs block_tsem_defs = { "tsem", - {true, true}, true, DBG_TSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, + {true, true, true}, true, DBG_TSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE, TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID, TSEM_REG_DBG_FORCE_FRAME, @@ -990,8 +989,8 @@ static struct block_defs block_tsem_defs = { static struct block_defs block_msem_defs = { "msem", - {true, true}, true, DBG_MSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + {true, true, true}, true, DBG_MSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM}, MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE, MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID, MSEM_REG_DBG_FORCE_FRAME, @@ -1000,8 +999,8 @@ static struct block_defs block_msem_defs = { static struct block_defs block_usem_defs = { "usem", - {true, true}, true, DBG_USTORM_ID, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + {true, true, true}, true, DBG_USTORM_ID, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE, USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID, USEM_REG_DBG_FORCE_FRAME, @@ -1010,8 +1009,8 @@ static struct block_defs block_usem_defs = { static struct block_defs block_xsem_defs = { "xsem", - {true, true}, true, DBG_XSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, + {true, true, true}, true, DBG_XSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE, XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID, XSEM_REG_DBG_FORCE_FRAME, @@ -1020,8 +1019,8 @@ static struct block_defs block_xsem_defs = { static struct block_defs block_ysem_defs = { "ysem", - {true, true}, true, DBG_YSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, + {true, true, true}, true, DBG_YSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY}, YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE, YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID, YSEM_REG_DBG_FORCE_FRAME, @@ -1030,8 +1029,8 @@ static struct block_defs block_ysem_defs = { static struct block_defs block_psem_defs = { "psem", - {true, true}, true, DBG_PSTORM_ID, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + {true, true, true}, true, DBG_PSTORM_ID, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, 
PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE, PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID, PSEM_REG_DBG_FORCE_FRAME, @@ -1040,8 +1039,8 @@ static struct block_defs block_psem_defs = { static struct block_defs block_rss_defs = { "rss", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE, RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID, RSS_REG_DBG_FORCE_FRAME, @@ -1050,8 +1049,8 @@ static struct block_defs block_rss_defs = { static struct block_defs block_tmld_defs = { "tmld", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM}, TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE, TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID, TMLD_REG_DBG_FORCE_FRAME, @@ -1060,8 +1059,8 @@ static struct block_defs block_tmld_defs = { static struct block_defs block_muld_defs = { "muld", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE, MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID, MULD_REG_DBG_FORCE_FRAME, @@ -1070,8 +1069,9 @@ static struct block_defs block_muld_defs = { static struct block_defs block_yuld_defs = { "yuld", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + {true, true, false}, false, 0, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, + MAX_DBG_BUS_CLIENTS}, YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2, YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2, YULD_REG_DBG_FORCE_FRAME_BB_K2, @@ -1081,18 +1081,40 @@ static struct block_defs block_yuld_defs = { static struct block_defs block_xyld_defs = { "xyld", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE, XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID, XYLD_REG_DBG_FORCE_FRAME, true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12 }; +static struct block_defs block_ptld_defs = { + "ptld", + {false, false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT}, + PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5, + PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5, + PTLD_REG_DBG_FORCE_FRAME_E5, + true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, + 28 +}; + +static struct block_defs block_ypld_defs = { + "ypld", + {false, false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS}, + YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5, + YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5, + YPLD_REG_DBG_FORCE_FRAME_E5, + true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, + 27 +}; + static struct block_defs block_prm_defs = { "prm", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM}, PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE, PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID, PRM_REG_DBG_FORCE_FRAME, @@ -1101,8 +1123,8 @@ static struct block_defs block_prm_defs = { static struct block_defs block_pbf_pb1_defs = { "pbf_pb1", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV}, + {true, true, true}, false, 0, + 
{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV}, PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE, PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID, PBF_PB1_REG_DBG_FORCE_FRAME, @@ -1112,8 +1134,8 @@ static struct block_defs block_pbf_pb1_defs = { static struct block_defs block_pbf_pb2_defs = { "pbf_pb2", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV}, PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE, PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID, PBF_PB2_REG_DBG_FORCE_FRAME, @@ -1123,8 +1145,8 @@ static struct block_defs block_pbf_pb2_defs = { static struct block_defs block_rpb_defs = { "rpb", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM}, RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE, RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID, RPB_REG_DBG_FORCE_FRAME, @@ -1133,8 +1155,8 @@ static struct block_defs block_rpb_defs = { static struct block_defs block_btb_defs = { "btb", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV}, BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE, BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID, BTB_REG_DBG_FORCE_FRAME, @@ -1143,8 +1165,8 @@ static struct block_defs block_btb_defs = { static struct block_defs block_pbf_defs = { "pbf", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV}, PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE, PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID, PBF_REG_DBG_FORCE_FRAME, @@ -1153,8 +1175,8 @@ static struct block_defs block_pbf_defs = { static struct block_defs block_rdif_defs = { "rdif", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM}, RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE, RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID, RDIF_REG_DBG_FORCE_FRAME, @@ -1163,8 +1185,8 @@ static struct block_defs block_rdif_defs = { static struct block_defs block_tdif_defs = { "tdif", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE, TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID, TDIF_REG_DBG_FORCE_FRAME, @@ -1173,8 +1195,8 @@ static struct block_defs block_tdif_defs = { static struct block_defs block_cdu_defs = { "cdu", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE, CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID, CDU_REG_DBG_FORCE_FRAME, @@ -1183,8 +1205,8 @@ static struct block_defs block_cdu_defs = { static struct block_defs block_ccfc_defs = { "ccfc", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE, CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID, CCFC_REG_DBG_FORCE_FRAME, @@ -1193,8 +1215,8 @@ static struct block_defs block_ccfc_defs = { 
static struct block_defs block_tcfc_defs = { "tcfc", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE, TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID, TCFC_REG_DBG_FORCE_FRAME, @@ -1203,8 +1225,8 @@ static struct block_defs block_tcfc_defs = { static struct block_defs block_igu_defs = { "igu", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE, IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID, IGU_REG_DBG_FORCE_FRAME, @@ -1213,42 +1235,79 @@ static struct block_defs block_igu_defs = { static struct block_defs block_cau_defs = { "cau", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE, CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID, CAU_REG_DBG_FORCE_FRAME, true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19 }; +static struct block_defs block_rgfs_defs = { + "rgfs", {false, false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + 0, 0, 0, 0, 0, + true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29 +}; + +static struct block_defs block_rgsrc_defs = { + "rgsrc", + {false, false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, + RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5, + RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5, + RGSRC_REG_DBG_FORCE_FRAME_E5, + true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, + 30 +}; + +static struct block_defs block_tgfs_defs = { + "tgfs", {false, false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + 0, 0, 0, 0, 0, + true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30 +}; + +static struct block_defs block_tgsrc_defs = { + "tgsrc", + {false, false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV}, + TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5, + TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5, + TGSRC_REG_DBG_FORCE_FRAME_E5, + true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, + 31 +}; + static struct block_defs block_umac_defs = { "umac", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ}, - UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2, - UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2, - UMAC_REG_DBG_FORCE_FRAME_K2, + {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, + DBG_BUS_CLIENT_RBCZ}, + UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5, + UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5, + UMAC_REG_DBG_FORCE_FRAME_K2_E5, true, false, DBG_RESET_REG_MISCS_PL_HV, 6 }; static struct block_defs block_xmac_defs = { - "xmac", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "xmac", {true, false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_dbg_defs = { - "dbg", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "dbg", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3 }; 
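The mechanical change running through all of these block_defs tables is the move from two-chip arrays (BB, K2) to three (BB, K2, E5): every exists[] and debug-client initializer gains a third column, and blocks absent on a chip keep MAX_DBG_BUS_CLIENTS so lookups fail closed. A compact sketch of the idiom, with hypothetical example_* names standing in for the driver's dev_data and s_block_defs; only <linux/types.h> is assumed:

#include <linux/types.h>

enum example_chip_id { EX_CHIP_BB, EX_CHIP_K2, EX_CHIP_E5, EX_MAX_CHIPS };

struct example_block {
	const char *name;
	bool exists[EX_MAX_CHIPS];	/* block present on this chip? */
	u32 dbg_select_addr;		/* 0 => no debug bus to program */
};

/* One row per HW block, one column per chip generation. */
static const struct example_block example_blocks[] = {
	{ "grc",  { true, true, true  }, 0x1234 },	/* address is made up */
	{ "opte", { true, true, false }, 0 },		/* dropped on E5 */
};

/* Touch a block's debug registers only if this chip actually has them,
 * the same gate the patch adds with block->exists[dev_data->chip_id]
 * in qed_grc_unreset_blocks() further down.
 */
static bool example_block_usable(const struct example_block *blk,
				 enum example_chip_id chip)
{
	return blk->exists[chip] && blk->dbg_select_addr;
}

Indexing capability tables by chip id keeps one driver binary serving all generations; per-chip #ifdefs would instead fork the dump code three ways.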
static struct block_defs block_nig_defs = { "nig", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN}, NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE, NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID, NIG_REG_DBG_FORCE_FRAME, @@ -1257,139 +1316,106 @@ static struct block_defs block_nig_defs = { static struct block_defs block_wol_defs = { "wol", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ}, - WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2, - WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2, - WOL_REG_DBG_FORCE_FRAME_K2, + {false, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, + WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5, + WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5, + WOL_REG_DBG_FORCE_FRAME_K2_E5, true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7 }; static struct block_defs block_bmbn_defs = { "bmbn", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB}, - BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2, - BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2, - BMBN_REG_DBG_FORCE_FRAME_K2, + {false, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB, + DBG_BUS_CLIENT_RBCB}, + BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5, + BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5, + BMBN_REG_DBG_FORCE_FRAME_K2_E5, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_ipc_defs = { - "ipc", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "ipc", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISCS_PL_UA, 8 }; static struct block_defs block_nwm_defs = { "nwm", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW}, - NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2, - NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2, - NWM_REG_DBG_FORCE_FRAME_K2, + {false, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW}, + NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5, + NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5, + NWM_REG_DBG_FORCE_FRAME_K2_E5, true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0 }; static struct block_defs block_nws_defs = { "nws", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW}, - NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2, - NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2, - NWS_REG_DBG_FORCE_FRAME_K2, + {false, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW}, + NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5, + NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5, + NWS_REG_DBG_FORCE_FRAME_K2_E5, true, false, DBG_RESET_REG_MISCS_PL_HV, 12 }; static struct block_defs block_ms_defs = { "ms", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ}, - MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2, - MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2, - MS_REG_DBG_FORCE_FRAME_K2, + {false, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, + MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5, + MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5, + MS_REG_DBG_FORCE_FRAME_K2_E5, true, false, DBG_RESET_REG_MISCS_PL_HV, 13 }; static struct block_defs block_phy_pcie_defs = { "phy_pcie", - {false, 
true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, - PCIE_REG_DBG_COMMON_SELECT_K2, - PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2, - PCIE_REG_DBG_COMMON_SHIFT_K2, - PCIE_REG_DBG_COMMON_FORCE_VALID_K2, - PCIE_REG_DBG_COMMON_FORCE_FRAME_K2, + {false, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, + DBG_BUS_CLIENT_RBCH}, + PCIE_REG_DBG_COMMON_SELECT_K2_E5, + PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5, + PCIE_REG_DBG_COMMON_SHIFT_K2_E5, + PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5, + PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_led_defs = { - "led", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "led", {false, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISCS_PL_HV, 14 }; static struct block_defs block_avs_wrap_defs = { - "avs_wrap", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "avs_wrap", {false, true, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISCS_PL_UA, 11 }; -static struct block_defs block_rgfs_defs = { - "rgfs", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - false, false, MAX_DBG_RESET_REGS, 0 -}; - -static struct block_defs block_rgsrc_defs = { - "rgsrc", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - false, false, MAX_DBG_RESET_REGS, 0 -}; - -static struct block_defs block_tgfs_defs = { - "tgfs", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - false, false, MAX_DBG_RESET_REGS, 0 -}; - -static struct block_defs block_tgsrc_defs = { - "tgsrc", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - false, false, MAX_DBG_RESET_REGS, 0 -}; - -static struct block_defs block_ptld_defs = { - "ptld", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - false, false, MAX_DBG_RESET_REGS, 0 -}; - -static struct block_defs block_ypld_defs = { - "ypld", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, +static struct block_defs block_pxpreqbus_defs = { + "pxpreqbus", {false, false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_misc_aeu_defs = { - "misc_aeu", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "misc_aeu", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_bar0_map_defs = { - "bar0_map", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "bar0_map", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; @@ -1480,164 +1506,160 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = { &block_phy_pcie_defs, &block_led_defs, &block_avs_wrap_defs, + &block_pxpreqbus_defs, &block_misc_aeu_defs, &block_bar0_map_defs, }; static struct platform_defs s_platform_defs[] = { - {"asic", 1}, - {"reserved", 0}, - {"reserved2", 0}, - {"reserved3", 0} + {"asic", 1, 256, 32768}, + {"reserved", 0, 0, 0}, + {"reserved2", 0, 0, 0}, + {"reserved3", 0, 0, 0} }; static struct grc_param_defs 
s_grc_param_defs[] = { /* DBG_GRC_PARAM_DUMP_TSTORM */ - {{1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */ - {{1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */ - {{1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */ - {{1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */ - {{1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */ - {{1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_REGS */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */ - {{0, 0}, 0, 1, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */ - {{0, 0}, 0, 1, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */ - {{0, 0}, 0, 1, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */ - {{0, 0}, 0, 1, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */ - {{0, 0}, 0, 1, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_UNSTALL */ - {{0, 0}, 0, 1, false, 0, 0}, + {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_NUM_LCIDS */ - {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS, + {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS, MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */ - {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS, + {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS, MAX_LTIDS}, /* DBG_GRC_PARAM_EXCLUDE_ALL */ - {{0, 0}, 0, 1, true, 0, 0}, + {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */ - {{0, 0}, 0, 1, true, 0, 0}, + 
{{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */ - {{0, 0}, 0, 1, false, 1, 0}, + {{0, 0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_DUMP_CM */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PHY */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_NO_MCP */ - {{0, 0}, 0, 1, false, 0, 0}, + {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_NO_FW_VER */ - {{0, 0}, 0, 1, false, 0, 0} + {{0, 0, 0}, 0, 1, false, 0, 0} }; static struct rss_mem_defs s_rss_mem_defs[] = { - { "rss_mem_cid", "rss_cid", 0, - {256, 320}, - {32, 32} }, + { "rss_mem_cid", "rss_cid", 0, 32, + {256, 320, 512} }, - { "rss_mem_key_msb", "rss_key", 1024, - {128, 208}, - {256, 256} }, + { "rss_mem_key_msb", "rss_key", 1024, 256, + {128, 208, 257} }, - { "rss_mem_key_lsb", "rss_key", 2048, - {128, 208}, - {64, 64} }, + { "rss_mem_key_lsb", "rss_key", 2048, 64, + {128, 208, 257} }, - { "rss_mem_info", "rss_info", 3072, - {128, 208}, - {16, 16} }, + { "rss_mem_info", "rss_info", 3072, 16, + {128, 208, 256} }, - { "rss_mem_ind", "rss_ind", 4096, - {16384, 26624}, - {16, 16} } + { "rss_mem_ind", "rss_ind", 4096, 16, + {16384, 26624, 32768} } }; static struct vfc_ram_defs s_vfc_ram_defs[] = { @@ -1650,72 +1672,75 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = { static struct big_ram_defs s_big_ram_defs[] = { { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB, BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA, - {4800, 5632} }, + MISC_REG_BLOCK_256B_EN, {0, 0, 0}, + {153600, 180224, 282624} }, { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB, BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA, - {2880, 3680} }, + MISC_REG_BLOCK_256B_EN, {0, 1, 1}, + {92160, 117760, 168960} }, { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB, BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA, - {1152, 1152} } + MISCS_REG_BLOCK_256B_EN, {0, 0, 0}, + {36864, 36864, 36864} } }; static struct reset_reg_defs s_reset_regs_defs[] = { /* DBG_RESET_REG_MISCS_PL_UA */ - { MISCS_REG_RESET_PL_UA, 0x0, - {true, true} }, + { MISCS_REG_RESET_PL_UA, + {true, true, true}, {0x0, 0x0, 0x0} }, /* DBG_RESET_REG_MISCS_PL_HV */ - { MISCS_REG_RESET_PL_HV, 0x0, - {true, true} }, + { MISCS_REG_RESET_PL_HV, + {true, true, true}, {0x0, 0x400, 0x600} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */ - { MISCS_REG_RESET_PL_HV_2_K2, 0x0, - {false, true} }, + { MISCS_REG_RESET_PL_HV_2_K2_E5, + {false, true, true}, {0x0, 0x0, 0x0} }, /* DBG_RESET_REG_MISC_PL_UA */ - { MISC_REG_RESET_PL_UA, 0x0, - {true, true} }, + { MISC_REG_RESET_PL_UA, + {true, true, true}, {0x0, 0x0, 0x0} }, /* DBG_RESET_REG_MISC_PL_HV */ - { MISC_REG_RESET_PL_HV, 0x0, - {true, true} }, + { MISC_REG_RESET_PL_HV, + {true, true, true}, {0x0, 0x0, 0x0} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */ - { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040, - {true, true} }, + { MISC_REG_RESET_PL_PDA_VMAIN_1, + {true, true, true}, {0x4404040, 0x4404040, 0x404040} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */ - { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007, - {true, true} }, + { MISC_REG_RESET_PL_PDA_VMAIN_2, + {true, true, true}, {0x7, 0x7c00007, 0x5c08007} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */ - { MISC_REG_RESET_PL_PDA_VAUX, 0x2, - {true, true} }, + { MISC_REG_RESET_PL_PDA_VAUX, + {true, true, true}, {0x2, 0x2, 0x2} }, }; static struct phy_defs s_phy_defs[] = { {"nw_phy", NWS_REG_NWS_CMU_K2, - PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2, - PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2, - PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2, 
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2}, - {"sgmii_phy", MS_REG_MS_CMU_K2, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2}, - {"pcie_phy0", PHY_PCIE_REG_PHY0_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, - {"pcie_phy1", PHY_PCIE_REG_PHY1_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, + PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5, + PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5, + PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5, + PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5}, + {"sgmii_phy", MS_REG_MS_CMU_K2_E5, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, + {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, + {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, }; /**************************** Private Functions ******************************/ @@ -1774,7 +1799,9 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn, /* Initializes the GRC parameters */ qed_dbg_grc_init_params(p_hwfn); - dev_data->initialized = true; + dev_data->use_dmae = true; + dev_data->num_regs_read = 0; + dev_data->initialized = 1; return DBG_STATUS_OK; } @@ -1807,7 +1834,7 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn, * The address is located in the last line of the Storm RAM. */ addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + - DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) - + DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) - sizeof(fw_info_location); dest = (u32 *)&fw_info_location; @@ -2071,8 +2098,7 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn, /* Writes the "last" section (including CRC) to the specified buffer at the * given offset. Returns the dumped size in dwords. 
*/ -static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn, - u32 *dump_buf, u32 offset, bool dump) +static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump) { u32 start_offset = offset; @@ -2235,7 +2261,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn, case MEM_GROUP_CFC_MEM: case MEM_GROUP_CONN_CFC_MEM: case MEM_GROUP_TASK_CFC_MEM: - return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC); + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) || + qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX); case MEM_GROUP_IGU_MEM: case MEM_GROUP_IGU_MSIX: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU); @@ -2293,7 +2320,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn, for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { struct block_defs *block = s_block_defs[block_id]; - if (block->has_reset_bit && block->unreset) + if (block->exists[dev_data->chip_id] && block->has_reset_bit && + block->unreset) reg_val[block->reset_reg] |= BIT(block->reset_bit_offset); } @@ -2303,7 +2331,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn, if (!s_reset_regs_defs[i].exists[dev_data->chip_id]) continue; - reg_val[i] |= s_reset_regs_defs[i].unreset_val; + reg_val[i] |= + s_reset_regs_defs[i].unreset_val[dev_data->chip_id]; if (reg_val[i]) qed_wr(p_hwfn, @@ -2413,6 +2442,18 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, return offset; } +/* Reads the specified registers into the specified buffer. + * The addr and len arguments are specified in dwords. + */ +void qed_read_regs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len) +{ + u32 i; + + for (i = 0; i < len; i++) + buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i)); +} + /* Dumps the GRC registers in the specified address range. * Returns the dumped size in dwords. * The addr and len arguments are specified in dwords. @@ -2422,15 +2463,39 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u32 addr, u32 len, bool wide_bus) { - u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i; + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; if (!dump) return len; - for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++) - *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr); + /* Print log if needed */ + dev_data->num_regs_read += len; + if (dev_data->num_regs_read >= + s_platform_defs[dev_data->platform_id].log_thresh) { + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "Dumping %d registers...\n", + dev_data->num_regs_read); + dev_data->num_regs_read = 0; + } - return offset; + /* Try reading using DMAE */ + if (dev_data->use_dmae && + (len >= s_platform_defs[dev_data->platform_id].dmae_thresh || + wide_bus)) { + if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr), + (u64)(uintptr_t)(dump_buf), len, 0)) + return len; + dev_data->use_dmae = 0; + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "Failed reading from chip using DMAE, using GRC instead\n"); + } + + /* Read registers */ + qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len); + + return len; } /* Dumps GRC registers sequence header. Returns the dumped size in dwords. 
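The heart of this patch is the reworked qed_grc_dump_addr_range() just above: ranges that are long (at least the platform's dmae_thresh) or behind a wide bus are fetched in one DMAE transaction, and on the first DMAE failure use_dmae is cleared so the rest of the dump falls back to plain GRC reads via qed_read_regs(). A condensed sketch of that control flow, reusing the driver's own qed_dmae_grc2host()/qed_rd()/DWORDS_TO_BYTES() exactly as called in the hunk (so the qed headers are assumed in scope); struct example_ctx and read_range() are illustrative stand-ins for dev_data and the real function:

#include <linux/types.h>

struct example_ctx {
	bool use_dmae;		/* cleared for good on the first DMAE failure */
	u32 dmae_thresh;	/* min length (dwords) worth a DMAE transaction */
};

static u32 read_range(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		      struct example_ctx *ctx, u32 *buf,
		      u32 addr, u32 len, bool wide_bus)
{
	u32 i;

	/* Fast path: one DMAE transaction for long or wide-bus ranges */
	if (ctx->use_dmae && (len >= ctx->dmae_thresh || wide_bus)) {
		if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
				       (u64)(uintptr_t)buf, len, 0))
			return len;
		ctx->use_dmae = false;	/* don't retry DMAE in this dump */
	}

	/* Fallback: dword-by-dword GRC reads; always works, but slow */
	for (i = 0; i < len; i++)
		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));

	return len;
}

Latching the failure, rather than retrying per range, matters because a GRC dump touches tens of thousands of registers; a broken DMAE engine would otherwise add a failed transaction, and its timeout, to every single range.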
@@ -2630,9 +2695,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, chip = &s_chip_defs[dev_data->chip_id]; chip_platform = &chip->per_platform[dev_data->platform_id]; - if (dump) - DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n"); - while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) { const struct dbg_dump_split_hdr *split_hdr; @@ -2966,22 +3028,12 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, offset += qed_dump_str_param(dump_buf + offset, dump, "name", buf); - if (dump) - DP_VERBOSE(p_hwfn, - QED_MSG_DEBUG, - "Dumping %d registers from %s...\n", - len, buf); } else { /* Dump address */ u32 addr_in_bytes = DWORDS_TO_BYTES(addr); offset += qed_dump_num_param(dump_buf + offset, dump, "addr", addr_in_bytes); - if (dump && len > 64) - DP_VERBOSE(p_hwfn, - QED_MSG_DEBUG, - "Dumping %d registers from address 0x%x...\n", - len, addr_in_bytes); } /* Dump len */ @@ -3530,17 +3582,16 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, u8 rss_mem_id; for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) { - u32 rss_addr, num_entries, entry_width, total_dwords, i; + u32 rss_addr, num_entries, total_dwords; struct rss_mem_defs *rss_defs; - u32 addr, size; + u32 addr, num_dwords_to_read; bool packed; rss_defs = &s_rss_mem_defs[rss_mem_id]; rss_addr = rss_defs->addr; num_entries = rss_defs->num_entries[dev_data->chip_id]; - entry_width = rss_defs->entry_width[dev_data->chip_id]; - total_dwords = (num_entries * entry_width) / 32; - packed = (entry_width == 16); + total_dwords = (num_entries * rss_defs->entry_width) / 32; + packed = (rss_defs->entry_width == 16); offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, @@ -3548,7 +3599,7 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, rss_defs->mem_name, 0, total_dwords, - entry_width, + rss_defs->entry_width, packed, rss_defs->type_name, false, 0); @@ -3559,16 +3610,20 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, } addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA); - size = RSS_REG_RSS_RAM_DATA_SIZE; - for (i = 0; i < total_dwords; i += size, rss_addr++) { + while (total_dwords) { + num_dwords_to_read = min_t(u32, + RSS_REG_RSS_RAM_DATA_SIZE, + total_dwords); qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr); offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, - size, + num_dwords_to_read, false); + total_dwords -= num_dwords_to_read; + rss_addr++; } } @@ -3581,14 +3636,18 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u8 big_ram_id) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u32 total_blocks, ram_size, offset = 0, i; + u32 block_size, ram_size, offset = 0, reg_val, i; char mem_name[12] = "???_BIG_RAM"; char type_name[8] = "???_RAM"; struct big_ram_defs *big_ram; big_ram = &s_big_ram_defs[big_ram_id]; - total_blocks = big_ram->num_of_blocks[dev_data->chip_id]; - ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS; + ram_size = big_ram->ram_size[dev_data->chip_id]; + + reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr); + block_size = reg_val & + BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 
256 + : 128; strncpy(type_name, big_ram->instance_name, strlen(big_ram->instance_name)); @@ -3602,7 +3661,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, mem_name, 0, ram_size, - BIG_RAM_BLOCK_SIZE_BYTES * 8, + block_size * 8, false, type_name, false, 0); /* Read and dump Big RAM data */ @@ -3610,12 +3669,13 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, return offset + ram_size; /* Dump Big RAM */ - for (i = 0; i < total_blocks / 2; i++) { + for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE); + i++) { u32 addr, len; qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i); addr = BYTES_TO_DWORDS(big_ram->data_reg_addr); - len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS; + len = BRB_REG_BIG_RAM_DATA_SIZE; offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, @@ -3649,7 +3709,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, dump, NULL, BYTES_TO_DWORDS(MCP_REG_SCRATCH), - MCP_REG_SCRATCH_SIZE, + MCP_REG_SCRATCH_SIZE_BB_K2, false, 0, false, "MCP", false, 0); /* Dump MCP cpu_reg_file */ @@ -3710,7 +3770,6 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, phy_defs->tbus_data_lo_addr; data_hi_addr = phy_defs->base_addr + phy_defs->tbus_data_hi_addr; - bytes_buf = (u8 *)(dump_buf + offset); if (snprintf(mem_name, sizeof(mem_name), "tbus_%s", phy_defs->phy_name) < 0) @@ -3730,6 +3789,7 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, continue; } + bytes_buf = (u8 *)(dump_buf + offset); for (tbus_hi_offset = 0; tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); tbus_hi_offset++) { @@ -3778,19 +3838,17 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 block_id, line_id, offset = 0; - /* Skip static debug if a debug bus recording is in progress */ - if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON)) + /* Don't dump static debug if a debug bus recording is in progress */ + if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON)) return 0; if (dump) { - DP_VERBOSE(p_hwfn, - QED_MSG_DEBUG, "Dumping static debug data...\n"); - /* Disable all blocks debug output */ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { struct block_defs *block = s_block_defs[block_id]; - if (block->has_dbg_bus[dev_data->chip_id]) + if (block->dbg_client_id[dev_data->chip_id] != + MAX_DBG_BUS_CLIENTS) qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0); } @@ -3811,12 +3869,12 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, u32 block_dwords, addr, len; u8 dbg_client_id; - if (!block->has_dbg_bus[dev_data->chip_id]) + if (block->dbg_client_id[dev_data->chip_id] == + MAX_DBG_BUS_CLIENTS) continue; - block_desc = - get_dbg_bus_block_desc(p_hwfn, - (enum block_id)block_id); + block_desc = get_dbg_bus_block_desc(p_hwfn, + (enum block_id)block_id); block_dwords = NUM_DBG_LINES(block_desc) * STATIC_DEBUG_LINE_DWORDS; @@ -4044,7 +4102,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump); /* Dump last section */ - offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + offset += qed_dump_last_section(dump_buf, offset, dump); if (dump) { /* Unstall storms */ @@ -4253,30 +4311,33 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, if (!check_rule && dump) continue; + if (!dump) { + u32 entry_dump_size = + qed_idle_chk_dump_failure(p_hwfn, + p_ptt, + dump_buf + offset, + false, + rule->rule_id, + rule, + 0, + NULL); + + offset += num_reg_entries * entry_dump_size; + (*num_failing_rules) += num_reg_entries; + continue; 
+ } + /* Go over all register entries (number of entries is the same * for all condition registers). */ for (entry_id = 0; entry_id < num_reg_entries; entry_id++) { u32 next_reg_offset = 0; - if (!dump) { - offset += qed_idle_chk_dump_failure(p_hwfn, - p_ptt, - dump_buf + offset, - false, - rule->rule_id, - rule, - entry_id, - NULL); - (*num_failing_rules)++; - break; - } - /* Read current entry of all condition registers */ for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) { const struct dbg_idle_chk_cond_reg *reg = - &cond_regs[reg_id]; + &cond_regs[reg_id]; u32 padded_entry_size, addr; bool wide_bus; @@ -4291,9 +4352,9 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, if (reg->num_entries > 1 || reg->start_entry > 0) { padded_entry_size = - reg->entry_size > 1 ? - roundup_pow_of_two(reg->entry_size) - : 1; + reg->entry_size > 1 ? + roundup_pow_of_two(reg->entry_size) : + 1; addr += (reg->start_entry + entry_id) * padded_entry_size; } @@ -4329,7 +4390,6 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, entry_id, cond_reg_values); (*num_failing_rules)++; - break; } } } @@ -4402,7 +4462,7 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, dump, "num_rules", num_failing_rules); /* Dump last section */ - offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + offset += qed_dump_last_section(dump_buf, offset, dump); return offset; } @@ -4474,7 +4534,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, (nvram_offset_bytes + read_offset) | (bytes_to_copy << - DRV_MB_PARAM_NVM_LEN_SHIFT), + DRV_MB_PARAM_NVM_LEN_OFFSET), &ret_mcp_resp, &ret_mcp_param, &ret_read_size, (u32 *)((u8 *)ret_buf + read_offset))) @@ -4701,7 +4761,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, offset += trace_meta_size_dwords; /* Dump last section */ - offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + offset += qed_dump_last_section(dump_buf, offset, dump); *num_dumped_dwords = offset; @@ -4717,7 +4777,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u32 *num_dumped_dwords) { - u32 dwords_read, size_param_offset, offset = 0; + u32 dwords_read, size_param_offset, offset = 0, addr, len; bool fifo_has_data; *num_dumped_dwords = 0; @@ -4753,14 +4813,18 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, * buffer size since more entries could be added to the buffer as we are * emptying it. 
*/ + addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO); + len = REG_FIFO_ELEMENT_DWORDS; for (dwords_read = 0; fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS; - dwords_read += REG_FIFO_ELEMENT_DWORDS, offset += - REG_FIFO_ELEMENT_DWORDS) { - if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO, - (u64)(uintptr_t)(&dump_buf[offset]), - REG_FIFO_ELEMENT_DWORDS, 0)) - return DBG_STATUS_DMAE_FAILED; + dwords_read += REG_FIFO_ELEMENT_DWORDS) { + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + true, + addr, + len, + true); fifo_has_data = qed_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0; } @@ -4769,7 +4833,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, dwords_read); out: /* Dump last section */ - offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + offset += qed_dump_last_section(dump_buf, offset, dump); *num_dumped_dwords = offset; @@ -4782,7 +4846,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u32 *num_dumped_dwords) { - u32 dwords_read, size_param_offset, offset = 0; + u32 dwords_read, size_param_offset, offset = 0, addr, len; bool fifo_has_data; *num_dumped_dwords = 0; @@ -4818,16 +4882,19 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, * buffer size since more entries could be added to the buffer as we are * emptying it. */ + addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY); + len = IGU_FIFO_ELEMENT_DWORDS; for (dwords_read = 0; fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS; - dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset += - IGU_FIFO_ELEMENT_DWORDS) { - if (qed_dmae_grc2host(p_hwfn, p_ptt, - IGU_REG_ERROR_HANDLING_MEMORY, - (u64)(uintptr_t)(&dump_buf[offset]), - IGU_FIFO_ELEMENT_DWORDS, 0)) - return DBG_STATUS_DMAE_FAILED; - fifo_has_data = qed_rd(p_hwfn, p_ptt, + dwords_read += IGU_FIFO_ELEMENT_DWORDS) { + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + true, + addr, + len, + true); + fifo_has_data = qed_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0; } @@ -4835,7 +4902,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, dwords_read); out: /* Dump last section */ - offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + offset += qed_dump_last_section(dump_buf, offset, dump); *num_dumped_dwords = offset; @@ -4849,7 +4916,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, bool dump, u32 *num_dumped_dwords) { - u32 size_param_offset, override_window_dwords, offset = 0; + u32 size_param_offset, override_window_dwords, offset = 0, addr; *num_dumped_dwords = 0; @@ -4875,20 +4942,21 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, /* Add override window info to buffer */ override_window_dwords = - qed_rd(p_hwfn, p_ptt, - GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * - PROTECTION_OVERRIDE_ELEMENT_DWORDS; - if (qed_dmae_grc2host(p_hwfn, p_ptt, - GRC_REG_PROTECTION_OVERRIDE_WINDOW, - (u64)(uintptr_t)(dump_buf + offset), - override_window_dwords, 0)) - return DBG_STATUS_DMAE_FAILED; - offset += override_window_dwords; + qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * + PROTECTION_OVERRIDE_ELEMENT_DWORDS; + addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW); + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + true, + addr, + override_window_dwords, + true); qed_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords); out: /* Dump last section */ - 
offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + offset += qed_dump_last_section(dump_buf, offset, dump); *num_dumped_dwords = offset; @@ -4952,9 +5020,9 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset); next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr); - last_list_idx = (next_list_idx > 0 - ? next_list_idx - : asserts->list_num_elements) - 1; + last_list_idx = (next_list_idx > 0 ? + next_list_idx : + asserts->list_num_elements) - 1; addr = BYTES_TO_DWORDS(fw_asserts_section_addr) + asserts->list_dword_offset + last_list_idx * asserts->list_element_dword_size; @@ -4967,7 +5035,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, } /* Dump last section */ - offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + offset += qed_dump_last_section(dump_buf, offset, dump); return offset; } @@ -5596,10 +5664,6 @@ struct igu_fifo_addr_data { #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4 -/********************************* Macros ************************************/ - -#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD) - /***************************** Constant Arrays *******************************/ struct user_dbg_array { @@ -5698,6 +5762,7 @@ static struct block_info s_block_info_arr[] = { {"phy_pcie", BLOCK_PHY_PCIE}, {"led", BLOCK_LED}, {"avs_wrap", BLOCK_AVS_WRAP}, + {"pxpreqbus", BLOCK_PXPREQBUS}, {"misc_aeu", BLOCK_MISC_AEU}, {"bar0_map", BLOCK_BAR0_MAP} }; @@ -5830,8 +5895,8 @@ static const char * const s_status_str[] = { /* DBG_STATUS_MCP_COULD_NOT_RESUME */ "Failed to resume MCP after halt", - /* DBG_STATUS_DMAE_FAILED */ - "DMAE transaction failed", + /* DBG_STATUS_RESERVED2 */ + "Reserved debug status - shouldn't be returned", /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */ "Failed to empty SEMI sync FIFO", @@ -6109,6 +6174,7 @@ static u32 qed_read_param(u32 *dump_buf, if (*(char_buf + offset++)) { /* String param */ *param_str_val = char_buf + offset; + *param_num_val = 0; offset += strlen(*param_str_val) + 1; if (offset & 0x3) offset += (4 - (offset & 0x3)); @@ -6177,8 +6243,7 @@ static u32 qed_print_section_params(u32 *dump_buf, /* Parses the idle check rules and returns the number of characters printed. * In case of parsing error, returns 0. */ -static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, - u32 *dump_buf, +static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf, u32 *dump_buf_end, u32 num_rules, bool print_fw_idle_chk, @@ -6322,8 +6387,7 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, * parsed_results_bytes. * The parsing status is returned. */ -static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, - u32 *dump_buf, +static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf, u32 num_dumped_dwords, char *results_buf, u32 *parsed_results_bytes, @@ -6375,13 +6439,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, results_offset), "FW_IDLE_CHECK:\n"); rules_print_size = - qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf, - dump_buf_end, num_rules, + qed_parse_idle_chk_dump_rules(dump_buf, + dump_buf_end, + num_rules, true, results_buf ? 
results_buf + - results_offset : NULL, - num_errors, num_warnings); + results_offset : + NULL, + num_errors, + num_warnings); results_offset += rules_print_size; if (!rules_print_size) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; @@ -6392,13 +6459,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, results_offset), "\nLSI_IDLE_CHECK:\n"); rules_print_size = - qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf, - dump_buf_end, num_rules, + qed_parse_idle_chk_dump_rules(dump_buf, + dump_buf_end, + num_rules, false, results_buf ? results_buf + - results_offset : NULL, - num_errors, num_warnings); + results_offset : + NULL, + num_errors, + num_warnings); results_offset += rules_print_size; if (!rules_print_size) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; @@ -6537,7 +6607,6 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, */ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, - u32 num_dumped_dwords, char *results_buf, u32 *parsed_results_bytes) { @@ -6725,9 +6794,7 @@ free_mem: * parsed_results_bytes. * The parsing status is returned. */ -static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, +static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf, char *results_buf, u32 *parsed_results_bytes) { @@ -6834,8 +6901,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn, static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element *element, char *results_buf, - u32 *results_offset, - u32 *parsed_results_bytes) + u32 *results_offset) { const struct igu_fifo_addr_data *found_addr = NULL; u8 source, err_type, i, is_cleanup; @@ -6933,9 +6999,9 @@ static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ", prod_cons, update_flag ? "update" : "nop", - en_dis_int_for_sb - ? (en_dis_int_for_sb == 1 ? "disable" : "nop") - : "enable", + en_dis_int_for_sb ? + (en_dis_int_for_sb == 1 ? "disable" : "nop") : + "enable", segment ? "attn" : "regular", timer_mask); } @@ -6969,9 +7035,7 @@ out: * parsed_results_bytes. * The parsing status is returned. */ -static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, +static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf, char *results_buf, u32 *parsed_results_bytes) { @@ -7011,8 +7075,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn, for (i = 0; i < num_elements; i++) { status = qed_parse_igu_fifo_element(&elements[i], results_buf, - &results_offset, - parsed_results_bytes); + &results_offset); if (status != DBG_STATUS_OK) return status; } @@ -7028,9 +7091,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn, } static enum dbg_status -qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, +qed_parse_protection_override_dump(u32 *dump_buf, char *results_buf, u32 *parsed_results_bytes) { @@ -7105,9 +7166,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn, * parsed_results_bytes. * The parsing status is returned. 
*/ -static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, +static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf, char *results_buf, u32 *parsed_results_bytes) { @@ -7209,8 +7268,7 @@ enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, { u32 num_errors, num_warnings; - return qed_parse_idle_chk_dump(p_hwfn, - dump_buf, + return qed_parse_idle_chk_dump(dump_buf, num_dumped_dwords, NULL, results_buf_size, @@ -7221,12 +7279,12 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf, - u32 *num_errors, u32 *num_warnings) + u32 *num_errors, + u32 *num_warnings) { u32 parsed_buf_size; - return qed_parse_idle_chk_dump(p_hwfn, - dump_buf, + return qed_parse_idle_chk_dump(dump_buf, num_dumped_dwords, results_buf, &parsed_buf_size, @@ -7245,9 +7303,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, u32 *results_buf_size) { return qed_parse_mcp_trace_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); + dump_buf, NULL, results_buf_size); } enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, @@ -7259,7 +7315,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, - num_dumped_dwords, results_buf, &parsed_buf_size); } @@ -7268,10 +7323,7 @@ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 num_dumped_dwords, u32 *results_buf_size) { - return qed_parse_reg_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); + return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size); } enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, @@ -7281,10 +7333,7 @@ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, { u32 parsed_buf_size; - return qed_parse_reg_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - results_buf, &parsed_buf_size); + return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size); } enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, @@ -7292,10 +7341,7 @@ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 num_dumped_dwords, u32 *results_buf_size) { - return qed_parse_igu_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); + return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size); } enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, @@ -7305,10 +7351,7 @@ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, { u32 parsed_buf_size; - return qed_parse_igu_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - results_buf, &parsed_buf_size); + return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size); } enum dbg_status @@ -7317,9 +7360,7 @@ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, u32 num_dumped_dwords, u32 *results_buf_size) { - return qed_parse_protection_override_dump(p_hwfn, - dump_buf, - num_dumped_dwords, + return qed_parse_protection_override_dump(dump_buf, NULL, results_buf_size); } @@ -7330,9 +7371,7 @@ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, { u32 parsed_buf_size; - return qed_parse_protection_override_dump(p_hwfn, - dump_buf, - num_dumped_dwords, + return qed_parse_protection_override_dump(dump_buf, results_buf, &parsed_buf_size); } @@ -7342,10 +7381,7 @@ enum dbg_status 
qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, u32 num_dumped_dwords, u32 *results_buf_size) { - return qed_parse_fw_asserts_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); + return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size); } enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, @@ -7355,9 +7391,7 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, { u32 parsed_buf_size; - return qed_parse_fw_asserts_dump(p_hwfn, - dump_buf, - num_dumped_dwords, + return qed_parse_fw_asserts_dump(dump_buf, results_buf, &parsed_buf_size); } @@ -7386,30 +7420,30 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, /* Go over registers with a non-zero attention status */ for (i = 0; i < num_regs; i++) { + struct dbg_attn_bit_mapping *bit_mapping; struct dbg_attn_reg_result *reg_result; - struct dbg_attn_bit_mapping *mapping; u8 num_reg_attn, bit_idx = 0; reg_result = &results->reg_results[i]; num_reg_attn = GET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_NUM_REG_ATTN); block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES]; - mapping = &((struct dbg_attn_bit_mapping *) - block_attn->ptr)[reg_result->block_attn_offset]; + bit_mapping = &((struct dbg_attn_bit_mapping *) + block_attn->ptr)[reg_result->block_attn_offset]; pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS]; /* Go over attention status bits */ for (j = 0; j < num_reg_attn; j++) { - u16 attn_idx_val = GET_FIELD(mapping[j].data, + u16 attn_idx_val = GET_FIELD(bit_mapping[j].data, DBG_ATTN_BIT_MAPPING_VAL); const char *attn_name, *attn_type_str, *masked_str; - u32 name_offset, sts_addr; + u32 attn_name_offset, sts_addr; /* Check if bit mask should be advanced (due to unused * bits). */ - if (GET_FIELD(mapping[j].data, + if (GET_FIELD(bit_mapping[j].data, DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) { bit_idx += (u8)attn_idx_val; continue; @@ -7422,9 +7456,10 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, } /* Find attention name */ - name_offset = block_attn_name_offsets[attn_idx_val]; + attn_name_offset = + block_attn_name_offsets[attn_idx_val]; attn_name = &((const char *) - pstrings->ptr)[name_offset]; + pstrings->ptr)[attn_name_offset]; attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ? "Interrupt" : "Parity"; masked_str = reg_result->mask_val & BIT(bit_idx) ? diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 58a689fb04db..553a6d17260e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -758,7 +758,7 @@ static void qed_init_qm_info(struct qed_hwfn *p_hwfn) /* This function reconfigures the QM pf on the fly. * For this purpose we: * 1. reconfigure the QM database - * 2. set new values to runtime arrat + * 2. set new values to runtime array * 3. send an sdm_qm_cmd through the rbc interface to stop the QM * 4. activate init tool in QM_PF stage * 5. 
send an sdm_qm_cmd through rbc interface to release the QM @@ -784,7 +784,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_init_clear_rt_data(p_hwfn); /* prepare QM portion of runtime array */ - qed_qm_init_pf(p_hwfn, p_ptt); + qed_qm_init_pf(p_hwfn, p_ptt, false); /* activate init tool on runtime array */ rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, @@ -1515,7 +1515,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1); } - /* Protocl Configuration */ + /* Protocol Configuration */ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0); STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, @@ -1527,6 +1527,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, if (rc) return rc; + /* Sanity check before the PF init sequence that uses DMAE */ + rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase"); + if (rc) + return rc; + /* PF Init sequence */ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); if (rc) @@ -2192,7 +2197,7 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) /* No need for a case for QED_CMDQS_CQS since * CNQ/CMDQS are the same resource. */ - resc_max_val = NUM_OF_CMDQS_CQS; + resc_max_val = NUM_OF_GLOBAL_QUEUES; break; case QED_RDMA_STATS_QUEUE: resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 @@ -2267,7 +2272,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, case QED_RDMA_CNQ_RAM: case QED_CMDQS_CQS: /* CNQ/CMDQS are the same resource */ - *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs; + *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs; break; case QED_RDMA_STATS_QUEUE: *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 : diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index df195c02b711..2dc9b312a795 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -115,7 +115,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, struct qed_fcoe_pf_params *fcoe_pf_params = NULL; struct fcoe_init_ramrod_params *p_ramrod = NULL; struct fcoe_init_func_ramrod_data *p_data; - struct fcoe_conn_context *p_cxt = NULL; + struct e4_fcoe_conn_context *p_cxt = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; struct qed_cxt_info cxt_info; @@ -167,7 +167,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, } p_cxt = cxt_info.p_cxt; SET_FIELD(p_cxt->tstorm_ag_context.flags3, - TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); + E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); fcoe_pf_params->dummy_icid = (u16)dummy_cid; @@ -568,7 +568,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn) void qed_fcoe_setup(struct qed_hwfn *p_hwfn) { - struct fcoe_task_context *p_task_ctx = NULL; + struct e4_fcoe_task_context *p_task_ctx = NULL; int rc; u32 i; @@ -580,13 +580,13 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn) if (rc) continue; - memset(p_task_ctx, 0, sizeof(struct fcoe_task_context)); + memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context)); SET_FIELD(p_task_ctx->timer_context.logical_client_0, TIMERS_CONTEXT_VALIDLC0, 1); SET_FIELD(p_task_ctx->timer_context.logical_client_1, TIMERS_CONTEXT_VALIDLC1, 1); SET_FIELD(p_task_ctx->tstorm_ag_context.flags0, - TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); + E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 3427fe7049b5..de873d770575 100644 
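The qed_hsi.h changes that follow are largely mechanical: the core storm context structures are moved below the ramrod definitions and renamed with an E4_ prefix (apparently marking the current ASIC generation, as opposed to the future E5 hinted at by the e5_reserved fields), and the MASK/SHIFT #define pairs are re-aligned. Those pairs are consumed through the driver's GET_FIELD()/SET_FIELD() accessors, as in the qed_fcoe.c hunk above; the sketch below is consistent with how the shared HSI headers define them, using the E4 Tstorm CF0 field (2 bits at bits 7:6 of flags0) from the structures added later in this file:

#include <stdint.h>

/* Field accessors in the style of the qed HSI headers: each field is
 * described by a NAME_MASK/NAME_SHIFT pair and read or written in place.
 */
#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & name##_MASK)
#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~((name##_MASK) << (name##_SHIFT));		\
		(value) |= (((uint64_t)(flag)) & (name##_MASK)) <<	\
			   (name##_SHIFT);				\
	} while (0)

#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT	6

/* Write then read back the 2-bit CF0 field of flags0. */
static uint8_t set_and_get_cf0(uint8_t flags0)
{
	SET_FIELD(flags0, E4_TSTORM_CORE_CONN_AG_CTX_CF0, 2);
	return GET_FIELD(flags0, E4_TSTORM_CORE_CONN_AG_CTX_CF0);
}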
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -54,7 +54,7 @@ struct qed_hwfn; struct qed_ptt; -/* opcodes for the event ring */ +/* Opcodes for the event ring */ enum common_event_opcode { COMMON_EVENT_PF_START, COMMON_EVENT_PF_STOP, @@ -82,487 +82,7 @@ enum common_ramrod_cmd_id { MAX_COMMON_RAMROD_CMD_ID }; -/* The core storm context for the Ystorm */ -struct ystorm_core_conn_st_ctx { - __le32 reserved[4]; -}; - -/* The core storm context for the Pstorm */ -struct pstorm_core_conn_st_ctx { - __le32 reserved[4]; -}; - -/* Core Slowpath Connection storm context of Xstorm */ -struct xstorm_core_conn_st_ctx { - __le32 spq_base_lo; - __le32 spq_base_hi; - struct regpair consolid_base_addr; - __le16 spq_cons; - __le16 consolid_cons; - __le32 reserved0[55]; -}; - -struct xstorm_core_conn_ag_ctx { - u8 reserved0; - u8 core_state; - u8 flags0; -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 - u8 flags1; -#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 - u8 flags2; -#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 - u8 flags3; -#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 - u8 flags4; -#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 -#define 
XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 - u8 flags5; -#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 - u8 flags6; -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 - u8 flags7; -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 - u8 flags8; -#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 - u8 flags9; -#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 - u8 flags10; -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define 
XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 - u8 flags11; -#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 - u8 flags12; -#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 - u8 flags13; -#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 - u8 flags14; -#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 -#define 
XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 - u8 byte2; - __le16 physical_q0; - __le16 consolid_prod; - __le16 reserved16; - __le16 tx_bd_cons; - __le16 tx_bd_or_spq_prod; - __le16 word5; - __le16 conn_dpi; - u8 byte3; - u8 byte4; - u8 byte5; - u8 byte6; - __le32 reg0; - __le32 reg1; - __le32 reg2; - __le32 reg3; - __le32 reg4; - __le32 reg5; - __le32 reg6; - __le16 word7; - __le16 word8; - __le16 word9; - __le16 word10; - __le32 reg7; - __le32 reg8; - __le32 reg9; - u8 byte7; - u8 byte8; - u8 byte9; - u8 byte10; - u8 byte11; - u8 byte12; - u8 byte13; - u8 byte14; - u8 byte15; - u8 e5_reserved; - __le16 word11; - __le32 reg10; - __le32 reg11; - __le32 reg12; - __le32 reg13; - __le32 reg14; - __le32 reg15; - __le32 reg16; - __le32 reg17; - __le32 reg18; - __le32 reg19; - __le16 word12; - __le16 word13; - __le16 word14; - __le16 word15; -}; - -struct tstorm_core_conn_ag_ctx { - u8 byte0; - u8 byte1; - u8 flags0; -#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 - u8 flags1; -#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 - u8 flags2; -#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ -#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ -#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 - u8 flags3; -#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ -#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ -#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 - u8 flags4; -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define 
TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags5; -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 - __le32 reg0; - __le32 reg1; - __le32 reg2; - __le32 reg3; - __le32 reg4; - __le32 reg5; - __le32 reg6; - __le32 reg7; - __le32 reg8; - u8 byte2; - u8 byte3; - __le16 word0; - u8 byte4; - u8 byte5; - __le16 word1; - __le16 word2; - __le16 word3; - __le32 reg9; - __le32 reg10; -}; - -struct ustorm_core_conn_ag_ctx { - u8 reserved; - u8 byte1; - u8 flags0; -#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 -#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 -#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 -#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 - u8 flags2; -#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 -#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags3; -#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define 
USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 - u8 byte2; - u8 byte3; - __le16 word0; - __le16 word1; - __le32 rx_producers; - __le32 reg1; - __le32 reg2; - __le32 reg3; - __le16 word2; - __le16 word3; -}; - -/* The core storm context for the Mstorm */ -struct mstorm_core_conn_st_ctx { - __le32 reserved[24]; -}; - -/* The core storm context for the Ustorm */ -struct ustorm_core_conn_st_ctx { - __le32 reserved[4]; -}; - -/* core connection context */ -struct core_conn_context { - struct ystorm_core_conn_st_ctx ystorm_st_context; - struct regpair ystorm_st_padding[2]; - struct pstorm_core_conn_st_ctx pstorm_st_context; - struct regpair pstorm_st_padding[2]; - struct xstorm_core_conn_st_ctx xstorm_st_context; - struct xstorm_core_conn_ag_ctx xstorm_ag_context; - struct tstorm_core_conn_ag_ctx tstorm_ag_context; - struct ustorm_core_conn_ag_ctx ustorm_ag_context; - struct mstorm_core_conn_st_ctx mstorm_st_context; - struct ustorm_core_conn_st_ctx ustorm_st_context; - struct regpair ustorm_st_padding[2]; -}; - +/* How ll2 should deal with packet upon errors */ enum core_error_handle { LL2_DROP_PACKET, LL2_DO_NOTHING, @@ -570,21 +90,25 @@ enum core_error_handle { MAX_CORE_ERROR_HANDLE }; +/* Opcodes for the event ring */ enum core_event_opcode { CORE_EVENT_TX_QUEUE_START, CORE_EVENT_TX_QUEUE_STOP, CORE_EVENT_RX_QUEUE_START, CORE_EVENT_RX_QUEUE_STOP, CORE_EVENT_RX_QUEUE_FLUSH, + CORE_EVENT_TX_QUEUE_UPDATE, MAX_CORE_EVENT_OPCODE }; +/* The L4 pseudo checksum mode for Core */ enum core_l4_pseudo_checksum_mode { CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH, CORE_L4_PSEUDO_CSUM_ZERO_LENGTH, MAX_CORE_L4_PSEUDO_CHECKSUM_MODE }; +/* Light-L2 RX Producers in Tstorm RAM */ struct core_ll2_port_stats { struct regpair gsi_invalid_hdr; struct regpair gsi_invalid_pkt_length; @@ -592,6 +116,7 @@ struct core_ll2_port_stats { struct regpair gsi_crcchksm_error; }; +/* Ethernet TX Per Queue Stats */ struct core_ll2_pstorm_per_queue_stat { struct regpair sent_ucast_bytes; struct regpair sent_mcast_bytes; @@ -601,6 +126,7 @@ struct core_ll2_pstorm_per_queue_stat { struct regpair sent_bcast_pkts; }; +/* Light-L2 RX Producers in Tstorm RAM */ struct core_ll2_rx_prod { __le16 bd_prod; __le16 cqe_prod; @@ -621,6 +147,7 @@ struct core_ll2_ustorm_per_queue_stat { struct regpair rcv_bcast_pkts; }; +/* Core Ramrod Command IDs (light L2) */ enum core_ramrod_cmd_id { CORE_RAMROD_UNUSED, CORE_RAMROD_RX_QUEUE_START, @@ -628,53 +155,64 @@ enum core_ramrod_cmd_id { CORE_RAMROD_RX_QUEUE_STOP, CORE_RAMROD_TX_QUEUE_STOP, CORE_RAMROD_RX_QUEUE_FLUSH, + CORE_RAMROD_TX_QUEUE_UPDATE, MAX_CORE_RAMROD_CMD_ID }; +/* Core RX CQE Type for Light L2 */ enum core_roce_flavor_type { CORE_ROCE, CORE_RROCE, MAX_CORE_ROCE_FLAVOR_TYPE }; +/* Specifies how ll2 should deal with packets errors: packet_too_big and + * no_buff. 
+ */ struct core_rx_action_on_error { u8 error_type; #define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3 -#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0 -#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3 -#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2 -#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF -#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4 +#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0 +#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3 +#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2 +#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF +#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4 }; +/* Core RX BD for Light L2 */ struct core_rx_bd { struct regpair addr; __le16 reserved[4]; }; +/* Core RX CM offload BD for Light L2 */ struct core_rx_bd_with_buff_len { struct regpair addr; __le16 buff_length; __le16 reserved[3]; }; +/* Core RX CM offload BD for Light L2 */ union core_rx_bd_union { struct core_rx_bd rx_bd; struct core_rx_bd_with_buff_len rx_bd_with_len; }; +/* Opaque Data for Light L2 RX CQE */ struct core_rx_cqe_opaque_data { __le32 data[2]; }; +/* Core RX CQE Type for Light L2 */ enum core_rx_cqe_type { - CORE_RX_CQE_ILLIGAL_TYPE, + CORE_RX_CQE_ILLEGAL_TYPE, CORE_RX_CQE_TYPE_REGULAR, CORE_RX_CQE_TYPE_GSI_OFFLOAD, CORE_RX_CQE_TYPE_SLOW_PATH, MAX_CORE_RX_CQE_TYPE }; +/* Core RX CQE for Light L2 */ struct core_rx_fast_path_cqe { u8 type; u8 placement_offset; @@ -687,6 +225,7 @@ struct core_rx_fast_path_cqe { __le32 reserved1[3]; }; +/* Core Rx CM offload CQE */ struct core_rx_gsi_offload_cqe { u8 type; u8 data_length_error; @@ -696,9 +235,11 @@ struct core_rx_gsi_offload_cqe { __le32 src_mac_addrhi; __le16 src_mac_addrlo; __le16 qp_id; - __le32 gid_dst[4]; + __le32 src_qp; + __le32 reserved[3]; }; +/* Core RX CQE for Light L2 */ struct core_rx_slow_path_cqe { u8 type; u8 ramrod_cmd_id; @@ -707,12 +248,14 @@ struct core_rx_slow_path_cqe { __le32 reserved1[5]; }; +/* Core RX CM offload BD for Light L2 */ union core_rx_cqe_union { struct core_rx_fast_path_cqe rx_cqe_fp; struct core_rx_gsi_offload_cqe rx_cqe_gsi; struct core_rx_slow_path_cqe rx_cqe_sp; }; +/* Ramrod data for rx queue start ramrod */ struct core_rx_start_ramrod_data { struct regpair bd_base; struct regpair cqe_pbl_addr; @@ -723,16 +266,18 @@ struct core_rx_start_ramrod_data { u8 complete_event_flg; u8 drop_ttl0_flg; __le16 num_of_pbl_pages; - u8 inner_vlan_removal_en; + u8 inner_vlan_stripping_en; + u8 report_outer_vlan; u8 queue_id; u8 main_func_queue; u8 mf_si_bcast_accept_all; u8 mf_si_mcast_accept_all; struct core_rx_action_on_error action_on_error; u8 gsi_offload_flag; - u8 reserved[7]; + u8 reserved[6]; }; +/* Ramrod data for rx queue stop ramrod */ struct core_rx_stop_ramrod_data { u8 complete_cqe_flg; u8 complete_event_flg; @@ -741,46 +286,51 @@ struct core_rx_stop_ramrod_data { __le16 reserved2[2]; }; +/* Flags for Core TX BD */ struct core_tx_bd_data { __le16 as_bitfield; -#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1 -#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0 -#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1 -#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1 -#define CORE_TX_BD_DATA_START_BD_MASK 0x1 -#define CORE_TX_BD_DATA_START_BD_SHIFT 2 -#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1 -#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3 -#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1 -#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4 -#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1 -#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5 -#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1 -#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6 
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1 +#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0 +#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1 +#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1 +#define CORE_TX_BD_DATA_START_BD_MASK 0x1 +#define CORE_TX_BD_DATA_START_BD_SHIFT 2 +#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1 +#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3 +#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1 +#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4 +#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1 +#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5 +#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1 +#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6 #define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1 -#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7 -#define CORE_TX_BD_DATA_NBDS_MASK 0xF -#define CORE_TX_BD_DATA_NBDS_SHIFT 8 -#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1 -#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12 -#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1 -#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13 -#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3 -#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14 -}; - +#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7 +#define CORE_TX_BD_DATA_NBDS_MASK 0xF +#define CORE_TX_BD_DATA_NBDS_SHIFT 8 +#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1 +#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12 +#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1 +#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13 +#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK 0x1 +#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14 +#define CORE_TX_BD_DATA_RESERVED0_MASK 0x1 +#define CORE_TX_BD_DATA_RESERVED0_SHIFT 15 +}; + +/* Core TX BD for Light L2 */ struct core_tx_bd { struct regpair addr; __le16 nbytes; __le16 nw_vlan_or_lb_echo; struct core_tx_bd_data bd_data; __le16 bitfield1; -#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF -#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0 -#define CORE_TX_BD_TX_DST_MASK 0x3 -#define CORE_TX_BD_TX_DST_SHIFT 14 +#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF +#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0 +#define CORE_TX_BD_TX_DST_MASK 0x3 +#define CORE_TX_BD_TX_DST_SHIFT 14 }; +/* Light L2 TX Destination */ enum core_tx_dest { CORE_TX_DEST_NW, CORE_TX_DEST_LB, @@ -789,6 +339,7 @@ enum core_tx_dest { MAX_CORE_TX_DEST }; +/* Ramrod data for tx queue start ramrod */ struct core_tx_start_ramrod_data { struct regpair pbl_base_addr; __le16 mtu; @@ -803,10 +354,20 @@ struct core_tx_start_ramrod_data { u8 resrved[3]; }; +/* Ramrod data for tx queue stop ramrod */ struct core_tx_stop_ramrod_data { __le32 reserved0[2]; }; +/* Ramrod data for tx queue update ramrod */ +struct core_tx_update_ramrod_data { + u8 update_qm_pq_id_flg; + u8 reserved0; + __le16 qm_pq_id; + __le32 reserved1[1]; +}; + +/* Enum flag for what type of dcb data to update */ enum dcb_dscp_update_mode { DONT_UPDATE_DCB_DSCP, UPDATE_DCB, @@ -815,6 +376,487 @@ enum dcb_dscp_update_mode { MAX_DCB_DSCP_UPDATE_MODE }; +/* The core storm context for the Ystorm */ +struct ystorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* The core storm context for the Pstorm */ +struct pstorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* Core Slowpath Connection storm context of Xstorm */ +struct xstorm_core_conn_st_ctx { + __le32 spq_base_lo; + __le32 spq_base_hi; + struct regpair consolid_base_addr; + __le16 spq_cons; + __le16 consolid_cons; + __le32 reserved0[55]; +}; + +struct e4_xstorm_core_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define 
E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define 
E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define 
E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 consolid_prod; + __le16 reserved16; + __le16 tx_bd_cons; + __le16 tx_bd_or_spq_prod; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le16 word7; + __le16 word8; + __le16 word9; + __le16 word10; + 
__le32 reg7; + __le32 reg8; + __le32 reg9; + u8 byte7; + u8 byte8; + u8 byte9; + u8 byte10; + u8 byte11; + u8 byte12; + u8 byte13; + u8 byte14; + u8 byte15; + u8 e5_reserved; + __le16 word11; + __le32 reg10; + __le32 reg11; + __le32 reg12; + __le32 reg13; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; + __le32 reg18; + __le32 reg19; + __le16 word12; + __le16 word13; + __le16 word14; + __le16 word15; +}; + +struct e4_tstorm_core_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 
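[Editor's note] Every flags byte in these aggregative-context structures is packed through the paired *_MASK/*_SHIFT macro pairs above. A minimal sketch of the accessor pattern, assuming the GET_FIELD()/SET_FIELD() helpers that the qed headers define for this purpose (reproduced here in their usual form, not verbatim from this patch; example_arm_cf0en() is an illustrative name only):

	/* Read a field: shift down, then mask. */
	#define GET_FIELD(value, name) \
		(((value) >> (name ## _SHIFT)) & name ## _MASK)

	/* Write a field: clear its bits, then OR in the new value. */
	#define SET_FIELD(value, name, flag) \
		do { \
			(value) &= ~((u64)name ## _MASK << (name ## _SHIFT)); \
			(value) |= (((u64)(flag)) & (u64)name ## _MASK) << (name ## _SHIFT); \
		} while (0)

	/* Example: arm CF0 of the Tstorm context above; per the defines,
	 * E4_TSTORM_CORE_CONN_AG_CTX_CF0EN is bit 4 of flags3.
	 */
	static inline void example_arm_cf0en(u8 *flags3)
	{
		SET_FIELD(*flags3, E4_TSTORM_CORE_CONN_AG_CTX_CF0EN, 1);
	}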
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 word0; + u8 byte4; + u8 byte5; + __le16 word1; + __le16 word2; + __le16 word3; + __le32 reg9; + __le32 reg10; +}; + +struct e4_ustorm_core_conn_ag_ctx { + u8 reserved; + u8 byte1; + u8 flags0; +#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 +#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 +#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define 
E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 rx_producers; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +/* The core storm context for the Mstorm */ +struct mstorm_core_conn_st_ctx { + __le32 reserved[24]; +}; + +/* The core storm context for the Ustorm */ +struct ustorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* core connection context */ +struct e4_core_conn_context { + struct ystorm_core_conn_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct pstorm_core_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_core_conn_st_ctx xstorm_st_context; + struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context; + struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context; + struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context; + struct mstorm_core_conn_st_ctx mstorm_st_context; + struct ustorm_core_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; +}; + struct eth_mstorm_per_pf_stat { struct regpair gre_discard_pkts; struct regpair vxlan_discard_pkts; @@ -896,6 +938,50 @@ struct eth_ustorm_per_queue_stat { struct regpair rcv_bcast_pkts; }; +/* Event Ring VF-PF Channel data */ +struct vf_pf_channel_eqe_data { + struct regpair msg_addr; +}; + +/* Event Ring malicious VF data */ +struct malicious_vf_eqe_data { + u8 vf_id; + u8 err_id; + __le16 reserved[3]; +}; + +/* Event Ring initial cleanup data */ +struct initial_cleanup_eqe_data { + u8 vf_id; + u8 reserved[7]; +}; + +/* Event Data Union */ +union event_ring_data { + u8 bytes[8]; + struct vf_pf_channel_eqe_data vf_pf_channel; + struct iscsi_eqe_data iscsi_info; + struct iscsi_connect_done_results iscsi_conn_done_info; + union rdma_eqe_data rdma_data; + struct malicious_vf_eqe_data malicious_vf; + struct initial_cleanup_eqe_data vf_init_cleanup; +}; + +/* Event Ring Entry */ +struct event_ring_entry { + u8 protocol_id; + u8 opcode; + __le16 reserved0; + __le16 echo; + u8 fw_return_code; + u8 flags; +#define EVENT_RING_ENTRY_ASYNC_MASK 0x1 +#define EVENT_RING_ENTRY_ASYNC_SHIFT 0 +#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F +#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1 + union event_ring_data data; +}; + /* Event Ring Next Page Address */ struct event_ring_next_addr { struct regpair addr; @@ -908,12 +994,21 @@ union event_ring_element { struct event_ring_next_addr next_addr; }; +/* Ports mode */ enum fw_flow_ctrl_mode { flow_ctrl_pause, flow_ctrl_pfc, MAX_FW_FLOW_CTRL_MODE }; +/* GFT profile type */ +enum gft_profile_type { + GFT_PROFILE_TYPE_4_TUPLE, + GFT_PROFILE_TYPE_L4_DST_PORT, + GFT_PROFILE_TYPE_IP_DST_PORT, + MAX_GFT_PROFILE_TYPE +}; + /* Major and Minor hsi Versions */ struct hsi_fp_ver_struct { u8 minor_ver_arr[2]; @@ -921,14 +1016,14 @@ struct hsi_fp_ver_struct { }; enum iwarp_ll2_tx_queues { - IWARP_LL2_IN_ORDER_TX_QUEUE = 1, + IWARP_LL2_IN_ORDER_TX_QUEUE = 1, IWARP_LL2_ALIGNED_TX_QUEUE, IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE, IWARP_LL2_ERROR, MAX_IWARP_LL2_TX_QUEUES }; -/* Mstorm non-triggering VF zone */ +/* Malicious VF error ID */ enum malicious_vf_error_id { MALICIOUS_VF_NO_ERROR, VF_PF_CHANNEL_NOT_READY, @@ -951,9 +1046,11 @@ enum malicious_vf_error_id { ETH_TUNN_IPV6_EXT_NBD_ERR, ETH_CONTROL_PACKET_VIOLATION, ETH_ANTI_SPOOFING_ERR, + ETH_PACKET_SIZE_TOO_LARGE, MAX_MALICIOUS_VF_ERROR_ID }; +/* Mstorm non-triggering VF zone */ struct mstorm_non_trigger_vf_zone { 
struct eth_mstorm_per_queue_stat eth_queue_stat; struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD]; @@ -962,7 +1059,21 @@ struct mstorm_non_trigger_vf_zone { /* Mstorm VF zone */ struct mstorm_vf_zone { struct mstorm_non_trigger_vf_zone non_trigger; +}; + +/* vlan header including TPID and TCI fields */ +struct vlan_header { + __le16 tpid; + __le16 tci; +}; +/* outer tag configurations */ +struct outer_tag_config_struct { + u8 enable_stag_pri_change; + u8 pri_map_valid; + u8 reserved[2]; + struct vlan_header outer_tag; + u8 inner_to_outer_pri_map[8]; }; /* personality per PF */ @@ -974,7 +1085,7 @@ enum personality_type { PERSONALITY_RDMA, PERSONALITY_CORE, PERSONALITY_ETH, - PERSONALITY_RESERVED4, + PERSONALITY_RESERVED, MAX_PERSONALITY_TYPE }; @@ -997,7 +1108,6 @@ struct pf_start_ramrod_data { struct regpair event_ring_pbl_addr; struct regpair consolid_q_pbl_addr; struct pf_start_tunnel_config tunnel_config; - __le32 reserved; __le16 event_ring_sb_id; u8 base_vf_id; u8 num_vfs; @@ -1011,21 +1121,22 @@ struct pf_start_ramrod_data { u8 mf_mode; u8 integ_phase; u8 allow_npar_tx_switching; - u8 inner_to_outer_pri_map[8]; - u8 pri_map_valid; - __le32 outer_tag; + u8 reserved0; struct hsi_fp_ver_struct hsi_fp_ver; + struct outer_tag_config_struct outer_tag_config; }; +/* Data for port update ramrod */ struct protocol_dcb_data { u8 dcb_enable_flag; - u8 reserved_a; + u8 dscp_enable_flag; u8 dcb_priority; u8 dcb_tc; - u8 reserved_b; + u8 dscp_val; u8 reserved0; }; +/* Update tunnel configuration */ struct pf_update_tunnel_config { u8 update_rx_pf_clss; u8 update_rx_def_ucast_clss; @@ -1042,8 +1153,8 @@ struct pf_update_tunnel_config { __le16 reserved; }; +/* Data for port update ramrod */ struct pf_update_ramrod_data { - u8 pf_id; u8 update_eth_dcb_data_mode; u8 update_fcoe_dcb_data_mode; u8 update_iscsi_dcb_data_mode; @@ -1051,6 +1162,7 @@ struct pf_update_ramrod_data { u8 update_rroce_dcb_data_mode; u8 update_iwarp_dcb_data_mode; u8 update_mf_vlan_flag; + u8 update_enable_stag_pri_change; struct protocol_dcb_data eth_dcb_data; struct protocol_dcb_data fcoe_dcb_data; struct protocol_dcb_data iscsi_dcb_data; @@ -1058,7 +1170,8 @@ struct pf_update_ramrod_data { struct protocol_dcb_data rroce_dcb_data; struct protocol_dcb_data iwarp_dcb_data; __le16 mf_vlan; - __le16 reserved; + u8 enable_stag_pri_change; + u8 reserved; struct pf_update_tunnel_config tunnel_config; }; @@ -1079,11 +1192,13 @@ enum protocol_version_array_key { MAX_PROTOCOL_VERSION_ARRAY_KEY }; +/* RDMA TX Stats */ struct rdma_sent_stats { struct regpair sent_bytes; struct regpair sent_pkts; }; +/* Pstorm non-triggering VF zone */ struct pstorm_non_trigger_vf_zone { struct eth_pstorm_per_queue_stat eth_queue_stat; struct rdma_sent_stats rdma_stats; @@ -1103,11 +1218,34 @@ struct ramrod_header { __le16 echo; }; +/* RDMA RX Stats */ struct rdma_rcv_stats { struct regpair rcv_bytes; struct regpair rcv_pkts; }; +/* Data for update QCN/DCQCN RL ramrod */ +struct rl_update_ramrod_data { + u8 qcn_update_param_flg; + u8 dcqcn_update_param_flg; + u8 rl_init_flg; + u8 rl_start_flg; + u8 rl_stop_flg; + u8 rl_id_first; + u8 rl_id_last; + u8 rl_dc_qcn_flg; + __le32 rl_bc_rate; + __le16 rl_max_rate; + __le16 rl_r_ai; + __le16 rl_r_hai; + __le16 dcqcn_g; + __le32 dcqcn_k_us; + __le32 dcqcn_timeuot_us; + __le32 qcn_timeuot_us; + __le32 reserved[2]; +}; + +/* Slowpath Element (SPQE) */ struct slow_path_element { struct ramrod_header hdr; struct regpair data_ptr; @@ -1130,11 +1268,12 @@ struct tstorm_per_port_stat { 
struct regpair roce_irregular_pkt; struct regpair iwarp_irregular_pkt; struct regpair eth_irregular_pkt; - struct regpair reserved1; + struct regpair toe_irregular_pkt; struct regpair preroce_irregular_pkt; struct regpair eth_gre_tunn_filter_discard; struct regpair eth_vxlan_tunn_filter_discard; struct regpair eth_geneve_tunn_filter_discard; + struct regpair eth_gft_drop_pkt; }; /* Tstorm VF zone */ @@ -1197,6 +1336,7 @@ struct vf_stop_ramrod_data { __le32 reserved2; }; +/* VF zone size mode */ enum vf_zone_size_mode { VF_ZONE_SIZE_MODE_DEFAULT, VF_ZONE_SIZE_MODE_DOUBLE, @@ -1204,6 +1344,7 @@ enum vf_zone_size_mode { MAX_VF_ZONE_SIZE_MODE }; +/* Attentions status block */ struct atten_status_block { __le32 atten_bits; __le32 atten_ack; @@ -1212,12 +1353,6 @@ struct atten_status_block { __le32 reserved1; }; -enum command_type_bit { - IGU_COMMAND_TYPE_NOP = 0, - IGU_COMMAND_TYPE_SET = 1, - MAX_COMMAND_TYPE_BIT -}; - /* DMAE command */ struct dmae_cmd { __le32 opcode; @@ -1327,74 +1462,74 @@ enum dmae_cmd_src_enum { MAX_DMAE_CMD_SRC_ENUM }; -struct mstorm_core_conn_ag_ctx { +struct e4_mstorm_core_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define 
E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct ystorm_core_conn_ag_ctx { +struct e4_ystorm_core_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -1545,22 +1680,22 @@ struct qm_rf_opportunistic_mask { }; /* QM hardware structure of QM map memory */ -struct qm_rf_pq_map { +struct qm_rf_pq_map_e4 { __le32 reg; -#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 -#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0 -#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF -#define QM_RF_PQ_MAP_RL_ID_SHIFT 1 -#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF -#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9 -#define QM_RF_PQ_MAP_VOQ_MASK 0x1F -#define QM_RF_PQ_MAP_VOQ_SHIFT 18 -#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 -#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23 -#define 
QM_RF_PQ_MAP_RL_VALID_MASK 0x1 -#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25 -#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F -#define QM_RF_PQ_MAP_RESERVED_SHIFT 26 +#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0 +#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF +#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1 +#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF +#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9 +#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F +#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18 +#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3 +#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23 +#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25 +#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F +#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26 }; /* Completion params for aggregated interrupt completion */ @@ -1643,8 +1778,8 @@ enum block_addr { GRCBASE_MULD = 0x4e0000, GRCBASE_YULD = 0x4c8000, GRCBASE_XYLD = 0x4c0000, - GRCBASE_PTLD = 0x590000, - GRCBASE_YPLD = 0x5b0000, + GRCBASE_PTLD = 0x5a0000, + GRCBASE_YPLD = 0x5c0000, GRCBASE_PRM = 0x230000, GRCBASE_PBF_PB1 = 0xda0000, GRCBASE_PBF_PB2 = 0xda4000, @@ -1675,6 +1810,7 @@ enum block_addr { GRCBASE_PHY_PCIE = 0x620000, GRCBASE_LED = 0x6b8000, GRCBASE_AVS_WRAP = 0x6b0000, + GRCBASE_PXPREQBUS = 0x56000, GRCBASE_MISC_AEU = 0x8000, GRCBASE_BAR0_MAP = 0x1c00000, MAX_BLOCK_ADDR @@ -1766,6 +1902,7 @@ enum block_id { BLOCK_PHY_PCIE, BLOCK_LED, BLOCK_AVS_WRAP, + BLOCK_PXPREQBUS, BLOCK_MISC_AEU, BLOCK_BAR0_MAP, MAX_BLOCK_ID @@ -1841,7 +1978,7 @@ struct dbg_attn_block_result { struct dbg_attn_reg_result reg_results[15]; }; -/* mode header */ +/* Mode header */ struct dbg_mode_hdr { __le16 data; #define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 @@ -1863,80 +2000,83 @@ struct dbg_attn_reg { __le32 mask_address; }; -/* attention types */ +/* Attention types */ enum dbg_attn_type { ATTN_TYPE_INTERRUPT, ATTN_TYPE_PARITY, MAX_DBG_ATTN_TYPE }; +/* Debug Bus block data */ struct dbg_bus_block { u8 num_of_lines; u8 has_latency_events; __le16 lines_offset; }; +/* Debug Bus block user data */ struct dbg_bus_block_user_data { u8 num_of_lines; u8 has_latency_events; __le16 names_offset; }; +/* Block Debug line data */ struct dbg_bus_line { u8 data; -#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF -#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0 -#define DBG_BUS_LINE_IS_256B_MASK 0x1 -#define DBG_BUS_LINE_IS_256B_SHIFT 4 -#define DBG_BUS_LINE_RESERVED_MASK 0x7 -#define DBG_BUS_LINE_RESERVED_SHIFT 5 +#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF +#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0 +#define DBG_BUS_LINE_IS_256B_MASK 0x1 +#define DBG_BUS_LINE_IS_256B_SHIFT 4 +#define DBG_BUS_LINE_RESERVED_MASK 0x7 +#define DBG_BUS_LINE_RESERVED_SHIFT 5 u8 group_sizes; }; -/* condition header for registers dump */ +/* Condition header for registers dump */ struct dbg_dump_cond_hdr { struct dbg_mode_hdr mode; /* Mode header */ u8 block_id; /* block ID */ u8 data_size; /* size in dwords of the data following this header */ }; -/* memory data for registers dump */ +/* Memory data for registers dump */ struct dbg_dump_mem { __le32 dword0; -#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF -#define DBG_DUMP_MEM_ADDRESS_SHIFT 0 -#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF -#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 +#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF +#define DBG_DUMP_MEM_ADDRESS_SHIFT 0 +#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF +#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 __le32 dword1; -#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF -#define DBG_DUMP_MEM_LENGTH_SHIFT 0 -#define 
DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 -#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24 -#define DBG_DUMP_MEM_RESERVED_MASK 0x7F -#define DBG_DUMP_MEM_RESERVED_SHIFT 25 +#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF +#define DBG_DUMP_MEM_LENGTH_SHIFT 0 +#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24 +#define DBG_DUMP_MEM_RESERVED_MASK 0x7F +#define DBG_DUMP_MEM_RESERVED_SHIFT 25 }; -/* register data for registers dump */ +/* Register data for registers dump */ struct dbg_dump_reg { __le32 data; -#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */ -#define DBG_DUMP_REG_ADDRESS_SHIFT 0 -#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 /* indicates register is wide-bus */ -#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23 -#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */ -#define DBG_DUMP_REG_LENGTH_SHIFT 24 +#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_DUMP_REG_ADDRESS_SHIFT 0 +#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23 +#define DBG_DUMP_REG_LENGTH_MASK 0xFF +#define DBG_DUMP_REG_LENGTH_SHIFT 24 }; -/* split header for registers dump */ +/* Split header for registers dump */ struct dbg_dump_split_hdr { __le32 hdr; -#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF -#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 -#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF -#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24 +#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF +#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 +#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF +#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24 }; -/* condition header for idle check */ +/* Condition header for idle check */ struct dbg_idle_chk_cond_hdr { struct dbg_mode_hdr mode; /* Mode header */ __le16 data_size; /* size in dwords of the data following this header */ @@ -1945,12 +2085,12 @@ struct dbg_idle_chk_cond_hdr { /* Idle Check condition register */ struct dbg_idle_chk_cond_reg { __le32 data; -#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF -#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 -#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 -#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23 -#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF -#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 +#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23 +#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF +#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 __le16 num_entries; u8 entry_size; u8 start_entry; @@ -1959,12 +2099,12 @@ struct dbg_idle_chk_cond_reg { /* Idle Check info register */ struct dbg_idle_chk_info_reg { __le32 data; -#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF -#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 -#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 -#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23 -#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF -#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23 +#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF +#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 __le16 size; /* register size in dwords */ struct dbg_mode_hdr mode; /* Mode header */ }; @@ -2016,13 +2156,13 @@ struct dbg_idle_chk_rule { /* Idle Check rule parsing data */ struct 
dbg_idle_chk_rule_parsing_data { __le32 data; -#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 -#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0 -#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF -#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1 +#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 +#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0 +#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF +#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1 }; -/* idle check severity types */ +/* Idle check severity types */ enum dbg_idle_chk_severity_types { /* idle check failure should cause an error */ IDLE_CHK_SEVERITY_ERROR, @@ -2036,14 +2176,14 @@ enum dbg_idle_chk_severity_types { /* Debug Bus block data */ struct dbg_bus_block_data { __le16 data; -#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF -#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0 -#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF -#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4 -#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF -#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8 -#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF -#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12 +#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF +#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0 +#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF +#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4 +#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF +#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8 +#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF +#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12 u8 line_num; u8 hw_id; }; @@ -2072,6 +2212,7 @@ enum dbg_bus_clients { MAX_DBG_BUS_CLIENTS }; +/* Debug Bus constraint operation types */ enum dbg_bus_constraint_ops { DBG_BUS_CONSTRAINT_OP_EQ, DBG_BUS_CONSTRAINT_OP_NE, @@ -2086,12 +2227,13 @@ enum dbg_bus_constraint_ops { MAX_DBG_BUS_CONSTRAINT_OPS }; +/* Debug Bus trigger state data */ struct dbg_bus_trigger_state_data { u8 data; -#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF -#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0 -#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF -#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4 +#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF +#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0 +#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF +#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4 }; /* Debug Bus memory address */ @@ -2165,6 +2307,7 @@ struct dbg_bus_data { struct dbg_bus_storm_data storms[6]; }; +/* Debug bus filter types */ enum dbg_bus_filter_types { DBG_BUS_FILTER_TYPE_OFF, DBG_BUS_FILTER_TYPE_PRE, @@ -2181,6 +2324,7 @@ enum dbg_bus_frame_modes { MAX_DBG_BUS_FRAME_MODES }; +/* Debug bus other engine mode */ enum dbg_bus_other_engine_modes { DBG_BUS_OTHER_ENGINE_MODE_NONE, DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX, @@ -2190,12 +2334,14 @@ enum dbg_bus_other_engine_modes { MAX_DBG_BUS_OTHER_ENGINE_MODES }; +/* Debug bus post-trigger recording types */ enum dbg_bus_post_trigger_types { DBG_BUS_POST_TRIGGER_RECORD, DBG_BUS_POST_TRIGGER_DROP, MAX_DBG_BUS_POST_TRIGGER_TYPES }; +/* Debug bus pre-trigger recording types */ enum dbg_bus_pre_trigger_types { DBG_BUS_PRE_TRIGGER_START_FROM_ZERO, DBG_BUS_PRE_TRIGGER_NUM_CHUNKS, @@ -2203,11 +2349,10 @@ enum dbg_bus_pre_trigger_types { MAX_DBG_BUS_PRE_TRIGGER_TYPES }; +/* Debug bus SEMI 
frame modes */ enum dbg_bus_semi_frame_modes { - DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = - 0, - DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = - 3, + DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0, + DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3, MAX_DBG_BUS_SEMI_FRAME_MODES }; @@ -2220,6 +2365,7 @@ enum dbg_bus_states { MAX_DBG_BUS_STATES }; +/* Debug Bus Storm modes */ enum dbg_bus_storm_modes { DBG_BUS_STORM_MODE_PRINTF, DBG_BUS_STORM_MODE_PRAM_ADDR, @@ -2352,7 +2498,7 @@ enum dbg_status { DBG_STATUS_MCP_TRACE_NO_META, DBG_STATUS_MCP_COULD_NOT_HALT, DBG_STATUS_MCP_COULD_NOT_RESUME, - DBG_STATUS_DMAE_FAILED, + DBG_STATUS_RESERVED2, DBG_STATUS_SEMI_FIFO_NOT_EMPTY, DBG_STATUS_IGU_FIFO_BAD_DATA, DBG_STATUS_MCP_COULD_NOT_MASK_PRTY, @@ -2396,7 +2542,8 @@ struct dbg_tools_data { u8 chip_id; u8 platform_id; u8 initialized; - u8 reserved; + u8 use_dmae; + __le32 num_regs_read; }; /********************************/ @@ -2406,6 +2553,7 @@ struct dbg_tools_data { /* Number of VLAN priorities */ #define NUM_OF_VLAN_PRIORITIES 8 +/* BRB RAM init requirements */ struct init_brb_ram_req { __le32 guranteed_per_tc; __le32 headroom_per_tc; @@ -2414,17 +2562,20 @@ struct init_brb_ram_req { u8 num_active_tcs[MAX_NUM_PORTS]; }; +/* ETS per-TC init requirements */ struct init_ets_tc_req { u8 use_sp; u8 use_wfq; __le16 weight; }; +/* ETS init requirements */ struct init_ets_req { __le32 mtu; struct init_ets_tc_req tc_req[NUM_OF_TCS]; }; +/* NIG LB RL init requirements */ struct init_nig_lb_rl_req { __le16 lb_mac_rate; __le16 lb_rate; @@ -2432,15 +2583,18 @@ struct init_nig_lb_rl_req { __le16 tc_rate[NUM_OF_PHYS_TCS]; }; +/* NIG TC mapping for each priority */ struct init_nig_pri_tc_map_entry { u8 tc_id; u8 valid; }; +/* NIG priority to TC map init requirements */ struct init_nig_pri_tc_map_req { struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES]; }; +/* QM per-port init parameters */ struct init_qm_port_params { u8 active; u8 active_phys_tcs; @@ -2563,7 +2717,7 @@ struct bin_buffer_hdr { __le32 length; }; -/* binary init buffer types */ +/* Binary init buffer types */ enum bin_init_buffer_type { BIN_BUF_INIT_FW_VER_INFO, BIN_BUF_INIT_CMD, @@ -2793,6 +2947,7 @@ struct iro { }; /***************************** Public Functions *******************************/ + /** * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug * arrays. @@ -2802,6 +2957,18 @@ struct iro { enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr); /** + * @brief qed_read_regs - Reads registers into a buffer (using GRC). + * + * @param p_hwfn - HW device data + * @param p_ptt - Ptt window used for writing the registers. + * @param buf - Destination buffer. + * @param addr - Source GRC address in dwords. + * @param len - Number of registers to read. + */ +void qed_read_regs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len); + +/** * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their * default value. * @@ -3119,6 +3286,7 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, #define MAX_NAME_LEN 16 /***************************** Public Functions *******************************/ + /** * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with * debug arrays. @@ -3172,6 +3340,18 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, u32 *num_warnings); /** + * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace + * meta data. + * + * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. 
due to + no NVRAM access). + * + * @param data - pointer to MCP Trace meta data + * @param size - size of MCP Trace meta data in dwords + */ +void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size); + +/** * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size * for MCP Trace results (in bytes). * @@ -3607,6 +3787,9 @@ static const u32 dbg_bus_blocks[] = { 0x00000000, /* bar0_map, bb, 0 lines */ 0x00000000, /* bar0_map, k2, 0 lines */ 0x00000000, + 0x00000000, /* bar0_map, bb, 0 lines */ + 0x00000000, /* bar0_map, k2, 0 lines */ + 0x00000000, }; /* Win 2 */ @@ -3645,7 +3828,6 @@ static const u32 dbg_bus_blocks[] = { * Returns the required host memory size in 4KB units. * Must be called before all QM init HSI functions. * - * @param pf_id - physical function ID * @param num_pf_cids - number of connections used by this PF * @param num_vf_cids - number of connections used by VFs of this PF * @param num_tids - number of tasks used by this PF @@ -3654,8 +3836,7 @@ static const u32 dbg_bus_blocks[] = { * * @return The required host memory size in 4KB units. */ -u32 qed_qm_pf_mem_size(u8 pf_id, - u32 num_pf_cids, +u32 qed_qm_pf_mem_size(u32 num_pf_cids, u32 num_vf_cids, u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs); @@ -3676,7 +3857,7 @@ struct qed_qm_pf_rt_init_params { u8 port_id; u8 pf_id; u8 max_phys_tcs_per_port; - bool is_first_pf; + bool is_pf_loading; u32 num_pf_cids; u32 num_vf_cids; u32 num_tids; @@ -3687,6 +3868,7 @@ struct qed_qm_pf_rt_init_params { u8 num_vports; u16 pf_wfq; u32 pf_rl; + u32 link_speed; struct init_qm_pq_params *pq_params; struct init_qm_vport_params *vport_params; }; @@ -3744,11 +3926,14 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, * @param p_ptt - ptt window used for writing the registers * @param vport_id - VPORT ID * @param vport_rl - rate limit in Mb/sec units + * @param link_speed - link speed in Mbps. * * @return 0 on success, -1 on error. */ int qed_init_vport_rl(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl); + struct qed_ptt *p_ptt, + u8 vport_id, u32 vport_rl, u32 link_speed); + /** * @brief qed_send_qm_stop_cmd Sends a stop command to the QM * @@ -3759,7 +3944,8 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn, * @param start_pq - first PQ ID to stop * @param num_pqs - Number of PQs to stop, starting from start_pq. * - * @return bool, true if successful, false if timeout occured while waiting for QM command done. + * @return bool, true if successful, false if timeout occurred while waiting for + * QM command done. */ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3769,6 +3955,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, /** * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers. * @param dest_port - vxlan destination udp port. */ @@ -3778,6 +3965,7 @@ void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, /** * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers. * @param vxlan_enable - vxlan enable flag. */ @@ -3787,6 +3975,7 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, /** * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers. * @param eth_gre_enable - eth GRE enable flag. * @param ip_gre_enable - IP GRE enable flag.
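[Editor's note] The tunnel helpers in the hunks above now take the p_hwfn handle explicitly alongside the PTT window. A hedged usage sketch: it assumes the caller obtains the window via qed_ptt_acquire()/qed_ptt_release() as elsewhere in qed, and example_enable_vxlan() is an illustrative name, not a driver API:

	static void example_enable_vxlan(struct qed_hwfn *p_hwfn)
	{
		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);

		if (!p_ptt)
			return;

		/* Program the IANA VXLAN UDP port, then enable classification. */
		qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
		qed_set_vxlan_enable(p_hwfn, p_ptt, true);

		qed_ptt_release(p_hwfn, p_ptt);
	}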
@@ -3798,6 +3987,7 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, /** * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port * + * @param p_hwfn * @param p_ptt - ptt window used for writing the registers. * @param dest_port - geneve destination udp port. */ @@ -3814,612 +4004,921 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool eth_geneve_enable, bool ip_geneve_enable); -void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u16 pf_id); -void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 pf_id, bool tcp, bool udp, - bool ipv4, bool ipv6); - -#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) -#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) -#define TSTORM_PORT_STAT_OFFSET(port_id) \ + +/** + * @brief qed_gft_disable - Disable GFT + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers. + * @param pf_id - pf on which to disable GFT. + */ +void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id); + +/** + * @brief qed_gft_config - Enable and configure HW for GFT + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers. + * @param pf_id - pf on which to enable GFT. + * @param tcp - set profile tcp packets. + * @param udp - set profile udp packet. + * @param ipv4 - set profile ipv4 packet. + * @param ipv6 - set profile ipv6 packet. + * @param profile_type - defines which packet fields to match. Use enum gft_profile_type. + */ +void qed_gft_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 pf_id, + bool tcp, + bool udp, + bool ipv4, bool ipv6, enum gft_profile_type profile_type); + +/** + * @brief qed_enable_context_validation - Enable and configure context + * validation. + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers. + */ +void qed_enable_context_validation(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief qed_calc_session_ctx_validation - Calculate validation byte for + * session context. + * + * @param p_ctx_mem - pointer to context memory. + * @param ctx_size - context size. + * @param ctx_type - context type. + * @param cid - context cid. + */ +void qed_calc_session_ctx_validation(void *p_ctx_mem, + u16 ctx_size, u8 ctx_type, u32 cid); + +/** + * @brief qed_calc_task_ctx_validation - Calculate validation byte for task + * context. + * + * @param p_ctx_mem - pointer to context memory. + * @param ctx_size - context size. + * @param ctx_type - context type. + * @param tid - context tid. + */ +void qed_calc_task_ctx_validation(void *p_ctx_mem, + u16 ctx_size, u8 ctx_type, u32 tid); + +/** + * @brief qed_memset_session_ctx - Memset session context to 0 while + * preserving validation bytes. + * + * @param p_ctx_mem - pointer to context memory. + * @param ctx_size - size to initialize. + * @param ctx_type - context type. + */ +void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); + +/** + * @brief qed_memset_task_ctx - Memset task context to 0 while preserving + * validation bytes. + * + * @param p_ctx_mem - pointer to context memory. + * @param ctx_size - size to initialize. + * @param ctx_type - context type. + */ +void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); + +/* Ystorm flow control mode.
Use enum fw_flow_ctrl_mode */ +#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) +#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) + +/* Tstorm port statistics */ +#define TSTORM_PORT_STAT_OFFSET(port_id) \ (IRO[1].base + ((port_id) * IRO[1].m1)) -#define TSTORM_PORT_STAT_SIZE (IRO[1].size) +#define TSTORM_PORT_STAT_SIZE (IRO[1].size) + +/* Tstorm ll2 port statistics */ #define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \ (IRO[2].base + ((port_id) * IRO[2].m1)) #define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size) -#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \ + +/* Ustorm VF-PF Channel ready flag */ +#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \ (IRO[3].base + ((vf_id) * IRO[3].m1)) -#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size) -#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \ - (IRO[4].base + (pf_id) * IRO[4].m1) -#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size) -#define USTORM_EQE_CONS_OFFSET(pf_id) \ +#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size) + +/* Ustorm Final flr cleanup ack */ +#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \ + (IRO[4].base + ((pf_id) * IRO[4].m1)) +#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size) + +/* Ustorm Event ring consumer */ +#define USTORM_EQE_CONS_OFFSET(pf_id) \ (IRO[5].base + ((pf_id) * IRO[5].m1)) -#define USTORM_EQE_CONS_SIZE (IRO[5].size) -#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \ +#define USTORM_EQE_CONS_SIZE (IRO[5].size) + +/* Ustorm eth queue zone */ +#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \ (IRO[6].base + ((queue_zone_id) * IRO[6].m1)) -#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size) -#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \ +#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size) + +/* Ustorm Common Queue ring consumer */ +#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \ (IRO[7].base + ((queue_zone_id) * IRO[7].m1)) -#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size) +#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size) + +/* Xstorm Integration Test Data */ +#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base) +#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size) + +/* Ystorm Integration Test Data */ +#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) +#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) + +/* Pstorm Integration Test Data */ +#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) +#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) + +/* Tstorm Integration Test Data */ +#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) +#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) + +/* Mstorm Integration Test Data */ +#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base) +#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size) + +/* Ustorm Integration Test Data */ +#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base) +#define USTORM_INTEG_TEST_DATA_SIZE (IRO[13].size) + +/* Tstorm producers */ #define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \ - (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1)) + (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1)) #define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size) + +/* Tstorm LightL2 queue statistics */ #define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1)) #define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size) + +/* Ustorm LiteL2 queue statistics */ #define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ - (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1)) + (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1)) #define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size) + +/* Pstorm 
LiteL2 queue statistics */ #define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ - (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1)) -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17]. size) -#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1)) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size) + +/* Mstorm queue statistics */ +#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ (IRO[18].base + ((stat_counter_id) * IRO[18].m1)) -#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size) -#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \ +#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size) + +/* Mstorm ETH PF queues producers */ +#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \ (IRO[19].base + ((queue_id) * IRO[19].m1)) -#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size) +#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size) + +/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size + * mode. + */ #define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \ - (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2)) + (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2)) #define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size) -#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base) -#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size) -#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ + +/* TPA aggregation timeout in us resolution (on ASIC) */ +#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base) +#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size) + +/* Mstorm pf statistics */ +#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ (IRO[22].base + ((pf_id) * IRO[22].m1)) -#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size) -#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ +#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size) + +/* Ustorm queue statistics */ +#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ (IRO[23].base + ((stat_counter_id) * IRO[23].m1)) -#define USTORM_QUEUE_STAT_SIZE (IRO[23].size) -#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \ +#define USTORM_QUEUE_STAT_SIZE (IRO[23].size) + +/* Ustorm pf statistics */ +#define USTORM_ETH_PF_STAT_OFFSET(pf_id)\ (IRO[24].base + ((pf_id) * IRO[24].m1)) -#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size) -#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ +#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size) + +/* Pstorm queue statistics */ +#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ (IRO[25].base + ((stat_counter_id) * IRO[25].m1)) -#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size) -#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ +#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size) + +/* Pstorm pf statistics */ +#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ (IRO[26].base + ((pf_id) * IRO[26].m1)) -#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size) -#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \ - (IRO[27].base + ((ethtype) * IRO[27].m1)) -#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size) -#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base) -#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size) -#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ +#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size) + +/* Control frame's EthType configuration for TX control frame security */ +#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \ + (IRO[27].base + ((eth_type_id) * IRO[27].m1)) +#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size) + +/* Tstorm last parser message */ +#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size) + +/* Tstorm Eth limit Rx rate */ +#define
ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ (IRO[29].base + ((pf_id) * IRO[29].m1)) -#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size) -#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ +#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size) + +/* Xstorm queue zone */ +#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ (IRO[30].base + ((queue_id) * IRO[30].m1)) -#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size) +#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size) + +/* Ystorm cqe producer */ +#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[31].base + ((rss_id) * IRO[31].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size) + +/* Ustorm cqe producer */ +#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[32].base + ((rss_id) * IRO[32].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size) + +/* Ustorm grq producer */ +#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ + (IRO[33].base + ((pf_id) * IRO[33].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size) + +/* Tstorm cmdq-cons of given command queue-id */ #define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ - (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1)) -#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size) + (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1)) +#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size) + +/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, + * BDqueue-id. + */ #define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2)) -#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size) + (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size) + +/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ #define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) -#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) + (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) + +/* Tstorm iSCSI RX stats */ #define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[37].base + ((pf_id) * IRO[37].m1)) -#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size) + (IRO[37].base + ((pf_id) * IRO[37].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size) + +/* Mstorm iSCSI RX stats */ #define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[38].base + ((pf_id) * IRO[38].m1)) -#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) + (IRO[38].base + ((pf_id) * IRO[38].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) + +/* Ustorm iSCSI RX stats */ #define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[39].base + ((pf_id) * IRO[39].m1)) -#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) + (IRO[39].base + ((pf_id) * IRO[39].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) + +/* Xstorm iSCSI TX stats */ #define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[40].base + ((pf_id) * IRO[40].m1)) -#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size) + (IRO[40].base + ((pf_id) * IRO[40].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size) + +/* Ystorm iSCSI TX stats */ #define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[41].base + ((pf_id) * IRO[41].m1)) -#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) + (IRO[41].base + ((pf_id) * IRO[41].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) + +/* Pstorm iSCSI TX stats */ #define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[42].base + ((pf_id) * IRO[42].m1)) -#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) -#define 
PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1)) -#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size) -#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) -#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) + (IRO[42].base + ((pf_id) * IRO[42].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) + +/* Tstorm FCoE RX stats */ #define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ - (IRO[43].base + ((pf_id) * IRO[43].m1)) + (IRO[43].base + ((pf_id) * IRO[43].m1)) +#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size) + +/* Pstorm FCoE TX stats */ #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ (IRO[44].base + ((pf_id) * IRO[44].m1)) +#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size) + +/* Pstorm RDMA queue statistics */ +#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1)) +#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size) -static const struct iro iro_arr[49] = { +/* Tstorm RDMA queue statistics */ +#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) +#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) + +/* Xstorm iWARP rxmit stats */ +#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ + (IRO[47].base + ((pf_id) * IRO[47].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size) + +/* Tstorm RoCE Event Statistics */ +#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ + (IRO[48].base + ((roce_pf_id) * IRO[48].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size) + +/* DCQCN Received Statistics */ +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ + (IRO[49].base + ((roce_pf_id) * IRO[49].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size) + +/* DCQCN Sent Statistics */ +#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ + (IRO[50].base + ((roce_pf_id) * IRO[50].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size) + +static const struct iro iro_arr[51] = { {0x0, 0x0, 0x0, 0x0, 0x8}, - {0x4cb0, 0x80, 0x0, 0x0, 0x80}, - {0x6518, 0x20, 0x0, 0x0, 0x20}, + {0x4cb8, 0x88, 0x0, 0x0, 0x88}, + {0x6530, 0x20, 0x0, 0x0, 0x20}, {0xb00, 0x8, 0x0, 0x0, 0x4}, {0xa80, 0x8, 0x0, 0x0, 0x4}, {0x0, 0x8, 0x0, 0x0, 0x2}, {0x80, 0x8, 0x0, 0x0, 0x4}, {0x84, 0x8, 0x0, 0x0, 0x2}, + {0x4c48, 0x0, 0x0, 0x0, 0x78}, + {0x3e18, 0x0, 0x0, 0x0, 0x78}, + {0x2b58, 0x0, 0x0, 0x0, 0x78}, {0x4c40, 0x0, 0x0, 0x0, 0x78}, - {0x3df0, 0x0, 0x0, 0x0, 0x78}, - {0x29b0, 0x0, 0x0, 0x0, 0x78}, - {0x4c38, 0x0, 0x0, 0x0, 0x78}, - {0x4990, 0x0, 0x0, 0x0, 0x78}, - {0x7f48, 0x0, 0x0, 0x0, 0x78}, + {0x4998, 0x0, 0x0, 0x0, 0x78}, + {0x7f50, 0x0, 0x0, 0x0, 0x78}, {0xa28, 0x8, 0x0, 0x0, 0x8}, - {0x61f8, 0x10, 0x0, 0x0, 0x10}, - {0xbd20, 0x30, 0x0, 0x0, 0x30}, - {0x95b8, 0x30, 0x0, 0x0, 0x30}, - {0x4b60, 0x80, 0x0, 0x0, 0x40}, + {0x6210, 0x10, 0x0, 0x0, 0x10}, + {0xb820, 0x30, 0x0, 0x0, 0x30}, + {0x96c0, 0x30, 0x0, 0x0, 0x30}, + {0x4b68, 0x80, 0x0, 0x0, 0x40}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, - {0x53a0, 0x80, 0x4, 0x0, 0x4}, - {0xc7c8, 0x0, 0x0, 0x0, 0x4}, - {0x4ba0, 0x80, 0x0, 0x0, 0x20}, - {0x8150, 0x40, 0x0, 0x0, 0x30}, - {0xec70, 0x60, 0x0, 0x0, 0x60}, - {0x2b48, 0x80, 0x0, 0x0, 0x38}, - {0xf1b0, 0x78, 0x0, 0x0, 0x78}, + {0x53a8, 0x80, 0x4, 0x0, 0x4}, + {0xc7d0, 0x0, 0x0, 0x0, 0x4}, + {0x4ba8, 0x80, 0x0, 0x0, 0x20}, + {0x8158, 0x40, 0x0, 0x0, 0x30}, + {0xe770, 0x60, 0x0, 0x0, 0x60}, + {0x2cf0, 0x80, 0x0, 0x0, 0x38}, + {0xf2b8, 0x78, 0x0, 0x0, 0x78}, {0x1f8, 
0x4, 0x0, 0x0, 0x4}, - {0xaef8, 0x0, 0x0, 0x0, 0xf0}, - {0xafe8, 0x8, 0x0, 0x0, 0x8}, + {0xaf20, 0x0, 0x0, 0x0, 0xf0}, + {0xb010, 0x8, 0x0, 0x0, 0x8}, {0x1f8, 0x8, 0x0, 0x0, 0x8}, {0xac0, 0x8, 0x0, 0x0, 0x8}, {0x2578, 0x8, 0x0, 0x0, 0x8}, {0x24f8, 0x8, 0x0, 0x0, 0x8}, {0x0, 0x8, 0x0, 0x0, 0x8}, - {0x200, 0x10, 0x8, 0x0, 0x8}, - {0xb78, 0x10, 0x8, 0x0, 0x2}, - {0xd9a8, 0x38, 0x0, 0x0, 0x24}, - {0x12988, 0x10, 0x0, 0x0, 0x8}, - {0x11fa0, 0x38, 0x0, 0x0, 0x18}, - {0xa580, 0x38, 0x0, 0x0, 0x10}, - {0x86f8, 0x30, 0x0, 0x0, 0x18}, - {0x101f8, 0x10, 0x0, 0x0, 0x10}, - {0xde28, 0x48, 0x0, 0x0, 0x38}, - {0x10660, 0x20, 0x0, 0x0, 0x20}, - {0x2b80, 0x80, 0x0, 0x0, 0x10}, - {0x5020, 0x10, 0x0, 0x0, 0x10}, - {0xc9b0, 0x30, 0x0, 0x0, 0x10}, - {0xeec0, 0x10, 0x0, 0x0, 0x10}, + {0x400, 0x18, 0x8, 0x0, 0x8}, + {0xb78, 0x18, 0x8, 0x0, 0x2}, + {0xd898, 0x50, 0x0, 0x0, 0x3c}, + {0x12908, 0x18, 0x0, 0x0, 0x10}, + {0x11aa8, 0x40, 0x0, 0x0, 0x18}, + {0xa588, 0x50, 0x0, 0x0, 0x20}, + {0x8700, 0x40, 0x0, 0x0, 0x28}, + {0x10300, 0x18, 0x0, 0x0, 0x10}, + {0xde48, 0x48, 0x0, 0x0, 0x38}, + {0x10768, 0x20, 0x0, 0x0, 0x20}, + {0x2d28, 0x80, 0x0, 0x0, 0x10}, + {0x5048, 0x10, 0x0, 0x0, 0x10}, + {0xc9b8, 0x30, 0x0, 0x0, 0x10}, + {0xeee0, 0x10, 0x0, 0x0, 0x10}, + {0xa3a0, 0x10, 0x0, 0x0, 0x10}, + {0x13108, 0x8, 0x0, 0x0, 0x8}, }; /* Runtime array offsets */ -#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 -#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 -#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 -#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 -#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 -#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 -#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 -#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 -#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 -#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 -#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 -#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 -#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 -#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 -#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 -#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 -#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 -#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 -#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18 -#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19 -#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20 -#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21 -#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22 -#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23 -#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 -#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 -#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497 -#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 -#define CAU_REG_PI_MEMORY_RT_OFFSET 2233 -#define CAU_REG_PI_MEMORY_RT_SIZE 4416 -#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649 -#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650 -#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651 -#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652 -#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653 -#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654 -#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655 -#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656 -#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657 -#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658 -#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659 -#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660 -#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661 -#define 
PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662 -#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663 -#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664 -#define SRC_REG_FIRSTFREE_RT_OFFSET 6665 -#define SRC_REG_FIRSTFREE_RT_SIZE 2 -#define SRC_REG_LASTFREE_RT_OFFSET 6667 -#define SRC_REG_LASTFREE_RT_SIZE 2 -#define SRC_REG_COUNTFREE_RT_OFFSET 6669 -#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670 -#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671 -#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672 -#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673 -#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674 -#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675 -#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676 -#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677 -#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678 -#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679 -#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680 -#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681 -#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682 -#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683 -#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684 -#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685 -#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686 -#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687 -#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688 -#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689 -#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690 -#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691 -#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692 -#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693 -#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694 -#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695 -#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696 -#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697 -#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698 -#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699 -#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700 -#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701 -#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702 -#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 -#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702 -#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703 -#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704 -#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705 -#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706 -#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707 -#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708 -#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709 -#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710 -#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711 -#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712 -#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713 -#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714 -#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 -#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130 -#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608 -#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738 -#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739 -#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740 -#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741 -#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742 -#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743 -#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744 -#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745 -#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746 -#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747 -#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748 -#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749 -#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750 -#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751 -#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752 -#define 
QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753 -#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754 -#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755 -#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756 -#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757 -#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758 -#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759 -#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760 -#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761 -#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762 -#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763 -#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764 -#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765 -#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766 -#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767 -#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768 -#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769 -#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770 -#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771 -#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772 -#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773 -#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774 -#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775 -#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776 -#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777 -#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778 -#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779 -#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780 -#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781 -#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782 -#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783 -#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784 -#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785 -#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786 -#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787 -#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788 -#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789 -#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790 -#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791 -#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792 -#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793 -#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794 -#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795 -#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796 -#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797 -#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798 -#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799 -#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800 -#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801 -#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802 -#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803 -#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804 -#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805 -#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 -#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933 -#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934 -#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935 -#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936 -#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937 -#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938 -#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939 -#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940 -#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941 -#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942 -#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943 -#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944 -#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945 -#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946 -#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947 -#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948 -#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949 -#define 
QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950 -#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951 -#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952 -#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953 -#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954 -#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955 -#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956 -#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957 -#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958 -#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959 -#define QM_REG_PQTX2PF_0_RT_OFFSET 29960 -#define QM_REG_PQTX2PF_1_RT_OFFSET 29961 -#define QM_REG_PQTX2PF_2_RT_OFFSET 29962 -#define QM_REG_PQTX2PF_3_RT_OFFSET 29963 -#define QM_REG_PQTX2PF_4_RT_OFFSET 29964 -#define QM_REG_PQTX2PF_5_RT_OFFSET 29965 -#define QM_REG_PQTX2PF_6_RT_OFFSET 29966 -#define QM_REG_PQTX2PF_7_RT_OFFSET 29967 -#define QM_REG_PQTX2PF_8_RT_OFFSET 29968 -#define QM_REG_PQTX2PF_9_RT_OFFSET 29969 -#define QM_REG_PQTX2PF_10_RT_OFFSET 29970 -#define QM_REG_PQTX2PF_11_RT_OFFSET 29971 -#define QM_REG_PQTX2PF_12_RT_OFFSET 29972 -#define QM_REG_PQTX2PF_13_RT_OFFSET 29973 -#define QM_REG_PQTX2PF_14_RT_OFFSET 29974 -#define QM_REG_PQTX2PF_15_RT_OFFSET 29975 -#define QM_REG_PQTX2PF_16_RT_OFFSET 29976 -#define QM_REG_PQTX2PF_17_RT_OFFSET 29977 -#define QM_REG_PQTX2PF_18_RT_OFFSET 29978 -#define QM_REG_PQTX2PF_19_RT_OFFSET 29979 -#define QM_REG_PQTX2PF_20_RT_OFFSET 29980 -#define QM_REG_PQTX2PF_21_RT_OFFSET 29981 -#define QM_REG_PQTX2PF_22_RT_OFFSET 29982 -#define QM_REG_PQTX2PF_23_RT_OFFSET 29983 -#define QM_REG_PQTX2PF_24_RT_OFFSET 29984 -#define QM_REG_PQTX2PF_25_RT_OFFSET 29985 -#define QM_REG_PQTX2PF_26_RT_OFFSET 29986 -#define QM_REG_PQTX2PF_27_RT_OFFSET 29987 -#define QM_REG_PQTX2PF_28_RT_OFFSET 29988 -#define QM_REG_PQTX2PF_29_RT_OFFSET 29989 -#define QM_REG_PQTX2PF_30_RT_OFFSET 29990 -#define QM_REG_PQTX2PF_31_RT_OFFSET 29991 -#define QM_REG_PQTX2PF_32_RT_OFFSET 29992 -#define QM_REG_PQTX2PF_33_RT_OFFSET 29993 -#define QM_REG_PQTX2PF_34_RT_OFFSET 29994 -#define QM_REG_PQTX2PF_35_RT_OFFSET 29995 -#define QM_REG_PQTX2PF_36_RT_OFFSET 29996 -#define QM_REG_PQTX2PF_37_RT_OFFSET 29997 -#define QM_REG_PQTX2PF_38_RT_OFFSET 29998 -#define QM_REG_PQTX2PF_39_RT_OFFSET 29999 -#define QM_REG_PQTX2PF_40_RT_OFFSET 30000 -#define QM_REG_PQTX2PF_41_RT_OFFSET 30001 -#define QM_REG_PQTX2PF_42_RT_OFFSET 30002 -#define QM_REG_PQTX2PF_43_RT_OFFSET 30003 -#define QM_REG_PQTX2PF_44_RT_OFFSET 30004 -#define QM_REG_PQTX2PF_45_RT_OFFSET 30005 -#define QM_REG_PQTX2PF_46_RT_OFFSET 30006 -#define QM_REG_PQTX2PF_47_RT_OFFSET 30007 -#define QM_REG_PQTX2PF_48_RT_OFFSET 30008 -#define QM_REG_PQTX2PF_49_RT_OFFSET 30009 -#define QM_REG_PQTX2PF_50_RT_OFFSET 30010 -#define QM_REG_PQTX2PF_51_RT_OFFSET 30011 -#define QM_REG_PQTX2PF_52_RT_OFFSET 30012 -#define QM_REG_PQTX2PF_53_RT_OFFSET 30013 -#define QM_REG_PQTX2PF_54_RT_OFFSET 30014 -#define QM_REG_PQTX2PF_55_RT_OFFSET 30015 -#define QM_REG_PQTX2PF_56_RT_OFFSET 30016 -#define QM_REG_PQTX2PF_57_RT_OFFSET 30017 -#define QM_REG_PQTX2PF_58_RT_OFFSET 30018 -#define QM_REG_PQTX2PF_59_RT_OFFSET 30019 -#define QM_REG_PQTX2PF_60_RT_OFFSET 30020 -#define QM_REG_PQTX2PF_61_RT_OFFSET 30021 -#define QM_REG_PQTX2PF_62_RT_OFFSET 30022 -#define QM_REG_PQTX2PF_63_RT_OFFSET 30023 -#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024 -#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025 -#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026 -#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027 -#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028 -#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029 -#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030 
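An aside on the IRO macro pairs at the top of this hunk: every *_OFFSET macro is pure strided addressing into storm internal RAM (base plus the first index times m1, plus a second index times m2 for two-dimensional tables such as TSTORM_SCSI_BDQ_EXT_PROD), and the matching *_SIZE macro gives the element width. A minimal sketch of that scheme follows; the field names mirror the macros and the five-tuple initializers of iro_arr ({base, m1, m2, m3, size}), but the authoritative struct iro definition lives elsewhere in the driver.

#include <stdint.h>

/* Simplified model of one iro_arr entry; exact kernel layout may differ. */
struct iro_model {
	uint32_t base;	/* byte offset of the table in storm RAM */
	uint16_t m1;	/* stride for the first index (pf_id, queue_id, ...) */
	uint16_t m2;	/* stride for a second index, 0 when unused */
	uint16_t m3;
	uint16_t size;	/* width of one element, i.e. what *_SIZE reports */
};

/* Each *_OFFSET macro above expands to base + idx1 * m1 (+ idx2 * m2). */
static inline uint32_t iro_offset(const struct iro_model *e,
				  uint32_t idx1, uint32_t idx2)
{
	return e->base + idx1 * e->m1 + idx2 * e->m2;
}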
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031 -#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032 -#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033 -#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034 -#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035 -#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036 -#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037 -#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038 -#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039 -#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040 -#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041 -#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042 -#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043 -#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044 -#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045 -#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046 -#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047 -#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048 -#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049 -#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050 -#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051 -#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052 -#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 -#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308 -#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 -#define QM_REG_RLGLBLCRD_RT_OFFSET 30564 -#define QM_REG_RLGLBLCRD_RT_SIZE 256 -#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820 -#define QM_REG_RLPFPERIOD_RT_OFFSET 30821 -#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822 -#define QM_REG_RLPFINCVAL_RT_OFFSET 30823 -#define QM_REG_RLPFINCVAL_RT_SIZE 16 -#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839 -#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_RLPFCRD_RT_OFFSET 30855 -#define QM_REG_RLPFCRD_RT_SIZE 16 -#define QM_REG_RLPFENABLE_RT_OFFSET 30871 -#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872 -#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873 -#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 -#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889 -#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_WFQPFCRD_RT_OFFSET 30905 -#define QM_REG_WFQPFCRD_RT_SIZE 256 -#define QM_REG_WFQPFENABLE_RT_OFFSET 31161 -#define QM_REG_WFQVPENABLE_RT_OFFSET 31162 -#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163 -#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 -#define QM_REG_TXPQMAP_RT_OFFSET 31675 -#define QM_REG_TXPQMAP_RT_SIZE 512 -#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187 -#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 32699 -#define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 33211 -#define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723 -#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320 -#define QM_REG_VOQCRDLINE_RT_OFFSET 34043 -#define QM_REG_VOQCRDLINE_RT_SIZE 36 -#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079 -#define QM_REG_VOQINITCRDLINE_RT_SIZE 36 -#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116 -#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119 -#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126 -#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135 -#define 
NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231 -#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285 -#define 
PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308 - -#define RUNTIME_ARRAY_SIZE 34309 +#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 +#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 +#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 +#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 +#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 +#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 +#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 +#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 +#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 +#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 +#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 +#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 +#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 +#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 +#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 +#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 +#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 +#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 +#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET 18 +#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET 19 +#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET 20 +#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET 21 +#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET 22 +#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET 23 +#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET 24 +#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET 25 +#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET 26 +#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET 27 +#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET 28 +#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET 29 +#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET 30 +#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET 31 +#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET 32 +#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET 33 +#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET 34 +#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET 35 +#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET 36 +#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET 37 +#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 38 +#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 39 +#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 40 +#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 41 +#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 42 +#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 43 +#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 44 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 45 
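For orientation while reading the renumbering in this hunk: the *_RT_OFFSET constants are indices into one flat runtime-init array of u32 values (RUNTIME_ARRAY_SIZE slots, 43023 after this patch), and each *_RT_SIZE is the number of consecutive slots a block occupies. The driver fills slots during setup, then streams only the valid ones to the chip as part of firmware init. A toy sketch of that idea, with illustrative names rather than the qed API:

#include <stdbool.h>
#include <stdint.h>

#define RT_ARRAY_SIZE 43023	/* matches RUNTIME_ARRAY_SIZE in this diff */

/* Illustrative runtime-init store: each *_RT_OFFSET selects one u32 slot;
 * only slots explicitly stored here get written to the device later.
 */
struct rt_data_model {
	uint32_t init_val[RT_ARRAY_SIZE];
	bool b_valid[RT_ARRAY_SIZE];
};

static void rt_store(struct rt_data_model *rt, uint32_t rt_offset,
		     uint32_t val)
{
	rt->init_val[rt_offset] = val;
	rt->b_valid[rt_offset] = true;
}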
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024 +#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1069 +#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 1024 +#define CAU_REG_PI_MEMORY_RT_OFFSET 2093 +#define CAU_REG_PI_MEMORY_RT_SIZE 4416 +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6509 +#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6510 +#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6511 +#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6512 +#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6513 +#define PRS_REG_SEARCH_TCP_RT_OFFSET 6514 +#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6515 +#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6516 +#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6517 +#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6518 +#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6519 +#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6520 +#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6521 +#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6522 +#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6523 +#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6524 +#define SRC_REG_FIRSTFREE_RT_OFFSET 6525 +#define SRC_REG_FIRSTFREE_RT_SIZE 2 +#define SRC_REG_LASTFREE_RT_OFFSET 6527 +#define SRC_REG_LASTFREE_RT_SIZE 2 +#define SRC_REG_COUNTFREE_RT_OFFSET 6529 +#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6530 +#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6531 +#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6532 +#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6533 +#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6534 +#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6535 +#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6536 +#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6537 +#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6538 +#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6539 +#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6540 +#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6541 +#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6542 +#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6543 +#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6544 +#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6545 +#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6546 +#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6547 +#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6548 +#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6549 +#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6550 +#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6551 +#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6552 +#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6553 +#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6554 +#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6555 +#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6556 +#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6557 +#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6558 +#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6559 +#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6560 +#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6561 +#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET 6562 +#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET 6563 +#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET 6564 +#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET 6565 +#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6566 +#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 26414 +#define PGLUE_REG_B_VF_BASE_RT_OFFSET 32980 +#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 32981 +#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 32982 +#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 32983 +#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 32984 +#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 32985 +#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 32986 +#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 32987 +#define 
TM_REG_PF_ENABLE_CONN_RT_OFFSET 32988 +#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 32989 +#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 32990 +#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 32991 +#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 32992 +#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 +#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 33408 +#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608 +#define QM_REG_MAXPQSIZE_0_RT_OFFSET 34016 +#define QM_REG_MAXPQSIZE_1_RT_OFFSET 34017 +#define QM_REG_MAXPQSIZE_2_RT_OFFSET 34018 +#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 34019 +#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 34020 +#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 34021 +#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 34022 +#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 34023 +#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 34024 +#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 34025 +#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 34026 +#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 34027 +#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 34028 +#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 34029 +#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 34030 +#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 34031 +#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 34032 +#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 34033 +#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 34034 +#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 34035 +#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 34036 +#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 34037 +#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 34038 +#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 34039 +#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 34040 +#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 34041 +#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 34042 +#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 34043 +#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 34044 +#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 34045 +#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 34046 +#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 34047 +#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 34048 +#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 34049 +#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 34050 +#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 34051 +#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 34052 +#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 34053 +#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 34054 +#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 34055 +#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 34056 +#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 34057 +#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 34058 +#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 34059 +#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 34060 +#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 34061 +#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 34062 +#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 34063 +#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 34064 +#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 34065 +#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 34066 +#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 34067 +#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 34068 +#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 34069 +#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 34070 +#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 34071 +#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 34072 +#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 34073 +#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 34074 +#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 34075 +#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 34076 +#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 34077 +#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 34078 +#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 
34079 +#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 34080 +#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 34081 +#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082 +#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083 +#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 +#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211 +#define QM_REG_PTRTBLOTHER_RT_SIZE 256 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493 +#define QM_REG_PQTX2PF_0_RT_OFFSET 34494 +#define QM_REG_PQTX2PF_1_RT_OFFSET 34495 +#define QM_REG_PQTX2PF_2_RT_OFFSET 34496 +#define QM_REG_PQTX2PF_3_RT_OFFSET 34497 +#define QM_REG_PQTX2PF_4_RT_OFFSET 34498 +#define QM_REG_PQTX2PF_5_RT_OFFSET 34499 +#define QM_REG_PQTX2PF_6_RT_OFFSET 34500 +#define QM_REG_PQTX2PF_7_RT_OFFSET 34501 +#define QM_REG_PQTX2PF_8_RT_OFFSET 34502 +#define QM_REG_PQTX2PF_9_RT_OFFSET 34503 +#define QM_REG_PQTX2PF_10_RT_OFFSET 34504 +#define QM_REG_PQTX2PF_11_RT_OFFSET 34505 +#define QM_REG_PQTX2PF_12_RT_OFFSET 34506 +#define QM_REG_PQTX2PF_13_RT_OFFSET 34507 +#define QM_REG_PQTX2PF_14_RT_OFFSET 34508 +#define QM_REG_PQTX2PF_15_RT_OFFSET 34509 +#define QM_REG_PQTX2PF_16_RT_OFFSET 34510 +#define QM_REG_PQTX2PF_17_RT_OFFSET 34511 +#define QM_REG_PQTX2PF_18_RT_OFFSET 34512 +#define QM_REG_PQTX2PF_19_RT_OFFSET 34513 +#define QM_REG_PQTX2PF_20_RT_OFFSET 34514 +#define QM_REG_PQTX2PF_21_RT_OFFSET 34515 +#define QM_REG_PQTX2PF_22_RT_OFFSET 34516 +#define QM_REG_PQTX2PF_23_RT_OFFSET 34517 +#define QM_REG_PQTX2PF_24_RT_OFFSET 34518 +#define QM_REG_PQTX2PF_25_RT_OFFSET 34519 +#define QM_REG_PQTX2PF_26_RT_OFFSET 34520 +#define QM_REG_PQTX2PF_27_RT_OFFSET 34521 +#define QM_REG_PQTX2PF_28_RT_OFFSET 34522 +#define QM_REG_PQTX2PF_29_RT_OFFSET 34523 +#define QM_REG_PQTX2PF_30_RT_OFFSET 34524 +#define QM_REG_PQTX2PF_31_RT_OFFSET 34525 +#define QM_REG_PQTX2PF_32_RT_OFFSET 34526 +#define QM_REG_PQTX2PF_33_RT_OFFSET 34527 +#define QM_REG_PQTX2PF_34_RT_OFFSET 34528 +#define QM_REG_PQTX2PF_35_RT_OFFSET 34529 +#define QM_REG_PQTX2PF_36_RT_OFFSET 34530 +#define QM_REG_PQTX2PF_37_RT_OFFSET 34531 +#define QM_REG_PQTX2PF_38_RT_OFFSET 34532 +#define QM_REG_PQTX2PF_39_RT_OFFSET 34533 +#define QM_REG_PQTX2PF_40_RT_OFFSET 34534 +#define QM_REG_PQTX2PF_41_RT_OFFSET 34535 +#define QM_REG_PQTX2PF_42_RT_OFFSET 34536 +#define QM_REG_PQTX2PF_43_RT_OFFSET 34537 
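One practical note on register families like the QM_REG_PQTX2PF_0..63 run surrounding this point: the numbered offsets are assigned consecutively, so code that programs all members typically computes slot i from the first macro instead of naming each one. A hedged illustration (the helper is hypothetical, not a qed function):

#include <stdint.h>

/* Hypothetical helper: QM_REG_PQTX2PF_0..63 occupy consecutive runtime
 * slots (34494..34557 after this patch), so member i is simply first + i.
 * Caller must keep i < 64.
 */
static inline uint32_t qm_pqtx2pf_rt_offset(uint32_t i)
{
	return QM_REG_PQTX2PF_0_RT_OFFSET + i;
}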
+#define QM_REG_PQTX2PF_44_RT_OFFSET 34538 +#define QM_REG_PQTX2PF_45_RT_OFFSET 34539 +#define QM_REG_PQTX2PF_46_RT_OFFSET 34540 +#define QM_REG_PQTX2PF_47_RT_OFFSET 34541 +#define QM_REG_PQTX2PF_48_RT_OFFSET 34542 +#define QM_REG_PQTX2PF_49_RT_OFFSET 34543 +#define QM_REG_PQTX2PF_50_RT_OFFSET 34544 +#define QM_REG_PQTX2PF_51_RT_OFFSET 34545 +#define QM_REG_PQTX2PF_52_RT_OFFSET 34546 +#define QM_REG_PQTX2PF_53_RT_OFFSET 34547 +#define QM_REG_PQTX2PF_54_RT_OFFSET 34548 +#define QM_REG_PQTX2PF_55_RT_OFFSET 34549 +#define QM_REG_PQTX2PF_56_RT_OFFSET 34550 +#define QM_REG_PQTX2PF_57_RT_OFFSET 34551 +#define QM_REG_PQTX2PF_58_RT_OFFSET 34552 +#define QM_REG_PQTX2PF_59_RT_OFFSET 34553 +#define QM_REG_PQTX2PF_60_RT_OFFSET 34554 +#define QM_REG_PQTX2PF_61_RT_OFFSET 34555 +#define QM_REG_PQTX2PF_62_RT_OFFSET 34556 +#define QM_REG_PQTX2PF_63_RT_OFFSET 34557 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575 +#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586 +#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842 +#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 +#define QM_REG_RLGLBLCRD_RT_OFFSET 35098 +#define QM_REG_RLGLBLCRD_RT_SIZE 256 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354 +#define QM_REG_RLPFPERIOD_RT_OFFSET 35355 +#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356 +#define QM_REG_RLPFINCVAL_RT_OFFSET 35357 +#define QM_REG_RLPFINCVAL_RT_SIZE 16 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373 +#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_RLPFCRD_RT_OFFSET 35389 +#define QM_REG_RLPFCRD_RT_SIZE 16 +#define QM_REG_RLPFENABLE_RT_OFFSET 35405 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407 +#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423 +#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_WFQPFCRD_RT_OFFSET 35439 +#define QM_REG_WFQPFCRD_RT_SIZE 256 +#define QM_REG_WFQPFENABLE_RT_OFFSET 35695 +#define QM_REG_WFQVPENABLE_RT_OFFSET 35696 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697 +#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 +#define QM_REG_TXPQMAP_RT_OFFSET 36209 +#define QM_REG_TXPQMAP_RT_SIZE 512 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721 +#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 +#define QM_REG_WFQVPCRD_RT_OFFSET 37233 +#define 
QM_REG_WFQVPCRD_RT_SIZE 512 +#define QM_REG_WFQVPMAP_RT_OFFSET 37745 +#define QM_REG_WFQVPMAP_RT_SIZE 512 +#define QM_REG_PTRTBLTX_RT_OFFSET 38257 +#define QM_REG_PTRTBLTX_RT_SIZE 1024 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281 +#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320 +#define QM_REG_VOQCRDLINE_RT_OFFSET 39601 +#define QM_REG_VOQCRDLINE_RT_SIZE 36 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637 +#define QM_REG_VOQINITCRDLINE_RT_SIZE 36 +#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674 +#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785 +#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867 +#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917 +#define 
PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974 +#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977 +#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979 +#define 
PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980 +#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983 +#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986 +#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989 +#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992 +#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995 +#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998 +#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001 +#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004 +#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007 +#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010 +#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013 +#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016 +#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019 +#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022 + +#define RUNTIME_ARRAY_SIZE 43023 + +/* Init Callbacks */ +#define DMAE_READY_CB 0 /* The eth storm context for the Tstorm */ struct tstorm_eth_conn_st_ctx { @@ -4436,219 +4935,219 @@ struct xstorm_eth_conn_st_ctx { __le32 reserved[60]; }; -struct xstorm_eth_conn_ag_ctx { +struct e4_xstorm_eth_conn_ag_ctx { u8 reserved0; - u8 eth_state; + u8 state; u8 flags0; -#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define 
XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 -#define 
XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 
0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define 
E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 u8 flags10; -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define 
E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define 
XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; - __le16 ereserved1; + __le16 e5_reserved1; __le16 edpm_num_bds; __le16 tx_bd_cons; __le16 tx_bd_prod; @@ -4681,7 +5180,7 @@ struct xstorm_eth_conn_ag_ctx { u8 byte13; u8 byte14; u8 byte15; - u8 ereserved; + u8 e5_reserved; __le16 word11; __le32 reg10; __le32 reg11; @@ -4704,37 +5203,37 @@ struct ystorm_eth_conn_st_ctx { __le32 reserved[8]; }; -struct ystorm_eth_conn_ag_ctx { +struct e4_ystorm_eth_conn_ag_ctx { u8 byte0; u8 state; u8 flags0; -#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 -#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 -#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 -#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 -#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 +#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 +#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 +#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 -#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 -#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define 
YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 +#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 tx_q0_int_coallecing_timeset; u8 byte3; __le16 word0; @@ -4748,89 +5247,89 @@ struct ystorm_eth_conn_ag_ctx { __le32 reg3; }; -struct tstorm_eth_conn_ag_ctx { +struct e4_tstorm_eth_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 +#define 
E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; 
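[Editor's note] The flagsN bytes above are packed bitfields: every field gets a paired *_MASK/*_SHIFT macro, with 0x1 masks for single-bit enables (CF*EN, RULE*EN) and 0x3 masks for the two-bit counter-flag (CF*) fields. A minimal sketch of the access pattern, assuming nothing beyond standard C — the get_field/set_field helpers here are illustrative, not the driver's own:

#include <stdint.h>
#include <stdio.h>

/* Mirrors one two-bit field from the diff above, e.g. the pair
 * E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 / _SHIFT 6.
 */
#define CF0_MASK	0x3
#define CF0_SHIFT	6

/* Extract a field from a packed flags byte. */
static inline uint8_t get_field(uint8_t flags, uint8_t mask, uint8_t shift)
{
	return (flags >> shift) & mask;
}

/* Clear a field in a packed flags byte, then write a new value into it. */
static inline uint8_t set_field(uint8_t flags, uint8_t mask, uint8_t shift,
				uint8_t val)
{
	flags &= (uint8_t)~(mask << shift);
	flags |= (uint8_t)((val & mask) << shift);
	return flags;
}

int main(void)
{
	uint8_t flags0 = 0;

	flags0 = set_field(flags0, CF0_MASK, CF0_SHIFT, 0x2);
	printf("CF0 = %u\n", get_field(flags0, CF0_MASK, CF0_SHIFT)); /* CF0 = 2 */
	return 0;
}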
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -4852,63 +5351,63 @@ struct tstorm_eth_conn_ag_ctx { __le32 reg10; }; -struct ustorm_eth_conn_ag_ctx { +struct e4_ustorm_eth_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 +#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 +#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 +#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 +#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 +#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 u8 flags2; -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 -#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 +#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 +#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define 
E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -4932,20 +5431,21 @@ struct mstorm_eth_conn_st_ctx { }; /* eth connection context */ -struct eth_conn_context { +struct e4_eth_conn_context { struct tstorm_eth_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; struct pstorm_eth_conn_st_ctx pstorm_st_context; struct xstorm_eth_conn_st_ctx xstorm_st_context; - struct xstorm_eth_conn_ag_ctx xstorm_ag_context; + struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context; struct ystorm_eth_conn_st_ctx ystorm_st_context; - struct ystorm_eth_conn_ag_ctx ystorm_ag_context; - struct tstorm_eth_conn_ag_ctx tstorm_ag_context; - struct ustorm_eth_conn_ag_ctx ustorm_ag_context; + struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context; + struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context; + struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context; struct ustorm_eth_conn_st_ctx ustorm_st_context; struct mstorm_eth_conn_st_ctx mstorm_st_context; }; +/* Ethernet filter types: mac/vlan/pair */ enum eth_error_code { ETH_OK = 0x00, ETH_FILTERS_MAC_ADD_FAIL_FULL, @@ -4972,6 +5472,7 @@ enum eth_error_code { MAX_ETH_ERROR_CODE }; +/* Opcodes for the event ring */ enum eth_event_opcode { ETH_EVENT_UNUSED, ETH_EVENT_VPORT_START, @@ -4983,13 +5484,14 @@ enum eth_event_opcode { ETH_EVENT_RX_QUEUE_UPDATE, ETH_EVENT_RX_QUEUE_STOP, ETH_EVENT_FILTERS_UPDATE, - ETH_EVENT_RESERVED, - ETH_EVENT_RESERVED2, - ETH_EVENT_RESERVED3, + ETH_EVENT_RX_ADD_OPENFLOW_FILTER, + ETH_EVENT_RX_DELETE_OPENFLOW_FILTER, + ETH_EVENT_RX_CREATE_OPENFLOW_ACTION, ETH_EVENT_RX_ADD_UDP_FILTER, ETH_EVENT_RX_DELETE_UDP_FILTER, - ETH_EVENT_RESERVED4, - ETH_EVENT_RESERVED5, + ETH_EVENT_RX_CREATE_GFT_ACTION, + ETH_EVENT_RX_GFT_UPDATE_FILTER, + ETH_EVENT_TX_QUEUE_UPDATE, MAX_ETH_EVENT_OPCODE }; @@ -5039,6 +5541,7 @@ enum eth_filter_type { MAX_ETH_FILTER_TYPE }; +/* Eth IPv4 Fragment Type */ enum eth_ipv4_frag_type { ETH_IPV4_NOT_FRAG, ETH_IPV4_FIRST_FRAG, @@ -5046,12 +5549,14 @@ enum eth_ipv4_frag_type { MAX_ETH_IPV4_FRAG_TYPE }; +/* eth IPv4 Fragment Type */ enum eth_ip_type { ETH_IPV4, ETH_IPV6, MAX_ETH_IP_TYPE }; +/* Ethernet Ramrod Command IDs */ enum eth_ramrod_cmd_id { ETH_RAMROD_UNUSED, ETH_RAMROD_VPORT_START, @@ -5070,10 +5575,11 @@ enum eth_ramrod_cmd_id { ETH_RAMROD_RX_DELETE_UDP_FILTER, ETH_RAMROD_RX_CREATE_GFT_ACTION, ETH_RAMROD_GFT_UPDATE_FILTER, + ETH_RAMROD_TX_QUEUE_UPDATE, MAX_ETH_RAMROD_CMD_ID }; -/* return code from eth sp ramrods */ +/* Return code from eth sp ramrods */ struct eth_return_code { u8 value; #define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F @@ -5209,18 +5715,14 @@ struct eth_vport_tx_mode { __le16 reserved2[3]; }; +/* GFT filter update action type */ enum gft_filter_update_action { GFT_ADD_FILTER, GFT_DELETE_FILTER, MAX_GFT_FILTER_UPDATE_ACTION }; -enum gft_logic_filter_type { - GFT_FILTER_TYPE, - RFS_FILTER_TYPE, - MAX_GFT_LOGIC_FILTER_TYPE -}; - +/* Ramrod data for rx add openflow filter */ struct rx_add_openflow_filter_data { __le16 action_icid; u8 priority; @@ -5244,11 +5746,13 @@ struct rx_add_openflow_filter_data { __le16 l4_src_port; }; +/* Ramrod data for rx create gft action */ struct rx_create_gft_action_data { u8 vport_id; u8 reserved[7]; }; +/* Ramrod data for rx create openflow action */ struct rx_create_openflow_action_data { u8 vport_id; u8 reserved[7]; @@ -5286,7 +5790,7 @@ struct rx_queue_start_ramrod_data { struct regpair reserved2; }; -/* Ramrod data for rx queue start ramrod */ +/* Ramrod data for rx queue stop ramrod */ struct rx_queue_stop_ramrod_data { __le16 rx_queue_id; u8 
complete_cqe_flg; @@ -5324,14 +5828,22 @@ struct rx_udp_filter_data { __le32 tenant_id; }; +/* Add or delete GFT filter - filter is packet header of type of packet wished + * to pass certain FW flow. + */ struct rx_update_gft_filter_data { struct regpair pkt_hdr_addr; __le16 pkt_hdr_length; - __le16 rx_qid_or_action_icid; - u8 vport_id; - u8 filter_type; + __le16 action_icid; + __le16 rx_qid; + __le16 flow_id; + __le16 vport_id; + u8 action_icid_valid; + u8 rx_qid_valid; + u8 flow_id_valid; u8 filter_action; u8 assert_on_error; + u8 reserved; }; /* Ramrod data for rx queue start ramrod */ @@ -5377,6 +5889,14 @@ struct tx_queue_stop_ramrod_data { __le16 reserved[4]; }; +/* Ramrod data for tx queue update ramrod */ +struct tx_queue_update_ramrod_data { + __le16 update_qm_pq_id_flg; + __le16 qm_pq_id; + __le32 reserved0; + struct regpair reserved1[5]; +}; + /* Ramrod data for vport update ramrod */ struct vport_filter_update_ramrod_data { struct eth_filter_cmd_header filter_cmd_hdr; @@ -5477,219 +5997,219 @@ struct vport_update_ramrod_data { struct eth_vport_rss_config rss_config; }; -struct xstorm_eth_conn_agctxdq_ext_ldpart { +struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart { u8 reserved0; - u8 eth_state; + u8 state; u8 flags0; -#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7 u8 flags1; -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6 u8 flags3; -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6 u8 flags4; -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4 -#define 
XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6 u8 flags5; -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6 u8 flags6; -#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6 u8 flags7; -#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 u8 flags8; 
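[Editor's note] The E4XSTORMETHCONNAGCTXDQEXTLDPART_* macros follow the same MASK/SHIFT scheme; the underscore-free prefix presumably keeps these very long identifiers within line-length limits. Token-pasting accessors make such pairs usable by field name alone. A sketch in the style qed-like drivers use — these GET_FIELD/SET_FIELD definitions are local to the example, not quoted from the driver:

#include <stdint.h>

#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

#define SET_FIELD(value, name, val)				\
	do {							\
		(value) &= ~((name##_MASK) << (name##_SHIFT));	\
		(value) |= ((val) & name##_MASK) << name##_SHIFT; \
	} while (0)

/* One pair copied from the diff above. */
#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK	0x1
#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT	6

int main(void)
{
	uint8_t flags7 = 0;

	/* Expands to mask 0x1 at shift 6 via token pasting. */
	SET_FIELD(flags7, E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN, 1);
	return GET_FIELD(flags7, E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN) ? 0 : 1;
}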
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 u8 flags9; -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define 
E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7 u8 flags10; -#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7 u8 flags11; -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 +#define 
E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 +#define 
E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; - __le16 ereserved1; + __le16 e5_reserved1; __le16 edpm_num_bds; __le16 tx_bd_cons; __le16 tx_bd_prod; @@ -5706,256 +6226,256 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart { __le32 reg4; }; -struct mstorm_eth_conn_ag_ctx { +struct e4_mstorm_eth_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct xstorm_eth_hw_conn_ag_ctx { +struct e4_xstorm_eth_hw_conn_ag_ctx { u8 reserved0; - u8 eth_state; + u8 state; u8 flags0; -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 
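[Editor's note] The pattern running through this whole hunk is a mechanical rename: the aggregative-context structs gain an e4_ prefix (and the macros an E4_ one), apparently marking them as specific to the current E4 ASIC generation, while fields such as eth_state, BIT12/BIT13, and ereserved become state and e5_reserved* placeholders for the follow-on E5 device. Since these structs are part of the firmware ABI, a rename like this must not move any field. A self-contained sketch of the kind of compile-time guard that would catch an accidental layout change — the two structs here are illustrative stand-ins, far smaller than the real e4_xstorm_eth_hw_conn_ag_ctx:

#include <stddef.h>
#include <stdint.h>

struct old_ag_ctx {
	uint8_t reserved0;
	uint8_t eth_state;
	uint8_t flags0;
	uint8_t flags1;
};

struct e4_ag_ctx {
	uint8_t reserved0;
	uint8_t state;		/* renamed from eth_state; offset unchanged */
	uint8_t flags0;
	uint8_t flags1;
};

/* A pure rename must preserve the overall size and every field offset. */
_Static_assert(sizeof(struct old_ag_ctx) == sizeof(struct e4_ag_ctx),
	       "rename changed struct size");
_Static_assert(offsetof(struct old_ag_ctx, flags1) ==
	       offsetof(struct e4_ag_ctx, flags1),
	       "rename moved flags1");

int main(void)
{
	return 0;
}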
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 +#define 
E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define 
E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define 
E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 u8 flags10; -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define 
E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define 
E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; - __le16 ereserved1; + __le16 e5_reserved1; __le16 edpm_num_bds; __le16 tx_bd_cons; __le16 tx_bd_prod; @@ -5963,6 +6483,7 @@ struct xstorm_eth_hw_conn_ag_ctx { __le16 conn_dpi; }; +/* GFT CAM line struct */ struct gft_cam_line { __le32 camline; #define GFT_CAM_LINE_VALID_MASK 0x1 @@ -5975,6 +6496,7 @@ struct gft_cam_line { #define GFT_CAM_LINE_RESERVED1_SHIFT 29 }; +/* GFT CAM line struct with fields breakout */ struct gft_cam_line_mapped { __le32 camline; #define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1 @@ -6008,28 +6530,31 @@ union gft_cam_line_union { struct gft_cam_line_mapped cam_line_mapped; }; +/* Used in gft_profile_key: Indication for ip version */ enum gft_profile_ip_version { GFT_PROFILE_IPV4 = 0, GFT_PROFILE_IPV6 = 1, MAX_GFT_PROFILE_IP_VERSION }; +/* Profile key struct for GFT logic in Prs */ struct gft_profile_key { __le16 profile_key; -#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1 -#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0 -#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1 -#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1 -#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF -#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2 -#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF -#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6 -#define GFT_PROFILE_KEY_PF_ID_MASK 0xF -#define GFT_PROFILE_KEY_PF_ID_SHIFT 10 -#define 
GFT_PROFILE_KEY_RESERVED0_MASK 0x3 -#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14 -}; - +#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1 +#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0 +#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1 +#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1 +#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF +#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2 +#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF +#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6 +#define GFT_PROFILE_KEY_PF_ID_MASK 0xF +#define GFT_PROFILE_KEY_PF_ID_SHIFT 10 +#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3 +#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14 +}; + +/* Used in gft_profile_key: Indication for tunnel type */ enum gft_profile_tunnel_type { GFT_PROFILE_NO_TUNNEL = 0, GFT_PROFILE_VXLAN_TUNNEL = 1, @@ -6040,6 +6565,7 @@ enum gft_profile_tunnel_type { MAX_GFT_PROFILE_TUNNEL_TYPE }; +/* Used in gft_profile_key: Indication for protocol type */ enum gft_profile_upper_protocol_type { GFT_PROFILE_ROCE_PROTOCOL = 0, GFT_PROFILE_RROCE_PROTOCOL = 1, @@ -6060,6 +6586,7 @@ enum gft_profile_upper_protocol_type { MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE }; +/* GFT RAM line struct */ struct gft_ram_line { __le32 lo; #define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3 @@ -6149,6 +6676,7 @@ struct gft_ram_line { #define GFT_RAM_LINE_RESERVED1_SHIFT 10 }; +/* Used in the first 2 bits of gft_ram_line: Indication for vlan mask */ enum gft_vlan_select { INNER_PROVIDER_VLAN = 0, INNER_VLAN = 1, @@ -6157,10 +6685,205 @@ enum gft_vlan_select { MAX_GFT_VLAN_SELECT }; +/* The rdma task context of Ystorm */ +struct ystorm_rdma_task_st_ctx { + struct regpair temp[4]; +}; + +struct e4_ystorm_rdma_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 msem_ctx_upd_seq; + u8 flags0; +#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6 +#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 +#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 +#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 +#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 key; + __le32 mw_cnt; + u8 ref_cnt_seq; + u8 ctx_upd_seq; + __le16 dif_flags; + __le16 tx_ref_count; + __le16 last_used_ltid; + __le16 parent_mr_lo; + __le16 parent_mr_hi; + __le32 fbo_lo; + __le32 fbo_hi; +}; + +struct e4_mstorm_rdma_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 +#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 +#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4 +#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 +#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0 +#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 key; + __le32 mw_cnt; + u8 ref_cnt_seq; + u8 ctx_upd_seq; + __le16 dif_flags; + __le16 tx_ref_count; + __le16 last_used_ltid; + __le16 parent_mr_lo; + __le16 parent_mr_hi; + __le32 fbo_lo; + __le32 fbo_hi; +}; + +/* The roce task context of Mstorm */ struct mstorm_rdma_task_st_ctx { struct regpair temp[4]; }; +/* The roce task context of Ustorm */ +struct ustorm_rdma_task_st_ctx { + struct regpair temp[2]; +}; + +struct e4_ustorm_rdma_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6 + u8 flags1; +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 +#define 
E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2 +#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0 +#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1 +#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2 +#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7 + u8 flags3; +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 reg2; + __le32 dif_runt_value; + __le32 reg4; + __le32 reg5; +}; + +/* RDMA task context */ +struct e4_rdma_task_context { + struct ystorm_rdma_task_st_ctx ystorm_st_context; + struct e4_ystorm_rdma_task_ag_ctx ystorm_ag_context; + struct tdif_task_context tdif_context; + struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context; + struct mstorm_rdma_task_st_ctx mstorm_st_context; + struct rdif_task_context rdif_context; + struct ustorm_rdma_task_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; + struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context; +}; + +/* rdma function close ramrod data */ struct rdma_close_func_ramrod_data { u8 cnq_start_offset; u8 num_cnqs; @@ -6169,6 +6892,7 @@ struct rdma_close_func_ramrod_data { u8 reserved[4]; }; +/* rdma function init CNQ parameters */ struct rdma_cnq_params { __le16 sb_num; u8 sb_index; @@ -6179,6 +6903,7 @@ struct rdma_cnq_params { u8 reserved1[6]; }; +/* rdma create cq ramrod data */ struct rdma_create_cq_ramrod_data { struct regpair cq_handle; struct regpair pbl_addr; @@ -6193,21 +6918,25 @@ struct rdma_create_cq_ramrod_data { __le16 reserved1; }; +/* rdma deregister tid ramrod data */ struct rdma_deregister_tid_ramrod_data { __le32 itid; __le32 reserved; }; +/* rdma destroy cq output params */ struct rdma_destroy_cq_output_params { __le16 cnq_num; __le16 reserved0; __le32 reserved1; }; +/* rdma destroy cq ramrod data */ struct rdma_destroy_cq_ramrod_data { struct regpair output_params_addr; }; +/* RDMA slow path EQ cmd IDs */ enum rdma_event_opcode { RDMA_EVENT_UNUSED, RDMA_EVENT_FUNC_INIT, @@ -6223,6 +6952,7 @@ enum rdma_event_opcode { MAX_RDMA_EVENT_OPCODE }; +/* RDMA FW return code for slow path ramrods */ enum rdma_fw_return_code { RDMA_RETURN_OK = 0,
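The e4_rdma_task_context above stitches the per-storm "st" (storm) and "ag" (aggregation) sub-contexts together in the exact order and at the exact offsets the firmware expects, explicit padding included. For position-sensitive structs like this, the usual safeguard is a compile-time layout check; the sketch below shows the idea on a cut-down stand-in (demo_task_context and its sizes are invented for illustration, not the real context).

#include <stddef.h>

struct regpair {
	unsigned int lo;
	unsigned int hi;
};

/* Cut-down stand-in for a firmware task context; only the field
 * ordering and the explicit padding matter here. */
struct demo_task_context {
	struct regpair ystorm_st_context[4];	/* 32 bytes */
	struct regpair ustorm_st_padding[2];	/* 16 bytes */
};

/* Fail the build if the layout ever drifts from the device ABI. */
_Static_assert(offsetof(struct demo_task_context, ustorm_st_padding) == 32,
	       "ystorm region must stay 32 bytes");
_Static_assert(sizeof(struct demo_task_context) == 48,
	       "context size is part of the device ABI");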
RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR, @@ -6232,20 +6962,24 @@ enum rdma_fw_return_code { MAX_RDMA_FW_RETURN_CODE }; +/* rdma function init header */ struct rdma_init_func_hdr { u8 cnq_start_offset; u8 num_cnqs; u8 cq_ring_mode; u8 vf_id; u8 vf_valid; - u8 reserved[3]; + u8 relaxed_ordering; + u8 reserved[2]; }; +/* rdma function init ramrod data */ struct rdma_init_func_ramrod_data { struct rdma_init_func_hdr params_header; struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES]; }; +/* RDMA ramrod command IDs */ enum rdma_ramrod_cmd_id { RDMA_RAMROD_UNUSED, RDMA_RAMROD_FUNC_INIT, @@ -6261,42 +6995,43 @@ enum rdma_ramrod_cmd_id { MAX_RDMA_RAMROD_CMD_ID }; +/* rdma register tid ramrod data */ struct rdma_register_tid_ramrod_data { __le16 flags; #define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F #define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 0 #define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1 #define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 5 -#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6 -#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7 -#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8 -#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6 +#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1 #define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 9 #define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1 #define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 10 -#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11 -#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12 #define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1 #define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 13 -#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3 -#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14 +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3 +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14 u8 flags1; #define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F -#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0 -#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7 -#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5 +#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0 +#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7 +#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5 u8 flags2; -#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1 -#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0 +#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0 #define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1 #define 
RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1 -#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F -#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2 +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2 u8 key; u8 length_hi; u8 vf_id; @@ -6313,19 +7048,21 @@ struct rdma_register_tid_ramrod_data { __le32 reserved4[2]; }; +/* rdma resize cq output params */ struct rdma_resize_cq_output_params { __le32 old_cq_cons; __le32 old_cq_prod; }; +/* rdma resize cq ramrod data */ struct rdma_resize_cq_ramrod_data { u8 flags; -#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1 -#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0 -#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1 -#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1 -#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F -#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2 +#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1 +#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0 +#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1 +#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1 +#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F +#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2 u8 pbl_log_page_size; __le16 pbl_num_pages; __le32 max_cqes; @@ -6333,10 +7070,12 @@ struct rdma_resize_cq_ramrod_data { struct regpair output_params_addr; }; +/* rdma SRQ context */ struct rdma_srq_context { struct regpair temp[8]; }; +/* rdma create SRQ ramrod data */ struct rdma_srq_create_ramrod_data { struct regpair pbl_base_addr; __le16 pages_in_srq_pbl; @@ -6348,206 +7087,19 @@ struct rdma_srq_create_ramrod_data { struct regpair producers_addr; }; +/* rdma destroy SRQ ramrod data */ struct rdma_srq_destroy_ramrod_data { struct rdma_srq_id srq_id; __le32 reserved; }; +/* rdma modify SRQ ramrod data */ struct rdma_srq_modify_ramrod_data { struct rdma_srq_id srq_id; __le32 wqe_limit; }; -struct ystorm_rdma_task_st_ctx { - struct regpair temp[4]; -}; - -struct ystorm_rdma_task_ag_ctx { - u8 reserved; - u8 byte1; - __le16 msem_ctx_upd_seq; - u8 flags0; -#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6 -#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 - u8 flags1; -#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 -#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 -#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 -#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 -#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 -#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 -#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 -#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 - u8 flags2; -#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 -#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define 
YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 - u8 key; - __le32 mw_cnt; - u8 ref_cnt_seq; - u8 ctx_upd_seq; - __le16 dif_flags; - __le16 tx_ref_count; - __le16 last_used_ltid; - __le16 parent_mr_lo; - __le16 parent_mr_hi; - __le32 fbo_lo; - __le32 fbo_hi; -}; - -struct mstorm_rdma_task_ag_ctx { - u8 reserved; - u8 byte1; - __le16 icid; - u8 flags0; -#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 -#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 - u8 flags1; -#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 -#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 -#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 -#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 -#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 -#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4 -#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 -#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 - u8 flags2; -#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0 -#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 - u8 key; - __le32 mw_cnt; - u8 ref_cnt_seq; - u8 ctx_upd_seq; - __le16 dif_flags; - __le16 tx_ref_count; - __le16 last_used_ltid; - __le16 parent_mr_lo; - __le16 parent_mr_hi; - __le32 fbo_lo; - __le32 fbo_hi; -}; - -struct ustorm_rdma_task_st_ctx { - struct regpair temp[2]; -}; - -struct ustorm_rdma_task_ag_ctx { - u8 reserved; - u8 byte1; - __le16 icid; - u8 flags0; -#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5 -#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 -#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6 - u8 flags1; -#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 -#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0 -#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 
-#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2 -#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 -#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4 -#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 - u8 flags2; -#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0 -#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1 -#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2 -#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 -#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5 -#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6 -#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7 - u8 flags3; -#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0 -#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2 -#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 - __le32 dif_err_intervals; - __le32 dif_error_1st_interval; - __le32 reg2; - __le32 dif_runt_value; - __le32 reg4; - __le32 reg5; -}; - -struct rdma_task_context { - struct ystorm_rdma_task_st_ctx ystorm_st_context; - struct ystorm_rdma_task_ag_ctx ystorm_ag_context; - struct tdif_task_context tdif_context; - struct mstorm_rdma_task_ag_ctx mstorm_ag_context; - struct mstorm_rdma_task_st_ctx mstorm_st_context; - struct rdif_task_context rdif_context; - struct ustorm_rdma_task_st_ctx ustorm_st_context; - struct regpair ustorm_st_padding[2]; - struct ustorm_rdma_task_ag_ctx ustorm_ag_context; -}; - +/* RDMA Tid type enumeration (for register_tid ramrod) */ enum rdma_tid_type { RDMA_TID_REGISTERED_MR, RDMA_TID_FMR, @@ -6556,214 +7108,214 @@ enum rdma_tid_type { MAX_RDMA_TID_TYPE }; -struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part { +struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { u8 reserved0; u8 state; u8 flags0; -#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 +#define 
E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7 u8 flags1; -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6 u8 
flags3; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6 u8 flags5; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6 u8 flags6; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6 u8 flags7; 
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 u8 flags8; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 u8 flags9; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 -#define 
XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7 u8 flags10; -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7 u8 flags11; -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 -#define 
XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 
0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4 -#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 -#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3 -#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -6783,126 +7335,126 @@ struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part { __le32 reg4; }; -struct mstorm_rdma_conn_ag_ctx { +struct e4_mstorm_rdma_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1 -#define 
MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct tstorm_rdma_conn_ag_ctx { +struct e4_tstorm_rdma_conn_ag_ctx { u8 reserved0; u8 byte1; u8 flags0; -#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6 +#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2 +#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6 +#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6 +#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 u8 flags4; -#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define 
TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 -#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -6924,73 +7476,73 @@ struct tstorm_rdma_conn_ag_ctx { __le32 reg10; }; -struct tstorm_rdma_task_ag_ctx { +struct e4_tstorm_rdma_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; -#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define 
TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4 -#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 -#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 +#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 -#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1 -#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2 -#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4 -#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0 -#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2 -#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4 -#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6 u8 flags3; -#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0 -#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2 -#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3 -#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4 -#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5 -#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6 -#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 
3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7 u8 flags4; -#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0 -#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 byte2; __le16 word1; __le32 reg0; @@ -7003,63 +7555,63 @@ struct tstorm_rdma_task_ag_ctx { __le32 reg2; }; -struct ustorm_rdma_conn_ag_ctx { +struct e4_ustorm_rdma_conn_ag_ctx { u8 reserved; u8 byte1; u8 flags0; -#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2 -#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 -#define 
USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 -#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 -#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 +#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 u8 flags3; -#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0 -#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define 
E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 conn_dpi; @@ -7072,214 +7624,214 @@ struct ustorm_rdma_conn_ag_ctx { __le16 word3; }; -struct xstorm_rdma_conn_ag_ctx { +struct e4_xstorm_rdma_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define 
E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3 -#define 
XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2 -#define 
XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 flags11; -#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5 
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define 
XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -7301,37 +7853,37 @@ struct xstorm_rdma_conn_ag_ctx { __le32 reg6; }; -struct ystorm_rdma_conn_ag_ctx { +struct e4_ystorm_rdma_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 -#define 
YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -7345,62 +7897,70 @@ struct ystorm_rdma_conn_ag_ctx { __le32 reg3; }; -struct mstorm_roce_conn_st_ctx { - struct regpair temp[6]; +/* The roce storm context of Ystorm */ +struct ystorm_roce_conn_st_ctx { + struct regpair temp[2]; }; +/* The roce storm context of Pstorm */ struct pstorm_roce_conn_st_ctx { struct regpair temp[16]; }; -struct ystorm_roce_conn_st_ctx { - struct regpair temp[2]; -}; - +/* The roce storm context of Xstorm */ struct xstorm_roce_conn_st_ctx { struct regpair temp[24]; }; +/* The roce storm context of Tstorm */ struct tstorm_roce_conn_st_ctx { struct regpair temp[30]; }; +/* The roce storm context of Mstorm */ +struct mstorm_roce_conn_st_ctx { + struct regpair temp[6]; +}; + +/* The roce storm context of Ustorm */ struct ustorm_roce_conn_st_ctx { struct regpair temp[12]; }; -struct roce_conn_context { +/* roce connection context */ +struct e4_roce_conn_context { struct ystorm_roce_conn_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct pstorm_roce_conn_st_ctx pstorm_st_context; struct xstorm_roce_conn_st_ctx xstorm_st_context; struct regpair xstorm_st_padding[2]; - struct xstorm_rdma_conn_ag_ctx xstorm_ag_context; - struct tstorm_rdma_conn_ag_ctx tstorm_ag_context; + struct e4_xstorm_rdma_conn_ag_ctx xstorm_ag_context; + struct e4_tstorm_rdma_conn_ag_ctx tstorm_ag_context; struct timers_context timer_context; - struct ustorm_rdma_conn_ag_ctx ustorm_ag_context; + struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context; struct tstorm_roce_conn_st_ctx tstorm_st_context; struct mstorm_roce_conn_st_ctx mstorm_st_context; struct ustorm_roce_conn_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2]; }; +/* roce create qp requester ramrod data */ struct roce_create_qp_req_ramrod_data { __le16 flags; -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12 u8 max_ord; u8 traffic_class; u8 hop_limit; @@ -7431,26 +7991,27 @@ struct roce_create_qp_req_ramrod_data { __le16 dpi; }; +/* roce create qp responder ramrod data */ struct roce_create_qp_resp_ramrod_data { __le16 flags; -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1 +#define 
ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11 u8 max_ird; u8 traffic_class; u8 hop_limit; __le16 p_key; __le32 flow_label; @@ -7482,24 +8043,40 @@ struct roce_create_qp_resp_ramrod_data { __le16 dpi; }; +/* roce DCQCN received statistics */ +struct roce_dcqcn_received_stats { + struct regpair ecn_pkt_rcv; + struct regpair cnp_pkt_rcv; +}; + +/* roce DCQCN sent statistics */ +struct roce_dcqcn_sent_stats { + struct regpair cnp_pkt_sent; +}; + +/* RoCE destroy qp requester output params */ struct roce_destroy_qp_req_output_params { __le32 num_bound_mw; __le32 cq_prod; }; +/* RoCE destroy qp requester ramrod data */ struct roce_destroy_qp_req_ramrod_data { struct regpair output_params_addr; }; +/* RoCE destroy qp responder output params */ struct roce_destroy_qp_resp_output_params { __le32 num_invalidated_mw; __le32 cq_prod; }; +/* RoCE destroy qp responder ramrod data */ struct roce_destroy_qp_resp_ramrod_data { struct regpair output_params_addr; }; +/* roce special events statistics */ struct roce_events_stats { __le16 silent_drops; __le16 rnr_naks_sent; @@ -7508,6 +8085,7 @@ struct roce_events_stats { __le32 reserved; }; +/* ROCE slow path EQ cmd IDs */ enum roce_event_opcode { ROCE_EVENT_CREATE_QP = 11, ROCE_EVENT_MODIFY_QP, @@ -7518,6 +8096,7 @@ enum roce_event_opcode { MAX_ROCE_EVENT_OPCODE }; +/* roce func init params */ struct roce_init_func_params { u8 ll2_queue_id; u8 cnp_vlan_priority; @@ -7526,42 +8105,46 @@ struct roce_init_func_params { __le32 cnp_send_timeout; }; +/* roce func init ramrod data */ struct roce_init_func_ramrod_data { struct rdma_init_func_ramrod_data rdma; struct roce_init_func_params roce; }; +/* roce modify qp requester ramrod data */ struct roce_modify_qp_req_ramrod_data { __le16 flags; -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x7 -#define
ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 13 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 13 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14 u8 fields; -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4 u8 max_ord; u8 traffic_class; u8 hop_limit; @@ -7570,66 +8153,76 @@ struct roce_modify_qp_req_ramrod_data { __le32 ack_timeout_val; __le16 mtu; __le16 reserved2; - __le32 reserved3[3]; + __le32 reserved3[2]; + __le16 low_latency_phy_queue; + __le16 regular_latency_phy_queue; __le32 src_gid[4]; __le32 dst_gid[4]; }; +/* roce modify qp responder ramrod data */ struct roce_modify_qp_resp_ramrod_data { __le16 flags; -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1 -#define 
ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x3F -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 10 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 10 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11 u8 fields; -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3 u8 max_ird; u8 traffic_class; u8 hop_limit; __le16 p_key; __le32 flow_label; __le16 mtu; - __le16 reserved2; + __le16 low_latency_phy_queue; + __le16 regular_latency_phy_queue; + u8 reserved2[6]; __le32 src_gid[4]; __le32 dst_gid[4]; }; +/* RoCE query qp requester output params */ struct roce_query_qp_req_output_params { __le32 psn; __le32 flags; -#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1 -#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0 -#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1 -#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1 -#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF -#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2 
}; +/* RoCE query qp requester ramrod data */ struct roce_query_qp_req_ramrod_data { struct regpair output_params_addr; }; +/* RoCE query qp responder output params */ struct roce_query_qp_resp_output_params { __le32 psn; __le32 err_flag; @@ -7639,10 +8232,12 @@ struct roce_query_qp_resp_output_params { #define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1 }; +/* RoCE query qp responder ramrod data */ struct roce_query_qp_resp_ramrod_data { struct regpair output_params_addr; }; +/* ROCE ramrod command IDs */ enum roce_ramrod_cmd_id { ROCE_RAMROD_CREATE_QP = 11, ROCE_RAMROD_MODIFY_QP, @@ -7653,163 +8248,163 @@ enum roce_ramrod_cmd_id { MAX_ROCE_RAMROD_CMD_ID }; -struct mstorm_roce_req_conn_ag_ctx { +struct e4_mstorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define 
E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct mstorm_roce_resp_conn_ag_ctx { +struct e4_mstorm_roce_resp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct tstorm_roce_req_conn_ag_ctx { +struct e4_tstorm_roce_req_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_SHIFT 1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_MASK 0x1 -#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_SHIFT 2 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6 u8 flags1; -#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6 u8 flags3; -#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3 -#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 u8 flags4; -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 snd_nxt_psn; __le32 snd_max_psn; @@ -7825,95 +8420,95 @@ struct tstorm_roce_req_conn_ag_ctx { u8 byte4; u8 byte5; __le16 snd_sq_cons; - __le16 word2; + __le16 conn_dpi; __le16 word3; __le32 reg9; __le32 reg10; }; -struct tstorm_roce_resp_conn_ag_ctx { +struct e4_tstorm_roce_resp_conn_ag_ctx { u8 byte0; u8 state; u8 flags0; -#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5 +#define 
E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define 
TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 psn_and_rxmit_id_echo; __le32 reg1; __le32 reg2; @@ -7935,63 +8530,63 @@ struct 
tstorm_roce_resp_conn_ag_ctx { __le32 reg10; }; -struct ustorm_roce_req_conn_ag_ctx { +struct e4_ustorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5 +#define 
E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -8004,63 +8599,63 @@ struct ustorm_roce_req_conn_ag_ctx { __le16 word3; }; -struct ustorm_roce_resp_conn_ag_ctx { +struct e4_ustorm_roce_resp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3 -#define 
USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define 
E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -8073,214 +8668,214 @@ struct ustorm_roce_resp_conn_ag_ctx { __le16 word3; }; -struct xstorm_roce_req_conn_ag_ctx { +struct e4_xstorm_roce_req_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define 
E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1 -#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5 -#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 flags11; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4 -#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7 u8 flags13; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 -#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -8302,224 +8897,224 @@ struct xstorm_roce_req_conn_ag_ctx { __le32 orq_cons; }; -struct xstorm_roce_resp_conn_ag_ctx { +struct e4_xstorm_roce_resp_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1 -#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2 -#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 flags11; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; - __le16 word1; - __le16 irq_prod; - __le16 word3; - __le16 word4; - __le16 ereserved1; + __le16 irq_prod_shadow; + __le16 word2; __le16 irq_cons; + __le16 irq_prod; + __le16 e5_reserved1; + __le16 conn_dpi; u8 rxmit_opcode; u8 byte4; u8 byte5; @@ -8533,37 +9128,37 @@ struct xstorm_roce_resp_conn_ag_ctx { __le32 msn_and_syndrome; }; -struct ystorm_roce_req_conn_ag_ctx { +struct e4_ystorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define 
YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -8577,37 +9172,37 @@ struct ystorm_roce_req_conn_ag_ctx { __le32 reg3; }; -struct ystorm_roce_resp_conn_ag_ctx { +struct e4_ystorm_roce_resp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define 
YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -8621,6 +9216,7 @@ struct ystorm_roce_resp_conn_ag_ctx { __le32 reg3; }; +/* Roce doorbell data */ enum roce_flavor { PLAIN_ROCE, RROCE_IPV4, @@ -8628,228 +9224,231 @@ enum roce_flavor { MAX_ROCE_FLAVOR }; +/* The iwarp storm context of Ystorm */ struct ystorm_iwarp_conn_st_ctx { __le32 reserved[4]; }; +/* The iwarp storm context of Pstorm */ struct pstorm_iwarp_conn_st_ctx { __le32 reserved[36]; }; +/* The iwarp storm context of Xstorm */ struct xstorm_iwarp_conn_st_ctx { __le32 reserved[44]; }; -struct xstorm_iwarp_conn_ag_ctx { +struct e4_xstorm_iwarp_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1 +#define 
E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7 u8 flags2; -#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 u8 flags3; -#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2 -#define 
XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 u8 flags7; -#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define 
XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define 
XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 u8 flags11; -#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define 
XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1 -#define 
XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 physical_q1; @@ -8897,89 +9496,89 @@ struct xstorm_iwarp_conn_ag_ctx { __le32 reg17; }; -struct tstorm_iwarp_conn_ag_ctx { +struct e4_tstorm_iwarp_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3 -#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 -#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6 +#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0 -#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2 -#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 -#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6 
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0 +#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2 +#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2 -#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5 -#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6 -#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5 +#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6 +#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 u8 flags4; -#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6 -#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7 
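[Editor's aside — illustrative only, not part of the patch.] Every flagsN byte in these aggregative-context structs packs several sub-fields, each described by a _MASK/_SHIFT macro pair like the E4_TSTORM_IWARP_CONN_AG_CTX_* definitions in this hunk. Below is a minimal standalone sketch of how such pairs are consumed; the GET_FIELD()/SET_FIELD() helpers follow the qed driver's naming convention but are defined locally here as stand-ins, and only the two mask/shift values quoted from the hunk above are taken from the patch itself.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins modeled on the driver's field-accessor convention. */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, flag) \
	do { \
		(value) &= ~((name##_MASK) << (name##_SHIFT)); \
		(value) |= (((flag) & (name##_MASK)) << (name##_SHIFT)); \
	} while (0)

/* Two fields of the e4_tstorm_iwarp_conn_ag_ctx flags3 byte, with the
 * mask/shift values shown in the hunk above: a 2-bit completion-flag
 * field in bits [1:0] and a 1-bit enable in bit 4. */
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4

int main(void)
{
	uint8_t flags3 = 0;

	SET_FIELD(flags3, E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE, 2);
	SET_FIELD(flags3, E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN, 1);
	/* Prints: flags3 = 0x12, cf = 2 */
	printf("flags3 = 0x%02x, cf = %u\n", (unsigned)flags3,
	       (unsigned)GET_FIELD(flags3, E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE));
	return 0;
}

As the hunks themselves show, wherever a definition only gains the E4_ prefix its mask and shift values are unchanged — the rename scopes the existing E4-generation layout, alongside the e5_reserved fields the patch adds elsewhere.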
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5 -#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 unaligned_nxt_seq; @@ -9001,51 +9600,56 @@ struct tstorm_iwarp_conn_ag_ctx { __le32 last_hq_sequence; }; +/* The iwarp storm context of Tstorm */ struct tstorm_iwarp_conn_st_ctx { __le32 reserved[60]; }; +/* The iwarp storm context of Mstorm */ struct mstorm_iwarp_conn_st_ctx { __le32 reserved[32]; }; +/* The iwarp storm context of Ustorm */ struct ustorm_iwarp_conn_st_ctx { __le32 reserved[24]; }; -struct iwarp_conn_context { +/* iwarp connection context */ +struct e4_iwarp_conn_context { struct ystorm_iwarp_conn_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct pstorm_iwarp_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct xstorm_iwarp_conn_st_ctx xstorm_st_context; struct regpair xstorm_st_padding[2]; - struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context; - struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context; + struct 
e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context; + struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context; struct timers_context timer_context; - struct ustorm_rdma_conn_ag_ctx ustorm_ag_context; + struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context; struct tstorm_iwarp_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; struct mstorm_iwarp_conn_st_ctx mstorm_st_context; struct ustorm_iwarp_conn_st_ctx ustorm_st_context; }; +/* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */ struct iwarp_create_qp_ramrod_data { u8 flags; #define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1 -#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0 -#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1 -#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1 -#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 -#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2 -#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 -#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3 -#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 -#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4 -#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1 -#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5 -#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3 -#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6 +#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0 +#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3 +#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4 +#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5 +#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3 +#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6 u8 reserved1; __le16 pd; __le16 sq_num_pages; @@ -9061,6 +9665,7 @@ struct iwarp_create_qp_ramrod_data { u8 reserved2[6]; }; +/* iWARP completion queue types */ enum iwarp_eqe_async_opcode { IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE, IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED, @@ -9083,6 +9688,7 @@ struct iwarp_eqe_data_tcp_async_completion { u8 reserved[5]; }; +/* iWARP completion queue types */ enum iwarp_eqe_sync_opcode { IWARP_EVENT_TYPE_TCP_OFFLOAD = 11, @@ -9095,6 +9701,7 @@ enum iwarp_eqe_sync_opcode { MAX_IWARP_EQE_SYNC_OPCODE }; +/* iWARP EQE completion status */ enum iwarp_fw_return_code { IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5, IWARP_CONN_ERROR_TCP_CONNECTION_RST, @@ -9125,54 +9732,60 @@ enum iwarp_fw_return_code { MAX_IWARP_FW_RETURN_CODE }; +/* unaligned opaque data received from LL2 */ struct iwarp_init_func_params { u8 ll2_ooo_q_index; u8 reserved1[7]; }; +/* iwarp func init ramrod data */ struct iwarp_init_func_ramrod_data { struct rdma_init_func_ramrod_data rdma; struct tcp_init_params tcp; struct iwarp_init_func_params iwarp; }; +/* iWARP QP - possible states to transition to */ enum iwarp_modify_qp_new_state_type { IWARP_MODIFY_QP_STATE_CLOSING = 1, - IWARP_MODIFY_QP_STATE_ERROR = - 2, + IWARP_MODIFY_QP_STATE_ERROR = 2, MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE }; +/* iwarp modify qp responder ramrod data */ struct iwarp_modify_qp_ramrod_data { __le16 transition_to_state; __le16 flags; -#define 
IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 -#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0 -#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 -#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1 -#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 -#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2 -#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1 +#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2 +#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1 #define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3 #define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1 -#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4 -#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF -#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4 +#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF +#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5 __le32 reserved3[3]; __le32 reserved4[8]; }; +/* MPA params for Enhanced mode */ struct mpa_rq_params { __le32 ird; __le32 ord; }; +/* MPA host Address-Len for private data */ struct mpa_ulp_buffer { struct regpair addr; __le16 len; __le16 reserved[3]; }; +/* iWARP MPA offload params common to Basic and Enhanced modes */ struct mpa_outgoing_params { u8 crc_needed; u8 reject; @@ -9181,6 +9794,9 @@ struct mpa_outgoing_params { struct mpa_ulp_buffer outgoing_ulp_buffer; }; +/* iWARP MPA offload params passed by driver to FW in MPA Offload Request + * Ramrod. 
+ */ struct iwarp_mpa_offload_ramrod_data { struct mpa_outgoing_params common; __le32 tcp_cid; @@ -9188,18 +9804,20 @@ struct iwarp_mpa_offload_ramrod_data { u8 tcp_connect_side; u8 rtr_pref; #define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK 0x7 -#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0 -#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F -#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3 +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0 +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3 u8 reserved2; struct mpa_ulp_buffer incoming_ulp_buffer; struct regpair async_eqe_output_buf; struct regpair handle_for_async; struct regpair shared_queue_addr; + __le16 rcv_wnd; u8 stats_counter_id; - u8 reserved3[15]; + u8 reserved3[13]; }; +/* iWARP TCP connection offload params passed by driver to FW */ struct iwarp_offload_params { struct mpa_ulp_buffer incoming_ulp_buffer; struct regpair async_eqe_output_buf; @@ -9211,22 +9829,24 @@ struct iwarp_offload_params { u8 reserved[10]; }; +/* iWARP query QP output params */ struct iwarp_query_qp_output_params { __le32 flags; #define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1 -#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0 +#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0 #define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF -#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1 +#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1 u8 reserved1[4]; }; +/* iWARP query QP ramrod data */ struct iwarp_query_qp_ramrod_data { struct regpair output_params_addr; }; +/* iWARP Ramrod Command IDs */ enum iwarp_ramrod_cmd_id { - IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = - 11, + IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11, IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, IWARP_RAMROD_CMD_ID_CREATE_QP, @@ -9236,22 +9856,28 @@ enum iwarp_ramrod_cmd_id { MAX_IWARP_RAMROD_CMD_ID }; +/* Per PF iWARP retransmit path statistics */ struct iwarp_rxmit_stats_drv { struct regpair tx_go_to_slow_start_event_cnt; struct regpair tx_fast_retransmit_event_cnt; }; +/* iWARP and TCP connection offload params passed by driver to FW in iWARP + * offload ramrod. 
+ */ struct iwarp_tcp_offload_ramrod_data { struct iwarp_offload_params iwarp; struct tcp_offload_params_opt2 tcp; }; +/* iWARP MPA negotiation types */ enum mpa_negotiation_mode { MPA_NEGOTIATION_TYPE_BASIC = 1, MPA_NEGOTIATION_TYPE_ENHANCED = 2, MAX_MPA_NEGOTIATION_MODE }; +/* iWARP MPA Enhanced mode RTR types */ enum mpa_rtr_type { MPA_RTR_TYPE_NONE = 0, MPA_RTR_TYPE_ZERO_SEND = 1, @@ -9264,113 +9890,114 @@ enum mpa_rtr_type { MAX_MPA_RTR_TYPE }; +/* unaligned opaque data received from LL2 */ struct unaligned_opaque_data { __le16 first_mpa_offset; u8 tcp_payload_offset; u8 flags; #define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK 0x1 -#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0 -#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1 -#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1 -#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F -#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2 +#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0 +#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1 +#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1 +#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F +#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2 __le32 cid; }; -struct mstorm_iwarp_conn_ag_ctx { +struct e4_mstorm_iwarp_conn_ag_ctx { u8 reserved; u8 state; u8 flags0; -#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3 -#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2 -#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3 +#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2 +#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0 -#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6 -#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0 +#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define 
E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 rcq_cons; __le16 rcq_cons_th; __le32 reg0; __le32 reg1; }; -struct ustorm_iwarp_conn_ag_ctx { +struct e4_ustorm_iwarp_conn_ag_ctx { u8 reserved; u8 byte1; u8 flags0; -#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 -#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 -#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 -#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define 
E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 u8 flags3; -#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0 -#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -9383,37 +10010,37 @@ struct ustorm_iwarp_conn_ag_ctx { __le16 word3; }; -struct ystorm_iwarp_conn_ag_ctx { +struct e4_ystorm_iwarp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -9427,6 +10054,7 @@ struct ystorm_iwarp_conn_ag_ctx { __le32 reg3; }; +/* The fcoe storm context of Ystorm */ struct ystorm_fcoe_conn_st_ctx { u8 func_mode; u8 cos; @@ -9442,45 +10070,49 @@ struct ystorm_fcoe_conn_st_ctx { struct regpair reserved; __le16 min_frame_size; u8 protection_info_flags; -#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 -#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0 -#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 -#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1 -#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F -#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2 +#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0 +#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1 +#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F +#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2 u8 dst_protection_per_mss; u8 src_protection_per_mss; u8 ptu_log_page_size; u8 flags; -#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 -#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0 -#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 -#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1 -#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F -#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2 +#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0 +#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1 +#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F +#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2 u8 fcp_xfer_size; }; +/* FCoE 16-bits vlan structure */ struct fcoe_vlan_fields { __le16 fields; -#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF -#define 
FCOE_VLAN_FIELDS_VID_SHIFT 0 -#define FCOE_VLAN_FIELDS_CLI_MASK 0x1 -#define FCOE_VLAN_FIELDS_CLI_SHIFT 12 -#define FCOE_VLAN_FIELDS_PRI_MASK 0x7 -#define FCOE_VLAN_FIELDS_PRI_SHIFT 13 +#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF +#define FCOE_VLAN_FIELDS_VID_SHIFT 0 +#define FCOE_VLAN_FIELDS_CLI_MASK 0x1 +#define FCOE_VLAN_FIELDS_CLI_SHIFT 12 +#define FCOE_VLAN_FIELDS_PRI_MASK 0x7 +#define FCOE_VLAN_FIELDS_PRI_SHIFT 13 }; +/* FCoE 16-bits vlan union */ union fcoe_vlan_field_union { struct fcoe_vlan_fields fields; __le16 val; }; +/* FCoE 16-bits vlan, vif union */ union fcoe_vlan_vif_field_union { union fcoe_vlan_field_union vlan; __le16 vif; }; +/* Ethernet context section */ struct pstorm_fcoe_eth_context_section { u8 remote_addr_3; u8 remote_addr_2; @@ -9500,6 +10132,7 @@ struct pstorm_fcoe_eth_context_section { __le16 inner_eth_type; }; +/* The fcoe storm context of Pstorm */ struct pstorm_fcoe_conn_st_ctx { u8 func_mode; u8 cos; @@ -9513,16 +10146,18 @@ struct pstorm_fcoe_conn_st_ctx { u8 sid_1; u8 sid_0; u8 flags; -#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1 -#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0 -#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1 -#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1 -#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 -#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2 -#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 -#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3 -#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0xF -#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 4 +#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0 +#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1 +#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2 +#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3 +#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_SHIFT 4 +#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x7 +#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 5 u8 did_2; u8 did_1; u8 did_0; @@ -9532,6 +10167,7 @@ struct pstorm_fcoe_conn_st_ctx { u8 reserved1; }; +/* The fcoe storm context of Xstorm */ struct xstorm_fcoe_conn_st_ctx { u8 func_mode; u8 src_mac_index; @@ -9539,16 +10175,16 @@ struct xstorm_fcoe_conn_st_ctx { u8 cached_wqes_avail; __le16 stat_ram_addr; u8 flags; -#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1 -#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0 -#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 -#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1 -#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1 -#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2 -#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3 -#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3 -#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7 -#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5 +#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2 +#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3 
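The fcoe_vlan_fields layout above packs VID (12 bits), CLI (1 bit) and PRI (3 bits) into the 16-bit fields member, which the context stores as __le16. A sketch of building the host-order value with plain shifts (the driver would then apply cpu_to_le16() before writing it into the context); fcoe_vlan_pack() is a hypothetical helper for illustration, not a qed function:

#include <stdint.h>
#include <stdio.h>

/* Values copied from the fcoe_vlan_fields defines above */
#define FCOE_VLAN_FIELDS_VID_MASK	0xFFF
#define FCOE_VLAN_FIELDS_VID_SHIFT	0
#define FCOE_VLAN_FIELDS_CLI_MASK	0x1
#define FCOE_VLAN_FIELDS_CLI_SHIFT	12
#define FCOE_VLAN_FIELDS_PRI_MASK	0x7
#define FCOE_VLAN_FIELDS_PRI_SHIFT	13

/* Hypothetical: compose VID/CLI/PRI into one 16-bit host-order value */
static uint16_t fcoe_vlan_pack(uint16_t vid, uint8_t cli, uint8_t pri)
{
	return (uint16_t)(((vid & FCOE_VLAN_FIELDS_VID_MASK) << FCOE_VLAN_FIELDS_VID_SHIFT) |
			  ((cli & FCOE_VLAN_FIELDS_CLI_MASK) << FCOE_VLAN_FIELDS_CLI_SHIFT) |
			  ((pri & FCOE_VLAN_FIELDS_PRI_MASK) << FCOE_VLAN_FIELDS_PRI_SHIFT));
}

int main(void)
{
	/* VLAN ID 100, CLI clear, priority 3 -> 0x6064 */
	printf("0x%04x\n", fcoe_vlan_pack(100, 0, 3));
	return 0;
}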
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3 +#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7 +#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5 u8 cached_wqes_offset; u8 reserved2; u8 eth_hdr_size; @@ -9574,18 +10210,18 @@ struct xstorm_fcoe_conn_st_ctx { u8 fcp_cmd_byte_credit; u8 fcp_rsp_byte_credit; __le16 protection_info; -#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1 -#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0 -#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 -#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1 -#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 -#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2 -#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1 -#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3 -#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF -#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4 -#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF -#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8 +#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0 +#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1 +#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2 +#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3 +#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF +#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4 +#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF +#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8 __le16 xferq_pbl_next_index; __le16 page_size; u8 mid_seq; @@ -9594,216 +10230,216 @@ struct xstorm_fcoe_conn_st_ctx { struct fcoe_wqe cached_wqes[16]; }; -struct xstorm_fcoe_conn_ag_ctx { +struct e4_xstorm_fcoe_conn_ag_ctx { u8 reserved0; - u8 fcoe_state; + u8 state; u8 flags0; -#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define 
E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7 u8 flags2; -#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 
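Since these MASK/SHIFT tables are machine-generated and were just mass-renamed, a cheap transcription check is a compile-time assertion that the fields of a flags byte neither overlap nor leave gaps. A standalone sketch over the CF0..CF3 pairs of flags2 above, using C11 _Static_assert where kernel code would use BUILD_BUG_ON; the short local names are stand-ins for the full E4_XSTORM_FCOE_CONN_AG_CTX_* identifiers:

/* CF0..CF3 values copied from flags2 of e4_xstorm_fcoe_conn_ag_ctx */
#define CF0_MASK  0x3
#define CF0_SHIFT 0
#define CF1_MASK  0x3
#define CF1_SHIFT 2
#define CF2_MASK  0x3
#define CF2_SHIFT 4
#define CF3_MASK  0x3
#define CF3_SHIFT 6

#define FLD(n) ((unsigned int)(n ## _MASK) << (n ## _SHIFT))

/* No two fields may share a bit... */
_Static_assert((FLD(CF0) & FLD(CF1)) == 0, "CF0/CF1 overlap");
_Static_assert(((FLD(CF0) | FLD(CF1)) & FLD(CF2)) == 0, "CF2 overlaps");
_Static_assert(((FLD(CF0) | FLD(CF1) | FLD(CF2)) & FLD(CF3)) == 0,
	       "CF3 overlaps");
/* ...and together they must tile the whole byte. */
_Static_assert((FLD(CF0) | FLD(CF1) | FLD(CF2) | FLD(CF3)) == 0xFF,
	       "flags2 is not fully covered");

int main(void) { return 0; }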
-#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6 u8 flags7; -#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3 -#define 
XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define 
XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7 u8 flags12; -#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define 
XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0 +#define 
E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -9831,6 +10467,7 @@ struct xstorm_fcoe_conn_ag_ctx { __le32 reg8; }; +/* The fcoe storm context of Ustorm */ struct ustorm_fcoe_conn_st_ctx { struct regpair respq_pbl_addr; __le16 num_pages_in_pbl; @@ -9840,150 +10477,150 @@ struct ustorm_fcoe_conn_st_ctx { u8 reserved[2]; }; -struct tstorm_fcoe_conn_ag_ctx { +struct e4_tstorm_fcoe_conn_ag_ctx { u8 reserved0; - u8 fcoe_state; + u8 state; u8 flags0; -#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6 +#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5 +#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6 u8 flags1; -#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0 -#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6 +#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2 +#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 
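Each two-bit completion flag (CF) in these contexts is paired with a one-bit enable elsewhere in the same structure; in e4_tstorm_fcoe_conn_ag_ctx just below, DUMMY_TIMER_CF occupies bits 7:6 of flags0 while DUMMY_TIMER_CF_EN sits at bit 4 of flags3. A sketch of that gating with open-coded shifts (equivalent to GET_FIELD); the shift values are taken from this hunk, but the gating function itself is a simplification for illustration, not the firmware's actual logic:

#include <stdint.h>
#include <stdio.h>

#define DUMMY_TIMER_CF_SHIFT	6	/* 2-bit field in flags0 */
#define DUMMY_TIMER_CF_EN_SHIFT	4	/* 1-bit enable in flags3 */

/* A CF value is only acted upon when its enable bit is set. */
static int dummy_timer_cf_active(uint8_t flags0, uint8_t flags3)
{
	unsigned int cf = (flags0 >> DUMMY_TIMER_CF_SHIFT) & 0x3;
	unsigned int en = (flags3 >> DUMMY_TIMER_CF_EN_SHIFT) & 0x1;

	return en && cf;
}

int main(void)
{
	printf("%d\n", dummy_timer_cf_active(0x40, 0x10)); /* cf=1, en=1 -> 1 */
	printf("%d\n", dummy_timer_cf_active(0x40, 0x00)); /* enable clear -> 0 */
	return 0;
}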
-#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4 -#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2 +#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4 +#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 u8 flags4; -#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define 
TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; }; -struct ustorm_fcoe_conn_ag_ctx { +struct e4_ustorm_fcoe_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 -#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 -#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 -#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 -#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 -#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define 
USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 -#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 -#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -9996,72 +10633,76 @@ struct ustorm_fcoe_conn_ag_ctx { __le16 word3; }; +/* The fcoe storm context of Tstorm */ struct tstorm_fcoe_conn_st_ctx { __le16 stat_ram_addr; __le16 rx_max_fc_payload_len; __le16 e_d_tov_val; u8 flags; -#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1 -#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0 -#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1 -#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1 
-#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 0x3F -#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2 +#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1 +#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0 +#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1 +#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1 +#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 0x3F +#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2 u8 timers_cleanup_invocation_cnt; __le32 reserved1[2]; - __le32 dst_mac_address_bytes0to3; - __le16 dst_mac_address_bytes4to5; + __le32 dst_mac_address_bytes_0_to_3; + __le16 dst_mac_address_bytes_4_to_5; __le16 ramrod_echo; u8 flags1; -#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3 -#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0 -#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F -#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2 - u8 q_relative_offset; +#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3 +#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0 +#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F +#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2 + u8 cq_relative_offset; + u8 cmdq_relative_offset; u8 bdq_resource_id; - u8 reserved0[5]; + u8 reserved0[4]; }; -struct mstorm_fcoe_conn_ag_ctx { +struct e4_mstorm_fcoe_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define 
E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; +/* Fast path part of the fcoe storm context of Mstorm */ struct fcoe_mstorm_fcoe_conn_st_ctx_fp { __le16 xfer_prod; - __le16 reserved1; + u8 num_cqs; + u8 reserved1; u8 protection_info; #define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK 0x1 #define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0 @@ -10073,6 +10714,7 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_fp { u8 reserved2[2]; }; +/* Non fast path part of the fcoe storm context of Mstorm */ struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp { __le16 conn_id; __le16 stat_ram_addr; @@ -10088,37 +10730,46 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp { struct regpair reserved2[3]; }; +/* The fcoe storm context of Mstorm */ struct mstorm_fcoe_conn_st_ctx { struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp; struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp; }; -struct fcoe_conn_context { +/* fcoe connection context */ +struct e4_fcoe_conn_context { struct ystorm_fcoe_conn_st_ctx ystorm_st_context; struct pstorm_fcoe_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct xstorm_fcoe_conn_st_ctx xstorm_st_context; - struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context; + struct e4_xstorm_fcoe_conn_ag_ctx xstorm_ag_context; struct regpair xstorm_ag_padding[6]; struct ustorm_fcoe_conn_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2]; - struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context; + struct e4_tstorm_fcoe_conn_ag_ctx tstorm_ag_context; struct regpair tstorm_ag_padding[2]; struct timers_context timer_context; - struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context; + struct e4_ustorm_fcoe_conn_ag_ctx ustorm_ag_context; struct tstorm_fcoe_conn_st_ctx tstorm_st_context; - struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context; + struct e4_mstorm_fcoe_conn_ag_ctx mstorm_ag_context; struct mstorm_fcoe_conn_st_ctx mstorm_st_context; }; +/* FCoE connection offload params passed by driver to FW in FCoE offload + * ramrod. + */ struct fcoe_conn_offload_ramrod_params { struct fcoe_conn_offload_ramrod_data offload_ramrod_data; }; +/* FCoE connection terminate params passed by driver to FW in FCoE terminate + * conn ramrod. + */ struct fcoe_conn_terminate_ramrod_params { struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data; }; +/* FCoE event type */ enum fcoe_event_type { FCOE_EVENT_INIT_FUNC, FCOE_EVENT_DESTROY_FUNC, @@ -10129,10 +10780,12 @@ enum fcoe_event_type { MAX_FCOE_EVENT_TYPE }; +/* FCoE init params passed by driver to FW in FCoE init ramrod */ struct fcoe_init_ramrod_params { struct fcoe_init_func_ramrod_data init_ramrod_data; }; +/* FCoE ramrod Command IDs */ enum fcoe_ramrod_cmd_id { FCOE_RAMROD_CMD_ID_INIT_FUNC, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, @@ -10142,41 +10795,44 @@ enum fcoe_ramrod_cmd_id { MAX_FCOE_RAMROD_CMD_ID }; +/* FCoE statistics params buffer passed by driver to FW in FCoE statistics + * ramrod. 
+ */ struct fcoe_stat_ramrod_params { struct fcoe_stat_ramrod_data stat_ramrod_data; }; -struct ystorm_fcoe_conn_ag_ctx { +struct e4_ystorm_fcoe_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10190,230 +10846,233 @@ struct ystorm_fcoe_conn_ag_ctx { __le32 reg3; }; +/* The iscsi storm connection context of Ystorm */ struct ystorm_iscsi_conn_st_ctx { - __le32 reserved[4]; + __le32 reserved[8]; }; +/* Combined iSCSI and TCP storm connection of Pstorm */ struct pstorm_iscsi_tcp_conn_st_ctx { __le32 tcp[32]; __le32 iscsi[4]; }; +/* The combined tcp and iscsi storm context of Xstorm */ struct xstorm_iscsi_tcp_conn_st_ctx { - __le32 reserved_iscsi[40]; __le32 reserved_tcp[4]; + __le32 reserved_iscsi[44]; }; -struct xstorm_iscsi_conn_ag_ctx { +struct e4_xstorm_iscsi_conn_ag_ctx { u8 cdu_validation; u8 state; u8 flags0; -#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define 
XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7 u8 flags2; -#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 
0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 u8 flags3; -#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6 u8 flags6; -#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2 +#define 
E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 u8 flags7; -#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4 -#define 
XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7 u8 flags11; -#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define 
XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define 
XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 physical_q1; @@ -10449,7 +11108,7 @@ struct xstorm_iscsi_conn_ag_ctx { u8 byte13; u8 byte14; u8 byte15; - u8 ereserved; + u8 e5_reserved; __le16 word11; __le32 reg10; __le32 reg11; @@ -10461,89 +11120,89 @@ struct xstorm_iscsi_conn_ag_ctx { __le32 reg17; }; -struct tstorm_iscsi_conn_ag_ctx { +struct e4_tstorm_iscsi_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0 -#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2 -#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 -#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5 -#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6 -#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 +#define 
E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 u8 flags4; -#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 -#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define 
E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -10558,63 +11217,63 @@ struct tstorm_iscsi_conn_ag_ctx { __le16 word0; }; -struct ustorm_iscsi_conn_ag_ctx { +struct e4_ustorm_iscsi_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 -#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2 -#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 -#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4 -#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 -#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4 -#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5 -#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define 
E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10627,113 +11286,117 @@ struct ustorm_iscsi_conn_ag_ctx { __le16 word3; }; +/* The iscsi storm connection context of Tstorm */ struct tstorm_iscsi_conn_st_ctx { - __le32 reserved[40]; + __le32 reserved[44]; }; -struct mstorm_iscsi_conn_ag_ctx { +struct e4_mstorm_iscsi_conn_ag_ctx { u8 reserved; u8 state; u8 flags0; -#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define 
MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; +/* Combined iSCSI and TCP storm connection of Mstorm */ struct mstorm_iscsi_tcp_conn_st_ctx { __le32 reserved_tcp[20]; - __le32 reserved_iscsi[8]; + __le32 reserved_iscsi[12]; }; +/* The iscsi storm context of Ustorm */ struct ustorm_iscsi_conn_st_ctx { __le32 reserved[52]; }; -struct iscsi_conn_context { +/* iscsi connection context */ +struct e4_iscsi_conn_context { struct ystorm_iscsi_conn_st_ctx ystorm_st_context; - struct regpair ystorm_st_padding[2]; struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct pb_context xpb2_context; struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context; struct regpair xstorm_st_padding[2]; - struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context; - struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context; + struct e4_xstorm_iscsi_conn_ag_ctx xstorm_ag_context; + struct e4_tstorm_iscsi_conn_ag_ctx tstorm_ag_context; struct regpair tstorm_ag_padding[2]; struct timers_context timer_context; - struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context; + struct e4_ustorm_iscsi_conn_ag_ctx ustorm_ag_context; struct pb_context upb_context; struct tstorm_iscsi_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; - struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context; + struct e4_mstorm_iscsi_conn_ag_ctx mstorm_ag_context; struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context; struct ustorm_iscsi_conn_st_ctx ustorm_st_context; }; +/* iSCSI init params passed by driver to FW in iSCSI init ramrod */ struct iscsi_init_ramrod_params { struct iscsi_spe_func_init iscsi_init_spe; struct tcp_init_params tcp_init; }; -struct ystorm_iscsi_conn_ag_ctx { +struct e4_ystorm_iscsi_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define 
YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -11613,7 +12276,7 @@ struct public_drv_mb { #define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF #define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 -#define DRV_MB_PARAM_NVM_LEN_SHIFT 24 +#define DRV_MB_PARAM_NVM_LEN_OFFSET 24 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index a05feb38c6ee..fca2dbd93ad9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -807,3 +807,71 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn, return rc; } +int qed_dmae_sanity(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, const char *phase) +{ + u32 size = PAGE_SIZE / 2, val; + struct qed_dmae_params params; + int rc = 0; + dma_addr_t p_phys; + void *p_virt; + u32 *p_tmp; + + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + 2 * size, &p_phys, GFP_KERNEL); + if (!p_virt) { + DP_NOTICE(p_hwfn, + "DMAE sanity [%s]: failed to allocate memory\n", + phase); + return 
-ENOMEM; + } + + /* Fill the bottom half of the allocated memory with a known pattern */ + for (p_tmp = (u32 *)p_virt; + p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) { + /* Save the address itself as the value */ + val = (u32)(uintptr_t)p_tmp; + *p_tmp = val; + } + + /* Zero the top half of the allocated memory */ + memset((u8 *)p_virt + size, 0, size); + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n", + phase, + (u64)p_phys, + p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size); + + memset(¶ms, 0, sizeof(params)); + rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size, + size / 4 /* size_in_dwords */, ¶ms); + if (rc) { + DP_NOTICE(p_hwfn, + "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n", + phase, rc); + goto out; + } + + /* Verify that the top half of the allocated memory has the pattern */ + for (p_tmp = (u32 *)((u8 *)p_virt + size); + p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) { + /* The corresponding address in the bottom half */ + val = (u32)(uintptr_t)p_tmp - size; + + if (*p_tmp != val) { + DP_NOTICE(p_hwfn, + "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n", + phase, + (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt), + p_tmp, *p_tmp, val); + rc = -EINVAL; + goto out; + } + } + +out: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys); + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index f2505c691c26..8db2839a8ec8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -299,4 +299,8 @@ union qed_qm_pq_params { int qed_init_fw_data(struct qed_dev *cdev, const u8 *fw_data); + +int qed_dmae_sanity(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, const char *phase); + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index b069ad088269..18fb5062a83d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -31,6 +31,7 @@ */ #include <linux/types.h> +#include <linux/crc8.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/slab.h> @@ -40,102 +41,197 @@ #include "qed_init_ops.h" #include "qed_reg_addr.h" +#define CDU_VALIDATION_DEFAULT_CFG 61 + +static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = { + {400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */ + {528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */ + {608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */ +}; + +static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { + {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */ +}; + /* General constants */ #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \ QM_PQ_ELEMENT_SIZE, \ 0x1000) : 0) #define QM_PQ_SIZE_256B(pq_size) (pq_size ? 
DIV_ROUND_UP(pq_size, \
							   0x100) - 1 : 0)
-#define QM_INVALID_PQ_ID 0xffff
+#define QM_INVALID_PQ_ID	0xffff
+
 /* Feature enable */
-#define QM_BYPASS_EN 1
-#define QM_BYTE_CRD_EN 1
+#define QM_BYPASS_EN	1
+#define QM_BYTE_CRD_EN	1
+
 /* Other PQ constants */
-#define QM_OTHER_PQS_PER_PF 4
+#define QM_OTHER_PQS_PER_PF	4
+
 /* WFQ constants */
-#define QM_WFQ_UPPER_BOUND 62500000
-#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
-#define QM_WFQ_VP_PQ_PF_SHIFT 5
-#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
-#define QM_WFQ_MAX_INC_VAL 43750000
+
+/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_WFQ_UPPER_BOUND	62500000
+
+/* Bit of VOQ in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_VOQ_SHIFT	0
+
+/* Bit of PF in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_PF_E4_SHIFT	5
+
+/* 0x9000 = 4*9*1024 */
+#define QM_WFQ_INC_VAL(weight)	((weight) * 0x9000)
+
+/* Max WFQ increment value is 0.7 * upper bound */
+#define QM_WFQ_MAX_INC_VAL	((QM_WFQ_UPPER_BOUND * 7) / 10)

 /* RL constants */
-#define QM_RL_UPPER_BOUND 62500000
-#define QM_RL_PERIOD 5 /* in us */
-#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
-#define QM_RL_MAX_INC_VAL 43750000
-#define QM_RL_INC_VAL(rate) max_t(u32, \
-				  (u32)(((rate ? rate : \
-					  1000000) * \
-					 QM_RL_PERIOD * \
-					 101) / (8 * 100)), 1)
+
+/* Period in us */
+#define QM_RL_PERIOD	5
+
+/* Period in 25MHz cycles */
+#define QM_RL_PERIOD_CLK_25M	(25 * QM_RL_PERIOD)
+
+/* RL increment value - rate is specified in mbps */
+#define QM_RL_INC_VAL(rate) ({	\
+	typeof(rate) __rate = (rate); \
+	max_t(u32, \
+	      (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \
+		    (8 * 100)), \
+	      1); })
+
+/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_RL_UPPER_BOUND	62500000
+
+/* Max PF RL increment value is 0.7 * upper bound */
+#define QM_PF_RL_MAX_INC_VAL	((QM_PF_RL_UPPER_BOUND * 7) / 10)
+
+/* Vport RL Upper bound, link speed is in Mbps */
+#define QM_VP_RL_UPPER_BOUND(speed)	((u32)max_t(u32, \
+						    QM_RL_INC_VAL(speed), \
+						    9700 + 1000))
+
+/* Max Vport RL increment value is the Vport RL upper bound */
+#define QM_VP_RL_MAX_INC_VAL(speed)	QM_VP_RL_UPPER_BOUND(speed)
+
+/* Vport RL credit threshold in case of QM bypass */
+#define QM_VP_RL_BYPASS_THRESH_SPEED	(QM_VP_RL_UPPER_BOUND(10000) - 1)
+
 /* AFullOprtnstcCrdMask constants */
-#define QM_OPPOR_LINE_VOQ_DEF 1
-#define QM_OPPOR_FW_STOP_DEF 0
-#define QM_OPPOR_PQ_EMPTY_DEF 1
+#define QM_OPPOR_LINE_VOQ_DEF	1
+#define QM_OPPOR_FW_STOP_DEF	0
+#define QM_OPPOR_PQ_EMPTY_DEF	1
+
 /* Command Queue constants */
-#define PBF_CMDQ_PURE_LB_LINES 150
-#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \
-		PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
-		(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
-		 PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
-#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) ( \
-		PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
-		(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
-		 PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
-#define QM_VOQ_LINE_CRD(pbf_cmd_lines) ((((pbf_cmd_lines) - \
-					  4) * \
-					 2) | QM_LINE_CRD_REG_SIGN_BIT)
+
+/* Pure LB CmdQ lines (+spare) */
+#define PBF_CMDQ_PURE_LB_LINES	150
+
+#define PBF_CMDQ_LINES_E5_RSVD_RATIO	8
+
+#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
+	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
+	 (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+		      PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+
+#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
+	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
+	 (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+		      PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
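The credit arithmetic in QM_RL_INC_VAL() above is dense, so a small stand-alone mirror of it can help when sanity-checking rate-limiter values. The sketch below is illustrative only and not part of the patch; rl_inc_val() is a hypothetical user-space stand-in for the kernel macro. The rate is given in Mbps, so rate * QM_RL_PERIOD yields bits per 5 us period, dividing by 8 converts to bytes, and the 101/100 factor adds roughly 1% headroom.

#include <stdio.h>
#include <stdint.h>

#define QM_RL_PERIOD 5 /* us, as in the macro block above */

/* User-space mirror of QM_RL_INC_VAL(): bytes of credit granted per
 * rate-limiter period for a rate given in Mbps. A rate of 0 means
 * "unlimited" and falls back to 1000000 Mbps, exactly as the macro does.
 */
static uint32_t rl_inc_val(uint32_t rate_mbps)
{
	uint64_t rate = rate_mbps ? rate_mbps : 1000000;
	uint64_t inc = (rate * QM_RL_PERIOD * 101) / (8 * 100);

	return inc > 1 ? (uint32_t)inc : 1; /* the max_t(u32, ..., 1) step */
}

int main(void)
{
	/* 25000 Mbps -> 25000 bits/us * 5 us = 125000 bits = 15625 bytes,
	 * plus the 1% headroom factor -> 15781 bytes of credit per period.
	 */
	printf("inc_val(25000) = %u\n", (unsigned)rl_inc_val(25000));
	printf("inc_val(0)     = %u\n", (unsigned)rl_inc_val(0));
	return 0;
}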
+ +#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \ + ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT) + /* BTB: blocks constants (block size = 256B) */ -#define BTB_JUMBO_PKT_BLOCKS 38 -#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS -#define BTB_PURE_LB_FACTOR 10 -#define BTB_PURE_LB_RATIO 7 + +/* 256B blocks in 9700B packet */ +#define BTB_JUMBO_PKT_BLOCKS 38 + +/* Headroom per-port */ +#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS +#define BTB_PURE_LB_FACTOR 10 + +/* Factored (hence really 0.7) */ +#define BTB_PURE_LB_RATIO 7 + /* QM stop command constants */ -#define QM_STOP_PQ_MASK_WIDTH 32 -#define QM_STOP_CMD_ADDR 2 -#define QM_STOP_CMD_STRUCT_SIZE 2 -#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0 -#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0 -#define QM_STOP_CMD_PAUSE_MASK_MASK -1 -#define QM_STOP_CMD_GROUP_ID_OFFSET 1 -#define QM_STOP_CMD_GROUP_ID_SHIFT 16 -#define QM_STOP_CMD_GROUP_ID_MASK 15 -#define QM_STOP_CMD_PQ_TYPE_OFFSET 1 -#define QM_STOP_CMD_PQ_TYPE_SHIFT 24 -#define QM_STOP_CMD_PQ_TYPE_MASK 1 -#define QM_STOP_CMD_MAX_POLL_COUNT 100 -#define QM_STOP_CMD_POLL_PERIOD_US 500 +#define QM_STOP_PQ_MASK_WIDTH 32 +#define QM_STOP_CMD_ADDR 2 +#define QM_STOP_CMD_STRUCT_SIZE 2 +#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0 +#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0 +#define QM_STOP_CMD_PAUSE_MASK_MASK -1 +#define QM_STOP_CMD_GROUP_ID_OFFSET 1 +#define QM_STOP_CMD_GROUP_ID_SHIFT 16 +#define QM_STOP_CMD_GROUP_ID_MASK 15 +#define QM_STOP_CMD_PQ_TYPE_OFFSET 1 +#define QM_STOP_CMD_PQ_TYPE_SHIFT 24 +#define QM_STOP_CMD_PQ_TYPE_MASK 1 +#define QM_STOP_CMD_MAX_POLL_COUNT 100 +#define QM_STOP_CMD_POLL_PERIOD_US 500 /* QM command macros */ -#define QM_CMD_STRUCT_SIZE(cmd) cmd ## \ - _STRUCT_SIZE -#define QM_CMD_SET_FIELD(var, cmd, field, \ - value) SET_FIELD(var[cmd ## _ ## field ## \ - _OFFSET], \ - cmd ## _ ## field, \ - value) -/* QM: VOQ macros */ -#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) * \ - (max_phys_tcs_per_port) + \ - (tc)) -#define LB_VOQ(port) ( \ - MAX_PHYS_VOQS + (port)) -#define VOQ(port, tc, max_phy_tcs_pr_port) \ - ((tc) < \ - LB_TC ? 
PHYS_VOQ(port, \ - tc, \ - max_phy_tcs_pr_port) \ - : LB_VOQ(port)) +#define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE +#define QM_CMD_SET_FIELD(var, cmd, field, value) \ + SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \ + cmd ## _ ## field, \ + value) + +#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \ + ext_voq, wrr) \ + do { \ + typeof(map) __map; \ + memset(&__map, 0, sizeof(__map)); \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \ + rl_valid); \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \ + vp_pq_id); \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VOQ, ext_voq); \ + SET_FIELD(__map.reg, \ + QM_RF_PQ_MAP_ ## chip ## _WRR_WEIGHT_GROUP, wrr); \ + STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \ + *((u32 *)&__map)); \ + (map) = __map; \ + } while (0) + +#define WRITE_PQ_INFO_TO_RAM 1 +#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \ + (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \ + ((rl_valid) << 22) | ((rl) << 24)) +#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \ + (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4) + /******************** INTERNAL IMPLEMENTATION *********************/ + +/* Returns the external VOQ number */ +static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn, + u8 port_id, u8 tc, u8 max_phys_tcs_per_port) +{ + if (tc == PURE_LB_TC) + return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id; + else + return port_id * max_phys_tcs_per_port + tc; +} + /* Prepare PF RL enable/disable runtime init values */ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) { STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0); if (pf_rl_en) { + u8 num_ext_voqs = MAX_NUM_VOQS_E4; + u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1; + /* Enable RLs for all VOQs */ - STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, - (1 << MAX_NUM_VOQS) - 1); + STORE_RT_REG(p_hwfn, + QM_REG_RLPFVOQENABLE_RT_OFFSET, + (u32)voq_bit_mask); + if (num_ext_voqs >= 32) + STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET, + (u32)(voq_bit_mask >> 32)); + /* Write RL period */ STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M); @@ -147,7 +243,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) if (QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, - QM_RL_UPPER_BOUND); + QM_PF_RL_UPPER_BOUND); } } @@ -181,7 +277,7 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en) if (QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, - QM_RL_UPPER_BOUND); + QM_VP_RL_BYPASS_THRESH_SPEED); } } @@ -202,15 +298,15 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en) * the specified VOQ. 
*/ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn, - u8 voq, u16 cmdq_lines) + u8 ext_voq, u16 cmdq_lines) { - u32 qm_line_crd; + u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines); - qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines); - OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), + OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), (u32)cmdq_lines); - STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd); - STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq, + STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq, + qm_line_crd); + STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq, qm_line_crd); } @@ -221,43 +317,52 @@ static void qed_cmdq_lines_rt_init( u8 max_phys_tcs_per_port, struct init_qm_port_params port_params[MAX_NUM_PORTS]) { - u8 tc, voq, port_id, num_tcs_in_port; + u8 tc, ext_voq, port_id, num_tcs_in_port; + u8 num_ext_voqs = MAX_NUM_VOQS_E4; + + /* Clear PBF lines of all VOQs */ + for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++) + STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0); - /* Clear PBF lines for all VOQs */ - for (voq = 0; voq < MAX_NUM_VOQS; voq++) - STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0); for (port_id = 0; port_id < max_ports_per_engine; port_id++) { - if (port_params[port_id].active) { - u16 phys_lines, phys_lines_per_tc; - - /* find #lines to divide between active phys TCs */ - phys_lines = port_params[port_id].num_pbf_cmd_lines - - PBF_CMDQ_PURE_LB_LINES; - /* find #lines per active physical TC */ - num_tcs_in_port = 0; - for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { - if (((port_params[port_id].active_phys_tcs >> - tc) & 0x1) == 1) - num_tcs_in_port++; - } + u16 phys_lines, phys_lines_per_tc; - phys_lines_per_tc = phys_lines / num_tcs_in_port; - /* init registers per active TC */ - for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { - if (((port_params[port_id].active_phys_tcs >> - tc) & 0x1) != 1) - continue; + if (!port_params[port_id].active) + continue; - voq = PHYS_VOQ(port_id, tc, - max_phys_tcs_per_port); - qed_cmdq_lines_voq_rt_init(p_hwfn, voq, - phys_lines_per_tc); - } + /* Find number of command queue lines to divide between the + * active physical TCs. In E5, 1/8 of the lines are reserved. + * the lines for pure LB TC are subtracted. 
+		 */
+		phys_lines = port_params[port_id].num_pbf_cmd_lines;
+		phys_lines -= PBF_CMDQ_PURE_LB_LINES;
+
+		/* Find #lines per active physical TC */
+		num_tcs_in_port = 0;
+		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
+			if (((port_params[port_id].active_phys_tcs >>
+			      tc) & 0x1) == 1)
+				num_tcs_in_port++;
+		phys_lines_per_tc = phys_lines / num_tcs_in_port;

-			/* init registers for pure LB TC */
-			qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
-						   PBF_CMDQ_PURE_LB_LINES);
+		/* Init registers per active TC */
+		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
+			ext_voq = qed_get_ext_voq(p_hwfn,
+						  port_id,
+						  tc, max_phys_tcs_per_port);
+			if (((port_params[port_id].active_phys_tcs >>
+			      tc) & 0x1) == 1)
+				qed_cmdq_lines_voq_rt_init(p_hwfn,
+							   ext_voq,
+							   phys_lines_per_tc);
 		}
+
+		/* Init registers for pure LB TC */
+		ext_voq = qed_get_ext_voq(p_hwfn,
+					  port_id,
+					  PURE_LB_TC, max_phys_tcs_per_port);
+		qed_cmdq_lines_voq_rt_init(p_hwfn,
+					   ext_voq, PBF_CMDQ_PURE_LB_LINES);
 	}
 }
@@ -268,11 +373,9 @@ static void qed_btb_blocks_rt_init(
 	struct init_qm_port_params port_params[MAX_NUM_PORTS])
 {
 	u32 usable_blocks, pure_lb_blocks, phys_blocks;
-	u8 tc, voq, port_id, num_tcs_in_port;
+	u8 tc, ext_voq, port_id, num_tcs_in_port;

 	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
-		u32 temp;
-
 		if (!port_params[port_id].active)
 			continue;
@@ -280,13 +383,14 @@ static void qed_btb_blocks_rt_init(
 		usable_blocks = port_params[port_id].num_btb_blocks -
 				BTB_HEADROOM_BLOCKS;

-		/* find blocks per physical TC */
+		/* Find blocks per physical TC. Use factor to avoid floating
+		 * arithmetic.
+		 */
 		num_tcs_in_port = 0;
-		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
 			if (((port_params[port_id].active_phys_tcs >>
 			      tc) & 0x1) == 1)
 				num_tcs_in_port++;
-		}
 		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
 				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
@@ -299,47 +403,55 @@ static void qed_btb_blocks_rt_init(
 		/* Init physical TCs */
 		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
 			if (((port_params[port_id].active_phys_tcs >>
-			      tc) & 0x1) != 1)
-				continue;
-
-			voq = PHYS_VOQ(port_id, tc,
-				       max_phys_tcs_per_port);
-			STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
-				     phys_blocks);
+			      tc) & 0x1) == 1) {
+				ext_voq =
+					qed_get_ext_voq(p_hwfn,
+							port_id,
+							tc,
+							max_phys_tcs_per_port);
+				STORE_RT_REG(p_hwfn,
+					     PBF_BTB_GUARANTEED_RT_OFFSET
+					     (ext_voq), phys_blocks);
+			}
 		}

 		/* Init pure LB TC */
-		temp = LB_VOQ(port_id);
-		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
+		ext_voq = qed_get_ext_voq(p_hwfn,
+					  port_id,
+					  PURE_LB_TC, max_phys_tcs_per_port);
+		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
 			     pure_lb_blocks);
 	}
 }

 /* Prepare Tx PQ mapping runtime init values for the specified PF */
-static void qed_tx_pq_map_rt_init(
-	struct qed_hwfn *p_hwfn,
-	struct qed_ptt *p_ptt,
-	struct qed_qm_pf_rt_init_params *p_params,
-	u32 base_mem_addr_4kb)
+static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+				  struct qed_ptt *p_ptt,
+				  struct qed_qm_pf_rt_init_params *p_params,
+				  u32 base_mem_addr_4kb)
 {
-	struct init_qm_vport_params *vport_params = p_params->vport_params;
-	u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
-	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
-	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
-			    QM_PF_QUEUE_GROUP_SIZE;
-	u16 i, pq_id, pq_group;
-
-	/* A bit per Tx PQ indicating if the PQ is associated with a VF */
 	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+	struct 
init_qm_vport_params *vport_params = p_params->vport_params; u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE; - u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids); - u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids); - u32 mem_addr_4kb = base_mem_addr_4kb; + u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group; + struct init_qm_pq_params *pq_params = p_params->pq_params; + u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb; + + num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; + + first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE; + last_pq_group = (p_params->start_pq + num_pqs - 1) / + QM_PF_QUEUE_GROUP_SIZE; + + pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids); + vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids); + mem_addr_4kb = base_mem_addr_4kb; /* Set mapping from PQ group to PF */ for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++) STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, (u32)(p_params->pf_id)); + /* Set PQ sizes */ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, QM_PQ_SIZE_256B(p_params->num_pf_cids)); @@ -348,58 +460,82 @@ static void qed_tx_pq_map_rt_init( /* Go over all Tx PQs */ for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) { - u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id, - p_params->max_phys_tcs_per_port); - bool is_vf_pq = (i >= p_params->num_pf_pqs); - struct qm_rf_pq_map tx_pq_map; - - bool rl_valid = p_params->pq_params[i].rl_valid && - (p_params->pq_params[i].vport_id < - MAX_QM_GLOBAL_RLS); + u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id; + u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS; + struct qm_rf_pq_map_e4 tx_pq_map; + bool is_vf_pq, rl_valid; + u16 *p_first_tx_pq_id; + + ext_voq = qed_get_ext_voq(p_hwfn, + p_params->port_id, + tc_id, + p_params->max_phys_tcs_per_port); + is_vf_pq = (i >= p_params->num_pf_pqs); + rl_valid = pq_params[i].rl_valid && + pq_params[i].vport_id < max_qm_global_rls; /* Update first Tx PQ of VPORT/TC */ - u8 vport_id_in_pf = p_params->pq_params[i].vport_id - - p_params->start_vport; - u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0]; - u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id]; + vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport; + p_first_tx_pq_id = + &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id]; + if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) { + u32 map_val = + (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | + (p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT); - if (first_tx_pq_id == QM_INVALID_PQ_ID) { /* Create new VP PQ */ - pq_ids[p_params->pq_params[i].tc_id] = pq_id; - first_tx_pq_id = pq_id; + *p_first_tx_pq_id = pq_id; /* Map VP PQ to VOQ and PF */ STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET + - first_tx_pq_id, - (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | - (p_params->pf_id << - QM_WFQ_VP_PQ_PF_SHIFT)); + *p_first_tx_pq_id, + map_val); } - if (p_params->pq_params[i].rl_valid && !rl_valid) + /* Check RL ID */ + if (pq_params[i].rl_valid && pq_params[i].vport_id >= + max_qm_global_rls) DP_NOTICE(p_hwfn, - "Invalid VPORT ID for rate limiter configuration"); - /* Fill PQ map entry */ - memset(&tx_pq_map, 0, sizeof(tx_pq_map)); - SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); - SET_FIELD(tx_pq_map.reg, - QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0); - SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id); - SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID, - rl_valid ? 
- p_params->pq_params[i].vport_id : 0); - SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq); - SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, - p_params->pq_params[i].wrr_group); - /* Write PQ map entry to CAM */ - STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, - *((u32 *)&tx_pq_map)); - /* Set base address */ + "Invalid VPORT ID for rate limiter configuration\n"); + + /* Prepare PQ map entry */ + QM_INIT_TX_PQ_MAP(p_hwfn, + tx_pq_map, + E4, + pq_id, + rl_valid ? 1 : 0, + *p_first_tx_pq_id, + rl_valid ? pq_params[i].vport_id : 0, + ext_voq, pq_params[i].wrr_group); + + /* Set PQ base address */ STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb); + /* Clear PQ pointer table entry (64 bit) */ + if (p_params->is_pf_loading) + for (j = 0; j < 2; j++) + STORE_RT_REG(p_hwfn, + QM_REG_PTRTBLTX_RT_OFFSET + + (pq_id * 2) + j, 0); + + /* Write PQ info to RAM */ + if (WRITE_PQ_INFO_TO_RAM != 0) { + u32 pq_info = 0; + + pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id, + p_params->pf_id, + tc_id, + p_params->port_id, + rl_valid ? 1 : 0, + rl_valid ? + pq_params[i].vport_id : 0); + qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), + pq_info); + } + /* If VF PQ, add indication to PQ VF mask */ if (is_vf_pq) { tx_pq_vf_mask[pq_id / @@ -421,16 +557,16 @@ static void qed_tx_pq_map_rt_init( /* Prepare Other PQ mapping runtime init values for the specified PF */ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, - u8 port_id, u8 pf_id, + bool is_pf_loading, u32 num_pf_cids, u32 num_tids, u32 base_mem_addr_4kb) { u32 pq_size, pq_mem_4kb, mem_addr_4kb; - u16 i, pq_id, pq_group; + u16 i, j, pq_id, pq_group; - /* a single other PQ group is used in each PF, - * where PQ group i is used in PF i. + /* A single other PQ group is used in each PF, where PQ group i is used + * in PF i. 
*/ pq_group = pf_id; pq_size = num_pf_cids + num_tids; @@ -440,16 +576,25 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, /* Map PQ group to PF */ STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, (u32)(pf_id)); + /* Set PQ sizes */ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size)); - /* Set base address */ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < QM_OTHER_PQS_PER_PF; i++, pq_id++) { + /* Set PQ base address */ STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, mem_addr_4kb); + + /* Clear PQ pointer table entry */ + if (is_pf_loading) + for (j = 0; j < 2; j++) + STORE_RT_REG(p_hwfn, + QM_REG_PTRTBLOTHER_RT_OFFSET + + (pq_id * 2) + j, 0); + mem_addr_4kb += pq_mem_4kb; } } @@ -461,16 +606,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, struct qed_qm_pf_rt_init_params *p_params) { u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; - u32 crd_reg_offset; - u32 inc_val; + struct init_qm_pq_params *pq_params = p_params->pq_params; + u32 inc_val, crd_reg_offset; + u8 ext_voq; u16 i; - if (p_params->pf_id < MAX_NUM_PFS_BB) - crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET; - else - crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET; - crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB; - inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq); if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); @@ -478,19 +618,26 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, } for (i = 0; i < num_tx_pqs; i++) { - u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id, - p_params->max_phys_tcs_per_port); - + ext_voq = qed_get_ext_voq(p_hwfn, + p_params->port_id, + pq_params[i].tc_id, + p_params->max_phys_tcs_per_port); + crd_reg_offset = + (p_params->pf_id < MAX_NUM_PFS_BB ? 
+ QM_REG_WFQPFCRD_RT_OFFSET : + QM_REG_WFQPFCRD_MSB_RT_OFFSET) + + ext_voq * MAX_NUM_PFS_BB + + (p_params->pf_id % MAX_NUM_PFS_BB); OVERWRITE_RT_REG(p_hwfn, - crd_reg_offset + voq * MAX_NUM_PFS_BB, - QM_WFQ_CRD_REG_SIGN_BIT); + crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT); } STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id, - QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT); + QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, inc_val); + return 0; } @@ -501,15 +648,19 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl) { u32 inc_val = QM_RL_INC_VAL(pf_rl); - if (inc_val > QM_RL_MAX_INC_VAL) { + if (inc_val > QM_PF_RL_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n"); return -1; } - STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, - QM_RL_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, - QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT); + + STORE_RT_REG(p_hwfn, + QM_REG_RLPFCRD_RT_OFFSET + pf_id, + (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, + QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, + QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val); + return 0; } @@ -520,12 +671,12 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, u8 num_vports, struct init_qm_vport_params *vport_params) { + u16 vport_pq_id; u32 inc_val; u8 tc, i; /* Go over all PF VPORTs */ for (i = 0; i < num_vports; i++) { - if (!vport_params[i].vport_wfq) continue; @@ -536,17 +687,14 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, return -1; } - /* each VPORT can have several VPORT PQ IDs for - * different TCs - */ + /* Each VPORT can have several VPORT PQ IDs for various TCs */ for (tc = 0; tc < NUM_OF_TCS; tc++) { - u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc]; - + vport_pq_id = vport_params[i].first_tx_pq_id[tc]; if (vport_pq_id != QM_INVALID_PQ_ID) { STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + vport_pq_id, - QM_WFQ_CRD_REG_SIGN_BIT); + (u32)QM_WFQ_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + vport_pq_id, inc_val); @@ -557,12 +705,17 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, return 0; } +/* Prepare VPORT RL runtime init values for the specified VPORTs. + * Return -1 on error. + */ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, u8 start_vport, u8 num_vports, + u32 link_speed, struct init_qm_vport_params *vport_params) { u8 i, vport_id; + u32 inc_val; if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) { DP_NOTICE(p_hwfn, @@ -572,22 +725,22 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, /* Go over all PF VPORTs */ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) { - u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl); - - if (inc_val > QM_RL_MAX_INC_VAL) { + inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ? 
+ vport_params[i].vport_rl : + link_speed); + if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) { DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n"); return -1; } - STORE_RT_REG(p_hwfn, - QM_REG_RLGLBLCRD_RT_OFFSET + vport_id, - QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id, + (u32)QM_RL_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id, - QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, - QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id, + QM_VP_RL_UPPER_BOUND(link_speed) | + (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id, inc_val); } @@ -599,7 +752,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn, { u32 reg_val, i; - for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0; + for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val; i++) { udelay(QM_STOP_CMD_POLL_PERIOD_US); reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY); @@ -632,8 +785,8 @@ static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn, } /******************** INTERFACE IMPLEMENTATION *********************/ -u32 qed_qm_pf_mem_size(u8 pf_id, - u32 num_pf_cids, + +u32 qed_qm_pf_mem_size(u32 num_pf_cids, u32 num_vf_cids, u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs) { @@ -642,11 +795,10 @@ u32 qed_qm_pf_mem_size(u8 pf_id, QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF; } -int qed_qm_common_rt_init( - struct qed_hwfn *p_hwfn, - struct qed_qm_common_rt_init_params *p_params) +int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, + struct qed_qm_common_rt_init_params *p_params) { - /* init AFullOprtnstcCrdMask */ + /* Init AFullOprtnstcCrdMask */ u32 mask = (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) | (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) | @@ -664,18 +816,31 @@ int qed_qm_common_rt_init( QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT); STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask); + + /* Enable/disable PF RL */ qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en); + + /* Enable/disable PF WFQ */ qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en); + + /* Enable/disable VPORT RL */ qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en); + + /* Enable/disable VPORT WFQ */ qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en); + + /* Init PBF CMDQ line credit */ qed_cmdq_lines_rt_init(p_hwfn, p_params->max_ports_per_engine, p_params->max_phys_tcs_per_port, p_params->port_params); + + /* Init BTB blocks in PBF */ qed_btb_blocks_rt_init(p_hwfn, p_params->max_ports_per_engine, p_params->max_phys_tcs_per_port, p_params->port_params); + return 0; } @@ -695,24 +860,31 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID; /* Map Other PQs (if any) */ - qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id, - p_params->num_pf_cids, p_params->num_tids, 0); + qed_other_pq_map_rt_init(p_hwfn, + p_params->pf_id, + p_params->is_pf_loading, p_params->num_pf_cids, + p_params->num_tids, 0); /* Map Tx PQs */ qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb); + /* Init PF WFQ */ if (p_params->pf_wfq) if (qed_pf_wfq_rt_init(p_hwfn, p_params)) return -1; + /* Init PF RL */ if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl)) return -1; + /* Set VPORT WFQ */ if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params)) return -1; + /* Set VPORT RL */ if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport, - 
p_params->num_vports, vport_params)) + p_params->num_vports, p_params->link_speed, + vport_params)) return -1; return 0; @@ -729,6 +901,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, } qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val); + return 0; } @@ -737,14 +910,13 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn, { u32 inc_val = QM_RL_INC_VAL(pf_rl); - if (inc_val > QM_RL_MAX_INC_VAL) { + if (inc_val > QM_PF_RL_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n"); return -1; } - qed_wr(p_hwfn, p_ptt, - QM_REG_RLPFCRD + pf_id * 4, - QM_RL_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, + p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val); return 0; @@ -767,33 +939,35 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, for (tc = 0; tc < NUM_OF_TCS; tc++) { vport_pq_id = first_tx_pq_id[tc]; if (vport_pq_id != QM_INVALID_PQ_ID) - qed_wr(p_hwfn, p_ptt, - QM_REG_WFQVPWEIGHT + vport_pq_id * 4, - inc_val); + qed_wr(p_hwfn, + p_ptt, + QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val); } return 0; } int qed_init_vport_rl(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl) + struct qed_ptt *p_ptt, + u8 vport_id, u32 vport_rl, u32 link_speed) { - u32 inc_val = QM_RL_INC_VAL(vport_rl); + u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS; - if (vport_id >= MAX_QM_GLOBAL_RLS) { + if (vport_id >= max_qm_global_rls) { DP_NOTICE(p_hwfn, "Invalid VPORT ID for rate limiter configuration\n"); return -1; } - if (inc_val > QM_RL_MAX_INC_VAL) { + inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed); + if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) { DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n"); return -1; } - qed_wr(p_hwfn, p_ptt, - QM_REG_RLGLBLCRD + vport_id * 4, - QM_RL_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, + p_ptt, + QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val); return 0; @@ -805,23 +979,27 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, bool is_tx_pq, u16 start_pq, u16 num_pqs) { u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 }; - u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id; + u32 pq_mask = 0, last_pq, pq_id; + + last_pq = start_pq + num_pqs - 1; /* Set command's PQ type */ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 
0 : 1);
+
 	/* Go over requested PQs */
 	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
 		/* Set PQ bit in mask (stop command only) */
 		if (!is_release_cmd)
-			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+			pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH));

 		/* If last PQ or end of PQ mask, write command */
 		if ((pq_id == last_pq) ||
 		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
 		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
-			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
-					 PAUSE_MASK, pq_mask);
-			QM_CMD_SET_FIELD(cmd_arr,
+			QM_CMD_SET_FIELD(cmd_arr,
+					 QM_STOP_CMD, PAUSE_MASK, pq_mask);
+			QM_CMD_SET_FIELD(cmd_arr,
+					 QM_STOP_CMD,
 					 GROUP_ID,
 					 pq_id / QM_STOP_PQ_MASK_WIDTH);
 			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
@@ -834,87 +1012,103 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
 	return true;
 }

-static void
-qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
-{
-	if (enable)
-		set_bit(bit, var);
-	else
-		clear_bit(bit, var);
-}
+#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
+	do { \
+		typeof(var) *__p_var = &(var); \
+		typeof(offset) __offset = offset; \
+		*__p_var = (*__p_var & ~BIT(__offset)) | \
+			   ((enable) ? BIT(__offset) : 0); \
+	} while (0)

 #define PRS_ETH_TUNN_FIC_FORMAT	-188897008

 void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
 			     struct qed_ptt *p_ptt, u16 dest_port)
 {
+	/* Update PRS register */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+
+	/* Update NIG register */
 	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
+
+	/* Update PBF register */
 	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
 }

 void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt, bool vxlan_enable)
 {
-	unsigned long reg_val = 0;
+	u32 reg_val;
 	u8 shift;

+	/* Update PRS register */
 	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
 	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
 	if (reg_val)
-		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
-		       PRS_ETH_TUNN_FIC_FORMAT);
+		qed_wr(p_hwfn,
+		       p_ptt,
+		       PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+		       (u32)PRS_ETH_TUNN_FIC_FORMAT);

+	/* Update NIG register */
 	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
 	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
 	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

-	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
-	       vxlan_enable ? 1 : 0);
+	/* Update DORQ register */
+	qed_wr(p_hwfn,
+	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
 }
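SET_TUNNEL_TYPE_ENABLE_BIT() above replaces the old set_bit()/clear_bit() helper with a read-modify-write on the u32 register image, matching the width returned by qed_rd() and written by qed_wr(). A minimal stand-alone sketch of the same idea follows; it is illustrative only and not part of the patch, and SET_ENABLE_BIT is a simplified stand-in that, unlike the kernel macro with its typeof() temporaries, evaluates its first argument more than once.

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

/* Same shape as SET_TUNNEL_TYPE_ENABLE_BIT: clear the bit, then OR it
 * back in only when 'enable' is set.
 */
#define SET_ENABLE_BIT(var, offset, enable) \
	((var) = ((var) & ~BIT(offset)) | ((enable) ? BIT(offset) : 0))

int main(void)
{
	uint32_t reg_val = 0x0000000f; /* stand-in for a read register image */

	SET_ENABLE_BIT(reg_val, 4, 1); /* enable the tunnel type at bit 4 */
	SET_ENABLE_BIT(reg_val, 0, 0); /* disable the tunnel type at bit 0 */
	printf("reg_val = 0x%08x\n", (unsigned)reg_val); /* 0x0000001e */
	return 0;
}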

-void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt,
 			bool eth_gre_enable, bool ip_gre_enable)
 {
-	unsigned long reg_val = 0;
+	u32 reg_val;
 	u8 shift;

+	/* Update PRS register */
 	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
 	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
 	if (reg_val)
-		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
-		       PRS_ETH_TUNN_FIC_FORMAT);
+		qed_wr(p_hwfn,
+		       p_ptt,
+		       PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+		       (u32)PRS_ETH_TUNN_FIC_FORMAT);

+	/* Update NIG register */
 	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
 	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
 	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
 	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

-	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
-	       eth_gre_enable ? 1 : 0);
-	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
-	       ip_gre_enable ? 1 : 0);
+	/* Update DORQ registers */
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
+	qed_wr(p_hwfn,
+	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
 }

 void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
 			      struct qed_ptt *p_ptt, u16 dest_port)
 {
+	/* Update PRS register */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+
+	/* Update NIG register */
 	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+
+	/* Update PBF register */
 	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
 }
@@ -922,32 +1116,39 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 			   struct qed_ptt *p_ptt,
 			   bool eth_geneve_enable, bool ip_geneve_enable)
 {
-	unsigned long reg_val = 0;
+	u32 reg_val;
 	u8 shift;

+	/* Update PRS register */
 	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable);
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
 	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
 	if (reg_val)
-		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
-		       PRS_ETH_TUNN_FIC_FORMAT);
+		qed_wr(p_hwfn,
+		       p_ptt,
+		       PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+		       (u32)PRS_ETH_TUNN_FIC_FORMAT);

+	/* Update NIG register */
 	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
 	       eth_geneve_enable ? 1 : 0);
 	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 
1 : 0);

-	/* EDPM with geneve tunnel not supported in BB_B0 */
+	/* EDPM with geneve tunnel not supported in BB */
 	if (QED_IS_BB_B0(p_hwfn->cdev))
 		return;

-	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+	/* Update DORQ registers */
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
 	       eth_geneve_enable ? 1 : 0);
-	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
 	       ip_geneve_enable ? 1 : 0);
 }
@@ -959,117 +1160,297 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)

-void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
-			      struct qed_ptt *p_ptt, u16 pf_id)
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
 {
-	u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM +
-		      pf_id * RAM_LINE_SIZE;
-
-	/*stop using gft logic */
+	/* Disable gft search for PF */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
-	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+
+	/* Clean ram & cam for next gft session */
+
+	/* Zero camline */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
-	qed_wr(p_hwfn, p_ptt, hw_addr, 0);
-	qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0);
+
+	/* Zero ramline */
+	qed_wr(p_hwfn,
+	       p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
+	       0);
 }

-void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-			     u16 pf_id, bool tcp, bool udp,
-			     bool ipv4, bool ipv6)
+void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	union gft_cam_line_union camline;
-	struct gft_ram_line ramline;
 	u32 rfs_cm_hdr_event_id;

+	/* Set RFS event ID to be awakened in Tstorm by PRS */
 	rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+	rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
+			       PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
+			       PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+}
+
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt,
+		    u16 pf_id,
+		    bool tcp,
+		    bool udp,
+		    bool ipv4, bool ipv6, enum gft_profile_type profile_type)
+{
+	u32 reg_val, cam_line, ram_line_lo, ram_line_hi;

 	if (!ipv6 && !ipv4)
 		DP_NOTICE(p_hwfn,
-			  "set_rfs_mode_enable: must accept at least on of - ipv4 or ipv6");
+			  "gft_config: must accept at least one of - ipv4 or ipv6\n");
 	if (!tcp && !udp)
 		DP_NOTICE(p_hwfn,
-			  "set_rfs_mode_enable: must accept at least on of - udp or tcp");
+			  "gft_config: must accept at least one of - udp or tcp\n");
+	if (profile_type >= MAX_GFT_PROFILE_TYPE)
+		DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");

-	rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
-			       PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
-	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
-			       PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
-	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+	/* Set RFS event ID to be awakened in Tstorm by PRS */
+	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
+		  PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);

-	/* Configure Registers for RFS mode */
-	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+	/* Do not load context only cid in PRS on match.
*/ qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); - camline.cam_line_mapped.camline = 0; - /* Cam line is now valid!! */ - SET_FIELD(camline.cam_line_mapped.camline, - GFT_CAM_LINE_MAPPED_VALID, 1); + /* Do not use tenant ID exist bit for gft search */ + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0); - /* filters are per PF!! */ - SET_FIELD(camline.cam_line_mapped.camline, + /* Set Cam */ + cam_line = 0; + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1); + + /* Filters are per PF!! */ + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK, GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK); - SET_FIELD(camline.cam_line_mapped.camline, - GFT_CAM_LINE_MAPPED_PF_ID, pf_id); + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id); + if (!(tcp && udp)) { - SET_FIELD(camline.cam_line_mapped.camline, + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK); if (tcp) - SET_FIELD(camline.cam_line_mapped.camline, + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_TCP_PROTOCOL); else - SET_FIELD(camline.cam_line_mapped.camline, + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_UDP_PROTOCOL); } if (!(ipv4 && ipv6)) { - SET_FIELD(camline.cam_line_mapped.camline, - GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1); + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1); if (ipv4) - SET_FIELD(camline.cam_line_mapped.camline, + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV4); else - SET_FIELD(camline.cam_line_mapped.camline, + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV6); } /* Write characteristics to cam */ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, - camline.cam_line_mapped.camline); - camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt, - PRS_REG_GFT_CAM + - CAM_LINE_SIZE * pf_id); + cam_line); + cam_line = + qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id); /* Write line to RAM - compare to filter 4 tuple */ - ramline.lo = 0; - ramline.hi = 0; - SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1); - SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1); - SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); - SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1); - SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1); - SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1); - - /* Each iteration write to reg */ - qed_wr(p_hwfn, p_ptt, + ram_line_lo = 0; + ram_line_hi = 0; + + if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) { + SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1); + } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) { + SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1); + } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) { + SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + } + + qed_wr(p_hwfn, + p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, - ramline.lo); - qed_wr(p_hwfn, p_ptt, - PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4, - ramline.hi); + ram_line_lo); + qed_wr(p_hwfn, + p_ptt, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE, + ram_line_hi); /* Set 
default profile so that no filter match will happen */
-	qed_wr(p_hwfn, p_ptt,
-	       PRS_REG_GFT_PROFILE_MASK_RAM +
-	       RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH,
-	       ramline.lo);
-	qed_wr(p_hwfn, p_ptt,
-	       PRS_REG_GFT_PROFILE_MASK_RAM +
-	       RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4,
-	       ramline.hi);
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+	       PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+	       PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
+
+	/* Enable gft search */
+	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+}
+
+DECLARE_CRC8_TABLE(cdu_crc8_table);
+
+/* Calculate and return CDU validation byte per connection type/region/cid */
+static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
+{
+	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+	u8 crc, validation_byte = 0;
+	static u8 crc8_table_valid; /* automatically initialized to 0 */
+	u32 validation_string = 0;
+	u32 data_to_crc;
+
+	if (!crc8_table_valid) {
+		crc8_populate_msb(cdu_crc8_table, 0x07);
+		crc8_table_valid = 1;
+	}
+
+	/* The CRC is calculated on the String-to-compress:
+	 * [31:8]  = {CID[31:20],CID[11:0]}
+	 * [7:4]   = Region
+	 * [3:0]   = Type
+	 */
+	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
+
+	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+		validation_string |= ((region & 0xF) << 4);
+
+	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+		validation_string |= (conn_type & 0xF);
+
+	/* Convert to big-endian and calculate CRC8 */
+	data_to_crc = be32_to_cpu(validation_string);
+
+	crc = crc8(cdu_crc8_table,
+		   (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);
+
+	/* The validation byte [7:0] is composed:
+	 * for type A validation
+	 * [7]   = active configuration bit
+	 * [6:0] = crc[6:0]
+	 *
+	 * for type B validation
+	 * [7]   = active configuration bit
+	 * [6:3] = connection_type[3:0]
+	 * [2:0] = crc[2:0]
+	 */
+	validation_byte |=
+	    ((validation_cfg >>
+	      CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

+	if ((validation_cfg >>
+	     CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+	else
+		validation_byte |= crc & 0x7F;
+
+	return validation_byte;
+}
+
+/* Calculate and set validation bytes for session context */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+				     u16 ctx_size, u8 ctx_type, u32 cid)
+{
+	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+
+	p_ctx = (u8 * const)p_ctx_mem;
+	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+	memset(p_ctx, 0, ctx_size);
+
+	*x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
+	*t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
+	*u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
+}
+
+/* Calculate and set validation bytes for task context */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+				  u16 ctx_size, u8 ctx_type, u32 tid)
+{
+	u8 *p_ctx, *region1_val_ptr;
+
+	p_ctx = (u8 * const)p_ctx_mem;
+	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+	memset(p_ctx, 0, ctx_size);
+
+	*region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
+}
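The validation-byte construction described in the comments above can be reproduced outside the driver. The sketch below is illustrative only and not part of the patch: it assumes a configuration in which the CID, region and type fields are all included in the string-to-compress (the exact CDU_CONTEXT_VALIDATION_CFG_* bit positions are not visible in this patch), and it open-codes the MSB-first CRC8 with polynomial 0x07 and init value 0xFF that the driver obtains from crc8_populate_msb() and CRC8_INIT_VALUE.

#include <stdio.h>
#include <stdint.h>

/* Bitwise MSB-first CRC8, polynomial 0x07, init 0xFF -- the same
 * parameters the driver's table-driven crc8() call uses.
 */
static uint8_t crc8_msb(const uint8_t *data, int len)
{
	uint8_t crc = 0xff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ 0x07)
					   : (uint8_t)(crc << 1);
	}
	return crc;
}

int main(void)
{
	uint8_t conn_type = 7, region = 3; /* arbitrary example inputs */
	uint32_t cid = 0x00101234;

	/* String-to-compress, assuming CID/region/type are all enabled:
	 * [31:8] = {CID[31:20], CID[11:0]}, [7:4] = region, [3:0] = type
	 */
	uint32_t vs = (cid & 0xFFF00000) | ((cid & 0xFFF) << 8) |
		      ((region & 0xF) << 4) | (conn_type & 0xF);

	/* CRC the big-endian image of the word; this matches the driver's
	 * be32_to_cpu() + crc8() sequence on a little-endian host.
	 */
	uint8_t bytes[4] = { vs >> 24, vs >> 16, vs >> 8, vs };
	uint8_t crc = crc8_msb(bytes, 4);

	/* Type A layout: bit 7 = active bit, [6:0] = crc[6:0] */
	printf("validation byte = 0x%02x\n", (unsigned)(0x80 | (crc & 0x7f)));
	return 0;
}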
+
+/* Memset session context to 0 while preserving validation bytes */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+	u8 x_val, t_val, u_val;
+
+	p_ctx = (u8 * const)p_ctx_mem;
+	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+	x_val = *x_val_ptr;
+	t_val = *t_val_ptr;
+	u_val = *u_val_ptr;
+
+	memset(p_ctx, 0, ctx_size);
+
+	*x_val_ptr = x_val;
+	*t_val_ptr = t_val;
+	*u_val_ptr = u_val;
+}
+
+/* Memset task context to 0 while preserving validation bytes */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+	u8 *p_ctx, *region1_val_ptr;
+	u8 region1_val;
+
+	p_ctx = (u8 * const)p_ctx_mem;
+	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+	region1_val = *region1_val_ptr;
+
+	memset(p_ctx, 0, ctx_size);
+
+	*region1_val_ptr = region1_val;
+}
+
+/* Enable and configure context validation */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt)
+{
+	u32 ctx_validation;
+
+	/* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
+	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
+	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
+
+	/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
+	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
+
+	/* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
+	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+	qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index e3f368882f46..3bb76da6baa2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -414,11 +414,23 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
 }

 /* init_ops callbacks entry point */
-static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
-			    struct qed_ptt *p_ptt,
-			    struct init_callback_op *p_cmd)
+static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt,
+			   struct init_callback_op *p_cmd)
 {
-	DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+	int rc;
+
+	switch (p_cmd->callback_id) {
+	case DMAE_READY_CB:
+		rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
+		break;
+	default:
+		DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
+			  p_cmd->callback_id);
+		return -EINVAL;
+	}
+
+	return rc;
 }

 static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
@@ -519,7 +531,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
 			break;

 		case INIT_OP_CALLBACK:
-			qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+			rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
 			break;
 		}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 719cdbfe1695..d3eabcf9c86c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -59,10 +59,10 @@ struct qed_pi_info {
 };

 struct qed_sb_sp_info {
-	struct qed_sb_info sb_info;
+	struct qed_sb_info sb_info;

 	/* per protocol index data */
-	struct qed_pi_info pi_info_arr[PIS_PER_SB];
+	struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
 };

 enum qed_attention_type {
@@ -82,7 +82,7 @@ struct aeu_invert_reg_bit {
#define ATTENTION_LENGTH_SHIFT          (4)
#define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
-#define ATTENTION_SINGLE                (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE                
BIT(ATTENTION_LENGTH_SHIFT) #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ ATTENTION_PARITY) @@ -1313,7 +1313,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, if (IS_VF(p_hwfn->cdev)) return; - sb_offset = igu_sb_id * PIS_PER_SB; + sb_offset = igu_sb_id * PIS_PER_SB_E4; memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 5199634ed630..54b4ee0acfd7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -197,7 +197,7 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev); #define QED_SB_EVENT_MASK 0x0003 #define SB_ALIGNED_SIZE(p_hwfn) \ - ALIGNED_TYPE_SIZE(struct status_block, p_hwfn) + ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn) #define QED_SB_INVALID_IDX 0xffff diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 813c77cc857f..c0d4a54a5edb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -62,22 +62,6 @@ #include "qed_sriov.h" #include "qed_reg_addr.h" -static int -qed_iscsi_async_event(struct qed_hwfn *p_hwfn, - u8 fw_event_code, - u16 echo, union event_ring_data *data, u8 fw_return_code) -{ - if (p_hwfn->p_iscsi_info->event_cb) { - struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info; - - return p_iscsi->event_cb(p_iscsi->event_context, - fw_event_code, data); - } else { - DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n"); - return -EINVAL; - } -} - struct qed_iscsi_conn { struct list_head list_entry; bool free_on_delete; @@ -105,7 +89,7 @@ struct qed_iscsi_conn { u8 local_mac[6]; u8 remote_mac[6]; u16 vlan_id; - u8 tcp_flags; + u16 tcp_flags; u8 ip_version; u32 remote_ip[4]; u32 local_ip[4]; @@ -122,7 +106,6 @@ struct qed_iscsi_conn { u32 ss_thresh; u16 srtt; u16 rtt_var; - u32 ts_time; u32 ts_recent; u32 ts_recent_age; u32 total_rt; @@ -144,7 +127,6 @@ struct qed_iscsi_conn { u16 mss; u8 snd_wnd_scale; u8 rcv_wnd_scale; - u32 ts_ticks_per_second; u16 da_timeout_value; u8 ack_frequency; @@ -162,6 +144,22 @@ struct qed_iscsi_conn { }; static int +qed_iscsi_async_event(struct qed_hwfn *p_hwfn, + u8 fw_event_code, + u16 echo, union event_ring_data *data, u8 fw_return_code) +{ + if (p_hwfn->p_iscsi_info->event_cb) { + struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info; + + return p_iscsi->event_cb(p_iscsi->event_context, + fw_event_code, data); + } else { + DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n"); + return -EINVAL; + } +} + +static int qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr, @@ -214,9 +212,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring; p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring; p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring; - p_init->ooo_enable = p_params->ooo_enable; p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + p_params->ll2_ooo_queue_id; + p_init->func_params.log_page_size = p_params->log_page_size; val = p_params->num_tasks; p_init->func_params.num_tasks = cpu_to_le16(val); @@ -276,7 +274,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer); val = 
p_params->tx_sws_timer; p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val); - p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt; + p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt; p_hwfn->p_iscsi_info->event_context = event_context; p_hwfn->p_iscsi_info->event_cb = async_event_cb; @@ -304,8 +302,8 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, int rc = 0; u32 dval; u16 wval; - u8 i; u16 *p; + u8 i; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); @@ -371,7 +369,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id); - p_tcp->flags = p_conn->tcp_flags; + p_tcp->flags = cpu_to_le16(p_conn->tcp_flags); p_tcp->ip_version = p_conn->ip_version; for (i = 0; i < 4; i++) { dval = p_conn->remote_ip[i]; @@ -436,7 +434,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2)); p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id); - p_tcp2->flags = p_conn->tcp_flags; + p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags); p_tcp2->ip_version = p_conn->ip_version; for (i = 0; i < 4; i++) { @@ -458,6 +456,11 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, p_tcp2->syn_ip_payload_length = cpu_to_le16(wval); p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr); p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr); + p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd); + p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt; + p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout); + p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time); + p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval); } return qed_spq_post(p_hwfn, p_ent, NULL); @@ -692,8 +695,7 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, } } -static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn, - struct qed_iscsi_conn *p_conn) +static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn) { if (!p_conn->queue_cnts_virt_addr) goto nomem; @@ -844,7 +846,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn, rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn); if (!rc) - rc = qed_iscsi_setup_connection(p_hwfn, p_conn); + rc = qed_iscsi_setup_connection(p_conn); if (rc) { spin_lock_bh(&p_hwfn->p_iscsi_info->lock); @@ -1294,7 +1296,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev, con->ss_thresh = conn_info->ss_thresh; con->srtt = conn_info->srtt; con->rtt_var = conn_info->rtt_var; - con->ts_time = conn_info->ts_time; con->ts_recent = conn_info->ts_recent; con->ts_recent_age = conn_info->ts_recent_age; con->total_rt = conn_info->total_rt; @@ -1316,7 +1317,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev, con->mss = conn_info->mss; con->snd_wnd_scale = conn_info->snd_wnd_scale; con->rcv_wnd_scale = conn_info->rcv_wnd_scale; - con->ts_ticks_per_second = conn_info->ts_ticks_per_second; con->da_timeout_value = conn_info->da_timeout_value; con->ack_frequency = conn_info->ack_frequency; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 409041eab189..ca4a81dc1ace 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -64,14 +64,21 @@ struct mpa_v2_hdr { #define QED_IWARP_INVALID_TCP_CID 0xffffffff #define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024) -#define QED_IWARP_RCV_WND_SIZE_MIN (64 * 1024) +#define QED_IWARP_RCV_WND_SIZE_MIN (0xffff) #define TIMESTAMP_HEADER_SIZE (12) +#define 
QED_IWARP_MAX_FIN_RT_DEFAULT (2) #define QED_IWARP_TS_EN BIT(0) #define QED_IWARP_DA_EN BIT(1) #define QED_IWARP_PARAM_CRC_NEEDED (1) #define QED_IWARP_PARAM_P2P (1) +#define QED_IWARP_DEF_MAX_RT_TIME (0) +#define QED_IWARP_DEF_CWND_FACTOR (4) +#define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5) +#define QED_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */ +#define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */ + static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, u16 echo, union event_ring_data *data, @@ -120,11 +127,17 @@ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid) spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } -void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, - struct iwarp_init_func_params *p_ramrod) +void +qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, + struct iwarp_init_func_ramrod_data *p_ramrod) { - p_ramrod->ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_QUEUE) + - p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; + p_ramrod->iwarp.ll2_ooo_q_index = + RESC_START(p_hwfn, QED_LL2_QUEUE) + + p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; + + p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT; + + return; } static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid) @@ -699,6 +712,12 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) tcp->ttl = 0x40; tcp->tos_or_tc = 0; + tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME; + tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss; + tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT; + tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT; + tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL; + tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale; tcp->connect_mode = ep->connect_mode; @@ -807,6 +826,7 @@ static int qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) { struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod; + struct qed_iwarp_info *iwarp_info; struct qed_sp_init_data init_data; dma_addr_t async_output_phys; struct qed_spq_entry *p_ent; @@ -874,6 +894,8 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) p_mpa_ramrod->common.reject = 1; } + iwarp_info = &p_hwfn->p_rdma_info->iwarp; + p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size; p_mpa_ramrod->mode = ep->mpa_rev; SET_FIELD(p_mpa_ramrod->rtr_pref, IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type); @@ -2745,6 +2767,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, /* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */ iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) - ilog2(QED_IWARP_RCV_WND_SIZE_MIN); + iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale; iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED; iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index c1ecd743305f..b8f612d00241 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -95,6 +95,7 @@ struct qed_iwarp_info { spinlock_t iw_lock; /* for iwarp resources */ spinlock_t qp_lock; /* for teardown races */ u32 rcv_wnd_scale; + u16 rcv_wnd_size; u16 max_mtu; u8 mac_addr[ETH_ALEN]; u8 crc_needed; @@ -187,7 +188,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_rdma_start_in_params *params); void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, - struct iwarp_init_func_params *p_ramrod); + struct iwarp_init_func_ramrod_data *p_ramrod); int qed_iwarp_stop(struct qed_hwfn 
*p_hwfn, struct qed_ptt *p_ptt); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 085338990f49..893ef08a4b39 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -223,10 +223,9 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid; int rc; - p_cid = vmalloc(sizeof(*p_cid)); + p_cid = vzalloc(sizeof(*p_cid)); if (!p_cid) return NULL; - memset(p_cid, 0, sizeof(*p_cid)); p_cid->opaque_fid = opaque_fid; p_cid->cid = cid; @@ -1969,33 +1968,45 @@ void qed_reset_vport_stats(struct qed_dev *cdev) _qed_get_vport_stats(cdev, cdev->reset_stats); } -static void -qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct qed_arfs_config_params *p_cfg_params) +static enum gft_profile_type +qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode) { - if (p_cfg_params->arfs_enable) { - qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id, - p_cfg_params->tcp, p_cfg_params->udp, - p_cfg_params->ipv4, p_cfg_params->ipv6); - DP_VERBOSE(p_hwfn, QED_MSG_SP, - "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n", + if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE) + return GFT_PROFILE_TYPE_4_TUPLE; + if (mode == QED_FILTER_CONFIG_MODE_IP_DEST) + return GFT_PROFILE_TYPE_IP_DST_PORT; + return GFT_PROFILE_TYPE_L4_DST_PORT; +} + +void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_arfs_config_params *p_cfg_params) +{ + if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) { + qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id, + p_cfg_params->tcp, + p_cfg_params->udp, + p_cfg_params->ipv4, + p_cfg_params->ipv6, + qed_arfs_mode_to_hsi(p_cfg_params->mode)); + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n", p_cfg_params->tcp ? "Enable" : "Disable", p_cfg_params->udp ? "Enable" : "Disable", p_cfg_params->ipv4 ? "Enable" : "Disable", - p_cfg_params->ipv6 ? "Enable" : "Disable"); + p_cfg_params->ipv6 ? "Enable" : "Disable", + (u32)p_cfg_params->mode); } else { - qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n"); + qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id); } - - DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n", - p_cfg_params->arfs_enable ? 
"Enable" : "Disable"); } -static int -qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, +int +qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_cb, - dma_addr_t p_addr, u16 length, u16 qid, - u8 vport_id, bool b_is_add) + struct qed_ntuple_filter_params *p_params) { struct rx_update_gft_filter_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -2004,13 +2015,15 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 abs_vport_id = 0; int rc = -EINVAL; - rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); if (rc) return rc; - rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id); - if (rc) - return rc; + if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { + rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id); + if (rc) + return rc; + } /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); @@ -2032,17 +2045,27 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return rc; p_ramrod = &p_ent->ramrod.rx_update_gft; - DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr); - p_ramrod->pkt_hdr_length = cpu_to_le16(length); - p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id); - p_ramrod->vport_id = abs_vport_id; - p_ramrod->filter_type = RFS_FILTER_TYPE; - p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER; + + DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr); + p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length); + + if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { + p_ramrod->rx_qid_valid = 1; + p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); + } + + p_ramrod->flow_id_valid = 0; + p_ramrod->flow_id = 0; + + p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id); + p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER + : GFT_DELETE_FILTER; DP_VERBOSE(p_hwfn, QED_MSG_SP, "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n", abs_vport_id, abs_rx_q_id, - b_is_add ? "Adding" : "Removing", (u64)p_addr, length); + p_params->b_is_add ? 
"Adding" : "Removing", + (u64)p_params->addr, p_params->length); return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -2743,7 +2766,8 @@ static int qed_configure_filter(struct qed_dev *cdev, } } -static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher) +static int qed_configure_arfs_searcher(struct qed_dev *cdev, + enum qed_filter_config_mode mode) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_arfs_config_params arfs_config_params; @@ -2753,8 +2777,7 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher) arfs_config_params.udp = true; arfs_config_params.ipv4 = true; arfs_config_params.ipv6 = true; - arfs_config_params.arfs_enable = en_searcher; - + arfs_config_params.mode = mode; qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt, &arfs_config_params); return 0; @@ -2762,8 +2785,8 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher) static void qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn, - void *cookie, union event_ring_data *data, - u8 fw_return_code) + void *cookie, + union event_ring_data *data, u8 fw_return_code) { struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common; void *dev = p_hwfn->cdev->ops_cookie; @@ -2771,10 +2794,10 @@ qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn, op->arfs_filter_op(dev, cookie, fw_return_code); } -static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie, - dma_addr_t mapping, u16 length, - u16 vport_id, u16 rx_queue_id, - bool add_filter) +static int +qed_ntuple_arfs_filter_config(struct qed_dev *cdev, + void *cookie, + struct qed_ntuple_filter_params *params) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_spq_comp_cb cb; @@ -2783,9 +2806,19 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie, cb.function = qed_arfs_sp_response_handler; cb.cookie = cookie; - rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt, - &cb, mapping, length, rx_queue_id, - vport_id, add_filter); + if (params->b_is_vf) { + if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false, + false)) { + DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n", + params->vf_id); + return rc; + } + + params->vport_id = params->vf_id + 1; + params->qid = QED_RFS_NTUPLE_QID_RSS; + } + + rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params); if (rc) DP_NOTICE(p_hwfn, "Failed to issue a-RFS filter configuration\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index cc1f248551c9..c4030e949cce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -190,7 +190,7 @@ struct qed_arfs_config_params { bool udp; bool ipv4; bool ipv6; - bool arfs_enable; + enum qed_filter_config_mode mode; }; struct qed_sp_vport_update_params { @@ -277,6 +277,37 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); void qed_reset_vport_stats(struct qed_dev *cdev); +/** + * *@brief qed_arfs_mode_configure - + * + **Enable or disable rfs mode. It must accept atleast one of tcp or udp true + **and atleast one of ipv4 or ipv6 true to enable rfs mode. + * + **@param p_hwfn + **@param p_ptt + **@param p_cfg_params - arfs mode configuration parameters. 
+/** + * @brief - qed_configure_rfs_ntuple_filter + * + * This ramrod should be used to add or remove an aRFS HW filter. + * + * @param p_hwfn + * @param p_cb - Used for QED_SPQ_MODE_CB, where the client would initialize + * it with the cookie and callback function address; if not + * using this mode, the client must pass NULL. + * @param p_params + */ +int +qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, + struct qed_spq_comp_cb *p_cb, + struct qed_ntuple_filter_params *p_params); + #define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8) #define QED_QUEUE_CID_SELF (0xff) diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 047f556ca62e..c4f14fdc4e77 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -406,6 +406,9 @@ static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn, data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi); data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo); data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error; + data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id); + + data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp); } static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn, @@ -927,7 +930,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, qed_chain_get_pbl_phys(&p_rx->rcq_chain)); p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; - p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en; + p_ramrod->inner_vlan_stripping_en = + p_ll2_conn->input.rx_vlan_removal_en; p_ramrod->queue_id = p_ll2_conn->queue_id; p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0; @@ -1299,8 +1303,20 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input)); - p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ? 
- CORE_TX_DEST_NW : CORE_TX_DEST_LB; + switch (data->input.tx_dest) { + case QED_LL2_TX_DEST_NW: + p_ll2_info->tx_dest = CORE_TX_DEST_NW; + break; + case QED_LL2_TX_DEST_LB: + p_ll2_info->tx_dest = CORE_TX_DEST_LB; + break; + case QED_LL2_TX_DEST_DROP: + p_ll2_info->tx_dest = CORE_TX_DEST_DROP; + break; + default: + return -EINVAL; + } + if (data->input.conn_type == QED_LL2_TYPE_OOO || data->input.secondary_queue) p_ll2_info->main_func_queue = false; @@ -2281,8 +2297,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) goto release_terminate; } - if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI && - cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) { + if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) { DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n"); rc = qed_ll2_start_ooo(cdev, params); if (rc) { @@ -2340,8 +2355,7 @@ static int qed_ll2_stop(struct qed_dev *cdev) qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt); eth_zero_addr(cdev->ll2_mac_address); - if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI && - cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) + if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) qed_ll2_stop_ooo(cdev); rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 8b99c7d26f34..6f46cb11f349 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -2234,7 +2234,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) DRV_MSG_CODE_NVM_READ_NVRAM, addr + offset + (bytes_to_copy << - DRV_MB_PARAM_NVM_LEN_SHIFT), + DRV_MB_PARAM_NVM_LEN_OFFSET), &resp, &resp_param, &read_len, (u32 *)(p_buf + offset)); diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index b7abb8205d3a..5d040b873137 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -360,13 +360,13 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) { - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); - spin_lock_bh(&p_hwfn->p_rdma_info->lock); - qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid); - spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn) @@ -570,7 +570,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { qed_iwarp_init_fw_ramrod(p_hwfn, - &p_ent->ramrod.iwarp_init_func.iwarp); + &p_ent->ramrod.iwarp_init_func); p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma; } else { p_ramrod = &p_ent->ramrod.roce_init_func.rdma; diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 0cdb4337b3a0..f7122059b6b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -124,6 +124,8 @@ 0x1f0434UL #define PRS_REG_SEARCH_TAG1 \ 0x1f0444UL +#define PRS_REG_SEARCH_TENANT_ID \ + 0x1f044cUL #define 
PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \ 0x1f0a0cUL #define PRS_REG_SEARCH_TCP_FIRST_FRAG \ @@ -200,7 +202,13 @@ 0x2e8800UL #define CCFC_REG_STRONG_ENABLE_VF \ 0x2e070cUL -#define CDU_REG_CID_ADDR_PARAMS \ +#define CDU_REG_CCFC_CTX_VALID0 \ + 0x580400UL +#define CDU_REG_CCFC_CTX_VALID1 \ + 0x580404UL +#define CDU_REG_TCFC_CTX_VALID0 \ + 0x580408UL +#define CDU_REG_CID_ADDR_PARAMS \ 0x580900UL #define DBG_REG_CLIENT_ENABLE \ 0x010004UL @@ -564,7 +572,7 @@ #define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL #define PRS_REG_GRE_PROTOCOL 0x1f0734UL #define PRS_REG_VXLAN_PORT 0x1f0738UL -#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL +#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL #define NIG_REG_ENC_TYPE_ENABLE 0x501058UL #define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0) @@ -580,11 +588,11 @@ #define PRS_REG_NGE_PORT 0x1f086cUL #define NIG_REG_NGE_PORT 0x508b38UL -#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL -#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL -#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL -#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL -#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL +#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL +#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL +#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL #define NIG_REG_NGE_IP_ENABLE 0x508b28UL #define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL @@ -595,15 +603,15 @@ #define QM_REG_WFQPFWEIGHT 0x2f4e80UL #define QM_REG_WFQVPWEIGHT 0x2fa000UL -#define PGLCS_REG_DBG_SELECT_K2 \ +#define PGLCS_REG_DBG_SELECT_K2_E5 \ 0x001d14UL -#define PGLCS_REG_DBG_DWORD_ENABLE_K2 \ +#define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x001d18UL -#define PGLCS_REG_DBG_SHIFT_K2 \ +#define PGLCS_REG_DBG_SHIFT_K2_E5 \ 0x001d1cUL -#define PGLCS_REG_DBG_FORCE_VALID_K2 \ +#define PGLCS_REG_DBG_FORCE_VALID_K2_E5 \ 0x001d20UL -#define PGLCS_REG_DBG_FORCE_FRAME_K2 \ +#define PGLCS_REG_DBG_FORCE_FRAME_K2_E5 \ 0x001d24UL #define MISC_REG_RESET_PL_PDA_VMAIN_1 \ 0x008070UL @@ -615,7 +623,7 @@ 0x009050UL #define MISCS_REG_RESET_PL_HV \ 0x009060UL -#define MISCS_REG_RESET_PL_HV_2_K2 \ +#define MISCS_REG_RESET_PL_HV_2_K2_E5 \ 0x009150UL #define DMAE_REG_DBG_SELECT \ 0x00c510UL @@ -647,15 +655,15 @@ 0x0500b0UL #define GRC_REG_DBG_FORCE_FRAME \ 0x0500b4UL -#define UMAC_REG_DBG_SELECT_K2 \ +#define UMAC_REG_DBG_SELECT_K2_E5 \ 0x051094UL -#define UMAC_REG_DBG_DWORD_ENABLE_K2 \ +#define UMAC_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x051098UL -#define UMAC_REG_DBG_SHIFT_K2 \ +#define UMAC_REG_DBG_SHIFT_K2_E5 \ 0x05109cUL -#define UMAC_REG_DBG_FORCE_VALID_K2 \ +#define UMAC_REG_DBG_FORCE_VALID_K2_E5 \ 0x0510a0UL -#define UMAC_REG_DBG_FORCE_FRAME_K2 \ +#define UMAC_REG_DBG_FORCE_FRAME_K2_E5 \ 0x0510a4UL #define MCP2_REG_DBG_SELECT \ 0x052400UL @@ -717,15 +725,15 @@ 0x1f0ba0UL #define PRS_REG_DBG_FORCE_FRAME \ 0x1f0ba4UL -#define CNIG_REG_DBG_SELECT_K2 \ +#define CNIG_REG_DBG_SELECT_K2_E5 \ 0x218254UL -#define CNIG_REG_DBG_DWORD_ENABLE_K2 \ +#define CNIG_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x218258UL -#define CNIG_REG_DBG_SHIFT_K2 \ +#define CNIG_REG_DBG_SHIFT_K2_E5 \ 0x21825cUL -#define CNIG_REG_DBG_FORCE_VALID_K2 \ +#define CNIG_REG_DBG_FORCE_VALID_K2_E5 \ 0x218260UL -#define CNIG_REG_DBG_FORCE_FRAME_K2 \ +#define CNIG_REG_DBG_FORCE_FRAME_K2_E5 \ 0x218264UL #define PRM_REG_DBG_SELECT \ 0x2306a8UL @@ -997,35 +1005,35 @@ 0x580710UL #define CDU_REG_DBG_FORCE_FRAME \ 0x580714UL -#define 
WOL_REG_DBG_SELECT_K2 \ +#define WOL_REG_DBG_SELECT_K2_E5 \ 0x600140UL -#define WOL_REG_DBG_DWORD_ENABLE_K2 \ +#define WOL_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x600144UL -#define WOL_REG_DBG_SHIFT_K2 \ +#define WOL_REG_DBG_SHIFT_K2_E5 \ 0x600148UL -#define WOL_REG_DBG_FORCE_VALID_K2 \ +#define WOL_REG_DBG_FORCE_VALID_K2_E5 \ 0x60014cUL -#define WOL_REG_DBG_FORCE_FRAME_K2 \ +#define WOL_REG_DBG_FORCE_FRAME_K2_E5 \ 0x600150UL -#define BMBN_REG_DBG_SELECT_K2 \ +#define BMBN_REG_DBG_SELECT_K2_E5 \ 0x610140UL -#define BMBN_REG_DBG_DWORD_ENABLE_K2 \ +#define BMBN_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x610144UL -#define BMBN_REG_DBG_SHIFT_K2 \ +#define BMBN_REG_DBG_SHIFT_K2_E5 \ 0x610148UL -#define BMBN_REG_DBG_FORCE_VALID_K2 \ +#define BMBN_REG_DBG_FORCE_VALID_K2_E5 \ 0x61014cUL -#define BMBN_REG_DBG_FORCE_FRAME_K2 \ +#define BMBN_REG_DBG_FORCE_FRAME_K2_E5 \ 0x610150UL -#define NWM_REG_DBG_SELECT_K2 \ +#define NWM_REG_DBG_SELECT_K2_E5 \ 0x8000ecUL -#define NWM_REG_DBG_DWORD_ENABLE_K2 \ +#define NWM_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x8000f0UL -#define NWM_REG_DBG_SHIFT_K2 \ +#define NWM_REG_DBG_SHIFT_K2_E5 \ 0x8000f4UL -#define NWM_REG_DBG_FORCE_VALID_K2 \ +#define NWM_REG_DBG_FORCE_VALID_K2_E5 \ 0x8000f8UL -#define NWM_REG_DBG_FORCE_FRAME_K2\ +#define NWM_REG_DBG_FORCE_FRAME_K2_E5 \ 0x8000fcUL #define PBF_REG_DBG_SELECT \ 0xd80060UL @@ -1247,36 +1255,76 @@ 0x1901534UL #define USEM_REG_DBG_FORCE_FRAME \ 0x1901538UL -#define NWS_REG_DBG_SELECT_K2 \ +#define NWS_REG_DBG_SELECT_K2_E5 \ 0x700128UL -#define NWS_REG_DBG_DWORD_ENABLE_K2 \ +#define NWS_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x70012cUL -#define NWS_REG_DBG_SHIFT_K2 \ +#define NWS_REG_DBG_SHIFT_K2_E5 \ 0x700130UL -#define NWS_REG_DBG_FORCE_VALID_K2 \ +#define NWS_REG_DBG_FORCE_VALID_K2_E5 \ 0x700134UL -#define NWS_REG_DBG_FORCE_FRAME_K2 \ +#define NWS_REG_DBG_FORCE_FRAME_K2_E5 \ 0x700138UL -#define MS_REG_DBG_SELECT_K2 \ +#define MS_REG_DBG_SELECT_K2_E5 \ 0x6a0228UL -#define MS_REG_DBG_DWORD_ENABLE_K2 \ +#define MS_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x6a022cUL -#define MS_REG_DBG_SHIFT_K2 \ +#define MS_REG_DBG_SHIFT_K2_E5 \ 0x6a0230UL -#define MS_REG_DBG_FORCE_VALID_K2 \ +#define MS_REG_DBG_FORCE_VALID_K2_E5 \ 0x6a0234UL -#define MS_REG_DBG_FORCE_FRAME_K2 \ +#define MS_REG_DBG_FORCE_FRAME_K2_E5 \ 0x6a0238UL -#define PCIE_REG_DBG_COMMON_SELECT_K2 \ +#define PCIE_REG_DBG_COMMON_SELECT_K2_E5 \ 0x054398UL -#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2 \ +#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5 \ 0x05439cUL -#define PCIE_REG_DBG_COMMON_SHIFT_K2 \ +#define PCIE_REG_DBG_COMMON_SHIFT_K2_E5 \ 0x0543a0UL -#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2 \ +#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5 \ 0x0543a4UL -#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2 \ +#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \ 0x0543a8UL +#define PTLD_REG_DBG_SELECT_E5 \ + 0x5a1600UL +#define PTLD_REG_DBG_DWORD_ENABLE_E5 \ + 0x5a1604UL +#define PTLD_REG_DBG_SHIFT_E5 \ + 0x5a1608UL +#define PTLD_REG_DBG_FORCE_VALID_E5 \ + 0x5a160cUL +#define PTLD_REG_DBG_FORCE_FRAME_E5 \ + 0x5a1610UL +#define YPLD_REG_DBG_SELECT_E5 \ + 0x5c1600UL +#define YPLD_REG_DBG_DWORD_ENABLE_E5 \ + 0x5c1604UL +#define YPLD_REG_DBG_SHIFT_E5 \ + 0x5c1608UL +#define YPLD_REG_DBG_FORCE_VALID_E5 \ + 0x5c160cUL +#define YPLD_REG_DBG_FORCE_FRAME_E5 \ + 0x5c1610UL +#define RGSRC_REG_DBG_SELECT_E5 \ + 0x320040UL +#define RGSRC_REG_DBG_DWORD_ENABLE_E5 \ + 0x320044UL +#define RGSRC_REG_DBG_SHIFT_E5 \ + 0x320048UL +#define RGSRC_REG_DBG_FORCE_VALID_E5 \ + 0x32004cUL +#define RGSRC_REG_DBG_FORCE_FRAME_E5 \ + 0x320050UL +#define 
TGSRC_REG_DBG_SELECT_E5 \ + 0x322040UL +#define TGSRC_REG_DBG_DWORD_ENABLE_E5 \ + 0x322044UL +#define TGSRC_REG_DBG_SHIFT_E5 \ + 0x322048UL +#define TGSRC_REG_DBG_FORCE_VALID_E5 \ + 0x32204cUL +#define TGSRC_REG_DBG_FORCE_FRAME_E5 \ + 0x322050UL #define MISC_REG_RESET_PL_UA \ 0x008050UL #define MISC_REG_RESET_PL_HV \ @@ -1415,7 +1463,7 @@ 0x1940000UL #define SEM_FAST_REG_INT_RAM \ 0x020000UL -#define SEM_FAST_REG_INT_RAM_SIZE \ +#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \ 20480 #define GRC_REG_TRACE_FIFO_VALID_DATA \ 0x050064UL @@ -1433,6 +1481,8 @@ 0x340800UL #define BRB_REG_BIG_RAM_DATA \ 0x341500UL +#define BRB_REG_BIG_RAM_DATA_SIZE \ + 64 #define SEM_FAST_REG_STALL_0_BB_K2 \ 0x000488UL #define SEM_FAST_REG_STALLED \ @@ -1451,7 +1501,7 @@ 0x238c30UL #define MISCS_REG_BLOCK_256B_EN \ 0x009074UL -#define MCP_REG_SCRATCH_SIZE \ +#define MCP_REG_SCRATCH_SIZE_BB_K2 \ 57344 #define MCP_REG_CPU_REG_FILE \ 0xe05200UL @@ -1485,35 +1535,35 @@ 0x008c14UL #define NWS_REG_NWS_CMU_K2 \ 0x720000UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \ 0x000680UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \ 0x000684UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \ 0x0006c0UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \ 0x0006c4UL -#define MS_REG_MS_CMU_K2 \ +#define MS_REG_MS_CMU_K2_E5 \ 0x6a4000UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \ 0x000208UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \ 0x00020cUL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \ 0x000210UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \ 0x000214UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \ 0x000208UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \ 0x00020cUL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \ 0x000210UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \ 0x000214UL -#define PHY_PCIE_REG_PHY0_K2 \ +#define PHY_PCIE_REG_PHY0_K2_E5 \ 0x620000UL -#define PHY_PCIE_REG_PHY1_K2 \ +#define PHY_PCIE_REG_PHY1_K2_E5 \ 0x624000UL #define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL #define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index a1d33f35aad3..5e927b6cac22 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -351,7 +351,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); p_ramrod->mf_mode = MF_NPAR; } - p_ramrod->outer_tag = p_hwfn->hw_info.ovlan; + + p_ramrod->outer_tag_config.outer_tag.tci = + cpu_to_le16(p_hwfn->hw_info.ovlan); /* Place EQ address in RAMROD */ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, @@ -396,8 +398,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR; DP_VERBOSE(p_hwfn, QED_MSG_SPQ, - "Setting event_ring_sb [id %04x index %02x], outer_tag 
[%d]\n", - sb, sb_index, p_ramrod->outer_tag); + "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n", + sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci); rc = qed_spq_post(p_hwfn, p_ent, NULL); diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index cd9a0297ebc5..1673fc90027f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -213,7 +213,7 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq) { - struct core_conn_context *p_cxt; + struct e4_core_conn_context *p_cxt; struct qed_cxt_info cxt_info; u16 physical_q; int rc; @@ -231,11 +231,11 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, p_cxt = cxt_info.p_cxt; SET_FIELD(p_cxt->xstorm_ag_context.flags10, - XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); + E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); SET_FIELD(p_cxt->xstorm_ag_context.flags1, - XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); + E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); SET_FIELD(p_cxt->xstorm_ag_context.flags9, - XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); + E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); /* QM physical queue */ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 3f40b1de7957..5acb91b3564c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -153,9 +153,9 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn, return qed_spq_post(p_hwfn, p_ent, NULL); } -static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, - int rel_vf_id, - bool b_enabled_only, bool b_non_malicious) +bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, + int rel_vf_id, + bool b_enabled_only, bool b_non_malicious) { if (!p_hwfn->pf_iov_info) { DP_NOTICE(p_hwfn->cdev, "No iov info\n"); @@ -1621,7 +1621,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, /* fill in pfdev info */ pfdev_info->chip_num = p_hwfn->cdev->chip_num; pfdev_info->db_size = 0; - pfdev_info->indices_per_sb = PIS_PER_SB; + pfdev_info->indices_per_sb = PIS_PER_SB_E4; pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; @@ -3582,11 +3582,11 @@ static int qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) { - u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS]; + u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4]; int i, cnt; /* Read initial consumers & producers */ - for (i = 0; i < MAX_NUM_VOQS; i++) { + for (i = 0; i < MAX_NUM_VOQS_E4; i++) { u32 prod; cons[i] = qed_rd(p_hwfn, p_ptt, @@ -3601,7 +3601,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, /* Wait for consumers to pass the producers */ i = 0; for (cnt = 0; cnt < 50; cnt++) { - for (; i < MAX_NUM_VOQS; i++) { + for (; i < MAX_NUM_VOQS_E4; i++) { u32 tmp; tmp = qed_rd(p_hwfn, p_ptt, @@ -3611,7 +3611,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, break; } - if (i == MAX_NUM_VOQS) + if (i == MAX_NUM_VOQS_E4) break; msleep(20); @@ -4237,6 +4237,7 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int vfid, int val) { + struct qed_mcp_link_state *p_link; struct qed_vf_info *vf; u8 abs_vp_id = 0; int rc; @@ -4249,7 +4250,10 @@ static int qed_iov_configure_tx_rate(struct 
qed_hwfn *p_hwfn, if (rc) return rc; - return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val); + p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output; + + return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val, + p_link->speed); } static int diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 3955929ba892..9a8fd79611f2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -274,6 +274,23 @@ enum qed_iov_wq_flag { #ifdef CONFIG_QED_SRIOV /** + * @brief Check if given VF ID @vfid is valid + * w.r.t. @b_enabled_only value + * if b_enabled_only = true - only enabled VF id is valid + * else any VF id less than max_vfs is valid + * + * @param p_hwfn + * @param rel_vf_id - Relative VF ID + * @param b_enabled_only - consider only enabled VF + * @param b_non_malicious - true iff we want to validate vf isn't malicious. + * + * @return bool - true for valid VF ID + */ +bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, + int rel_vf_id, + bool b_enabled_only, bool b_non_malicious); + +/** * @brief - Given a VF index, return index of next [including that] active VF. * * @param p_hwfn @@ -376,6 +393,13 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev); int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled); void qed_inform_vf_link_state(struct qed_hwfn *hwfn); #else +static inline bool +qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, + int rel_vf_id, bool b_enabled_only, bool b_non_malicious) +{ + return false; +} + static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) { diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index a3a70ade411f..9935978c5542 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -40,6 +40,7 @@ #include <linux/kernel.h> #include <linux/mutex.h> #include <linux/bpf.h> +#include <net/xdp.h> #include <linux/qed/qede_rdma.h> #include <linux/io.h> #ifdef CONFIG_RFS_ACCEL @@ -52,9 +53,9 @@ #include <linux/qed/qed_eth_if.h> #define QEDE_MAJOR_VERSION 8 -#define QEDE_MINOR_VERSION 10 -#define QEDE_REVISION_VERSION 10 -#define QEDE_ENGINEERING_VERSION 21 +#define QEDE_MINOR_VERSION 33 +#define QEDE_REVISION_VERSION 0 +#define QEDE_ENGINEERING_VERSION 20 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ __stringify(QEDE_MINOR_VERSION) "." \ __stringify(QEDE_REVISION_VERSION) "." 
\ @@ -345,6 +346,7 @@ struct qede_rx_queue { u64 xdp_no_pass; void *handle; + struct xdp_rxq_info xdp_rxq; }; union db_prod { @@ -494,6 +496,8 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid); void qede_vlan_mark_nonconfigured(struct qede_dev *edev); int qede_configure_vlan_filters(struct qede_dev *edev); +netdev_features_t qede_fix_features(struct net_device *dev, + netdev_features_t features); int qede_set_features(struct net_device *dev, netdev_features_t features); void qede_set_rx_mode(struct net_device *ndev); void qede_config_rx_mode(struct net_device *ndev); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index dae741270022..4ca3847fffd4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -940,6 +940,9 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu) DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "Configuring MTU size of %d\n", new_mtu); + if (new_mtu > PAGE_SIZE) + ndev->features &= ~NETIF_F_GRO_HW; + /* Set the mtu field and re-start the interface if needed */ args.u.mtu = new_mtu; args.func = &qede_update_mtu;
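The MTU hook above and the qede_fix_features() hunk just below replace qede's private gro_disable bookkeeping with the generic NETIF_F_GRO_HW feature flag. The dependency rule is: hardware GRO stays available only while software GRO is enabled, no XDP program is attached, and the MTU fits in one page. A small userspace model of that predicate (PAGE_SIZE and the feature bits are stand-ins for the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define F_GRO		(1u << 0)	/* stand-in for NETIF_F_GRO */
#define F_GRO_HW	(1u << 1)	/* stand-in for NETIF_F_GRO_HW */

/* Mirrors the qede_fix_features() rule from this patch. */
static unsigned int fix_features(unsigned int features, bool has_xdp,
				 unsigned int mtu)
{
	if (has_xdp || mtu > PAGE_SIZE || !(features & F_GRO))
		features &= ~F_GRO_HW;
	return features;
}

int main(void)
{
	/* HW GRO survives only when all three conditions hold. */
	printf("%#x\n", fix_features(F_GRO | F_GRO_HW, false, 1500)); /* 0x3 */
	printf("%#x\n", fix_features(F_GRO | F_GRO_HW, true, 1500));  /* 0x1 */
	printf("%#x\n", fix_features(F_GRO | F_GRO_HW, false, 9000)); /* 0x1 */
	printf("%#x\n", fix_features(F_GRO_HW, false, 1500));         /* 0 */
	return 0;
}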
"Adding" : "Deleting", @@ -110,8 +118,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev, n->used = true; n->filter_op = add_fltr; - op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0, - rxq_id, add_fltr); + op->ntuple_filter_config(edev->cdev, n, ¶ms); } static void @@ -141,7 +148,10 @@ qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev, edev->arfs->filter_count++; if (edev->arfs->filter_count == 1 && !edev->arfs->enable) { - edev->ops->configure_arfs_searcher(edev->cdev, true); + enum qed_filter_config_mode mode; + + mode = QED_FILTER_CONFIG_MODE_5_TUPLE; + edev->ops->configure_arfs_searcher(edev->cdev, mode); edev->arfs->enable = true; } @@ -160,8 +170,11 @@ qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev, edev->arfs->filter_count--; if (!edev->arfs->filter_count && edev->arfs->enable) { + enum qed_filter_config_mode mode; + + mode = QED_FILTER_CONFIG_MODE_DISABLE; edev->arfs->enable = false; - edev->ops->configure_arfs_searcher(edev->cdev, false); + edev->ops->configure_arfs_searcher(edev->cdev, mode); } } @@ -255,8 +268,11 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr) if (!edev->arfs->filter_count) { if (edev->arfs->enable) { + enum qed_filter_config_mode mode; + + mode = QED_FILTER_CONFIG_MODE_DISABLE; edev->arfs->enable = false; - edev->ops->configure_arfs_searcher(edev->cdev, false); + edev->ops->configure_arfs_searcher(edev->cdev, mode); } #ifdef CONFIG_RFS_ACCEL } else { @@ -895,19 +911,26 @@ static void qede_set_features_reload(struct qede_dev *edev, edev->ndev->features = args->u.features; } +netdev_features_t qede_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct qede_dev *edev = netdev_priv(dev); + + if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE || + !(features & NETIF_F_GRO)) + features &= ~NETIF_F_GRO_HW; + + return features; +} + int qede_set_features(struct net_device *dev, netdev_features_t features) { struct qede_dev *edev = netdev_priv(dev); netdev_features_t changes = features ^ dev->features; bool need_reload = false; - /* No action needed if hardware GRO is disabled during driver load */ - if (changes & NETIF_F_GRO) { - if (dev->features & NETIF_F_GRO) - need_reload = !edev->gro_disable; - else - need_reload = edev->gro_disable; - } + if (changes & NETIF_F_GRO_HW) + need_reload = true; if (need_reload) { struct qede_reload_args args; diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 48ec4c56cddf..dafc079ab6b9 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1006,6 +1006,7 @@ static bool qede_rx_xdp(struct qede_dev *edev, xdp.data = xdp.data_hard_start + *data_offset; xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + *len; + xdp.rxq = &rxq->xdp_rxq; /* Queues always have a full reset currently, so for the time * being until there's atomic program replace just mark read diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 8f9b3eb82137..2db70eabddfe 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -545,6 +545,7 @@ static const struct net_device_ops qede_netdev_ops = { #endif .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, + .ndo_fix_features = qede_fix_features, .ndo_set_features = qede_set_features, .ndo_get_stats64 = qede_get_stats64, #ifdef CONFIG_QED_SRIOV @@ -572,6 +573,7 
@@ -572,6 +573,7 @@ static const struct net_device_ops qede_netdev_vf_ops = { .ndo_change_mtu = qede_change_mtu, .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, + .ndo_fix_features = qede_fix_features, .ndo_set_features = qede_set_features, .ndo_get_stats64 = qede_get_stats64, .ndo_udp_tunnel_add = qede_udp_tunnel_add, @@ -589,6 +591,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = { .ndo_change_mtu = qede_change_mtu, .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, + .ndo_fix_features = qede_fix_features, .ndo_set_features = qede_set_features, .ndo_get_stats64 = qede_get_stats64, .ndo_udp_tunnel_add = qede_udp_tunnel_add, @@ -676,7 +679,7 @@ static void qede_init_ndev(struct qede_dev *edev) ndev->priv_flags |= IFF_UNICAST_FLT; /* user-changeable features */ - hw_features = NETIF_F_GRO | NETIF_F_SG | + hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; @@ -762,6 +765,12 @@ static void qede_free_fp_array(struct qede_dev *edev) fp = &edev->fp_array[i]; kfree(fp->sb_info); + /* Handle the mem-alloc failure case where qede_init_fp + * didn't register xdp_rxq_info yet. + * Applies only to RX queues (fp->type & QEDE_FASTPATH_RX) + */ + if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq)) + xdp_rxq_info_unreg(&fp->rxq->xdp_rxq); kfree(fp->rxq); kfree(fp->xdp_tx); kfree(fp->txq); @@ -1068,10 +1077,6 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) pci_set_drvdata(pdev, NULL); - /* Release edev's reference to XDP's bpf if such exist */ - if (edev->xdp_prog) - bpf_prog_put(edev->xdp_prog); - /* Use global ops since we've freed edev */ qed_ops->common->slowpath_stop(cdev); if (system_state == SYSTEM_POWER_OFF) @@ -1148,7 +1153,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info, static int qede_alloc_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info, u16 sb_id) { - struct status_block *sb_virt; + struct status_block_e4 *sb_virt; dma_addr_t sb_phys; int rc; @@ -1232,18 +1237,9 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) dma_addr_t mapping; int i; - /* Don't perform FW aggregations in case of XDP */ - if (edev->xdp_prog) - edev->gro_disable = 1; - if (edev->gro_disable) return 0; - if (edev->ndev->mtu > PAGE_SIZE) { - edev->gro_disable = 1; - return 0; - } - for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; struct sw_rx_data *replace_buf = &tpa_info->buffer; @@ -1273,6 +1269,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) err: qede_free_sge_mem(edev, rxq); edev->gro_disable = 1; + edev->ndev->features &= ~NETIF_F_GRO_HW; return -ENOMEM; } @@ -1502,6 +1499,10 @@ static void qede_init_fp(struct qede_dev *edev) else fp->rxq->data_direction = DMA_FROM_DEVICE; fp->rxq->dev = &edev->pdev->dev; + + /* The driver has no error path from here */ + WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev, + fp->rxq->rxq_id) < 0); } if (fp->type & QEDE_FASTPATH_TX) { @@ -1515,7 +1516,7 @@ static void qede_init_fp(struct qede_dev *edev) edev->ndev->name, queue_id); } - edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO); + edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW); } static int qede_set_real_num_queues(struct qede_dev *edev) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 
f7080d0ab874..46b0372dd032 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3891,7 +3891,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) struct list_head *head = &mbx->cmd_q; struct qlcnic_cmd_args *cmd = NULL; - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); while (!list_empty(head)) { cmd = list_entry(head->next, struct qlcnic_cmd_args, list); @@ -3902,7 +3902,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) qlcnic_83xx_notify_cmd_completion(adapter, cmd); } - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); } static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter) @@ -3938,12 +3938,12 @@ static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter, { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); list_del(&cmd->list); mbx->num_cmds--; - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); qlcnic_83xx_notify_cmd_completion(adapter, cmd); } @@ -4008,7 +4008,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, init_completion(&cmd->completion); cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN; - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); list_add_tail(&cmd->list, &mbx->cmd_q); mbx->num_cmds++; @@ -4016,7 +4016,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT; queue_work(mbx->work_q, &mbx->work); - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); return 0; } @@ -4112,15 +4112,15 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT; spin_unlock_irqrestore(&mbx->aen_lock, flags); - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); if (list_empty(head)) { - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); return; } cmd = list_entry(head->next, struct qlcnic_cmd_args, list); - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); mbx_ops->encode_cmd(adapter, cmd); mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h index 5028fb4bec2b..4beedb8faa1e 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h @@ -114,8 +114,9 @@ struct emac_tpd { #define TPD_INSTC_SET(tpd, val) BITS_SET((tpd)->word[3], 17, 17, val) /* High-14bit Buffer Address, So, the 64b-bit address is * {DESC_CTRL_11_TX_DATA_HIADDR[17:0],(register) BUFFER_ADDR_H, BUFFER_ADDR_L} + * Extend TPD_BUFFER_ADDR_H to [31, 18], because we never enable timestamping. */ -#define TPD_BUFFER_ADDR_H_SET(tpd, val) BITS_SET((tpd)->word[3], 18, 30, val) +#define TPD_BUFFER_ADDR_H_SET(tpd, val) BITS_SET((tpd)->word[3], 18, 31, val) /* Format D. Word offset from the 1st byte of this packet to start to calculate * the custom checksum. */ diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 38c924bdd32e..13235baf4766 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -615,8 +615,11 @@ static int emac_probe(struct platform_device *pdev) u32 reg; int ret; - /* The TPD buffer address is limited to 45 bits. 
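The qlcnic hunks above convert every mbx->queue_lock acquisition from spin_lock() to spin_lock_bh(). That is the standard fix when a lock is taken both in process context and from bottom-half context: unless the process-context holder disables BHs, a softirq arriving on the same CPU can interrupt the holder and spin forever on the lock it already owns. A minimal kernel-style sketch of the pattern (hypothetical structures, not qlcnic's actual ones):

#include <linux/spinlock.h>
#include <linux/list.h>

struct cmd_queue {
	spinlock_t lock;	/* shared with a BH-context completion path */
	struct list_head cmds;
};

static void enqueue_cmd(struct cmd_queue *q, struct list_head *cmd)
{
	spin_lock_bh(&q->lock);		/* keep softirqs off this CPU... */
	list_add_tail(cmd, &q->cmds);
	spin_unlock_bh(&q->lock);	/* ...so a BH can't spin on our lock */
}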
*/ - ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(45)); + /* The TPD buffer address is limited to: + * 1. PTP: 45bits. (Driver doesn't support yet.) + * 2. NON-PTP: 46bits. + */ + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46)); if (ret) { dev_err(&pdev->dev, "could not set DMA mask\n"); return ret; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index df21e900f874..7e7704daf5f1 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -143,11 +143,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { - int ingress_format = RMNET_INGRESS_FORMAT_DEMUXING | - RMNET_INGRESS_FORMAT_DEAGGREGATION | - RMNET_INGRESS_FORMAT_MAP; - int egress_format = RMNET_EGRESS_FORMAT_MUXING | - RMNET_EGRESS_FORMAT_MAP; + u32 data_format = RMNET_INGRESS_FORMAT_DEAGGREGATION; struct net_device *real_dev; int mode = RMNET_EPMODE_VND; struct rmnet_endpoint *ep; @@ -181,13 +177,20 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, if (err) goto err2; - netdev_dbg(dev, "data format [ingress 0x%08X] [egress 0x%08X]\n", - ingress_format, egress_format); - port->egress_data_format = egress_format; - port->ingress_data_format = ingress_format; port->rmnet_mode = mode; hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); + + if (data[IFLA_VLAN_FLAGS]) { + struct ifla_vlan_flags *flags; + + flags = nla_data(data[IFLA_VLAN_FLAGS]); + data_format = flags->flags & flags->mask; + } + + netdev_dbg(dev, "data format [0x%08X]\n", data_format); + port->data_format = data_format; + return 0; err2: @@ -317,9 +320,49 @@ static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[], return 0; } +static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct net_device *real_dev; + struct rmnet_endpoint *ep; + struct rmnet_port *port; + u16 mux_id; + + real_dev = __dev_get_by_index(dev_net(dev), + nla_get_u32(tb[IFLA_LINK])); + + if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev)) + return -ENODEV; + + port = rmnet_get_port_rtnl(real_dev); + + if (data[IFLA_VLAN_ID]) { + mux_id = nla_get_u16(data[IFLA_VLAN_ID]); + ep = rmnet_get_endpoint(port, priv->mux_id); + + hlist_del_init_rcu(&ep->hlnode); + hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); + + ep->mux_id = mux_id; + priv->mux_id = mux_id; + } + + if (data[IFLA_VLAN_FLAGS]) { + struct ifla_vlan_flags *flags; + + flags = nla_data(data[IFLA_VLAN_FLAGS]); + port->data_format = flags->flags & flags->mask; + } + + return 0; +} + static size_t rmnet_get_size(const struct net_device *dev) { - return nla_total_size(2); /* IFLA_VLAN_ID */ + return nla_total_size(2) /* IFLA_VLAN_ID */ + + nla_total_size(sizeof(struct ifla_vlan_flags)); /* IFLA_VLAN_FLAGS */ } struct rtnl_link_ops rmnet_link_ops __read_mostly = { @@ -331,6 +374,7 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = { .newlink = rmnet_newlink, .dellink = rmnet_dellink, .get_size = rmnet_get_size, + .changelink = rmnet_changelink, }; /* Needs either rcu_read_lock() or rtnl lock */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index c19259eea99e..00e4634100d3 100644 --- 
a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -32,8 +32,7 @@ struct rmnet_endpoint { */ struct rmnet_port { struct net_device *dev; - u32 ingress_data_format; - u32 egress_data_format; + u32 data_format; u8 nr_rmnet_devs; u8 rmnet_mode; struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP]; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 08e4afc0ab39..601edec28c5f 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -15,6 +15,8 @@ #include <linux/netdevice.h> #include <linux/netdev_features.h> +#include <linux/if_arp.h> +#include <net/sock.h> #include "rmnet_private.h" #include "rmnet_config.h" #include "rmnet_vnd.h" @@ -64,19 +66,19 @@ __rmnet_map_ingress_handler(struct sk_buff *skb, struct rmnet_port *port) { struct rmnet_endpoint *ep; + u16 len, pad; u8 mux_id; - u16 len; if (RMNET_MAP_GET_CD_BIT(skb)) { - if (port->ingress_data_format - & RMNET_INGRESS_FORMAT_MAP_COMMANDS) + if (port->data_format & RMNET_INGRESS_FORMAT_MAP_COMMANDS) return rmnet_map_command(skb, port); goto free_skb; } mux_id = RMNET_MAP_GET_MUX_ID(skb); - len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb); + pad = RMNET_MAP_GET_PAD(skb); + len = RMNET_MAP_GET_LENGTH(skb) - pad; if (mux_id >= RMNET_MAX_LOGICAL_EP) goto free_skb; @@ -89,8 +91,14 @@ __rmnet_map_ingress_handler(struct sk_buff *skb, /* Subtract MAP header */ skb_pull(skb, sizeof(struct rmnet_map_header)); - skb_trim(skb, len); rmnet_set_skb_proto(skb); + + if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) { + if (!rmnet_map_checksum_downlink_packet(skb, len + pad)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + skb_trim(skb, len); rmnet_deliver_skb(skb); return; @@ -104,8 +112,17 @@ rmnet_map_ingress_handler(struct sk_buff *skb, { struct sk_buff *skbn; - if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) { - while ((skbn = rmnet_map_deaggregate(skb)) != NULL) + if (skb->dev->type == ARPHRD_ETHER) { + if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) { + kfree_skb(skb); + return; + } + + skb_push(skb, ETH_HLEN); + } + + if (port->data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) { + while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) __rmnet_map_ingress_handler(skbn, port); consume_skb(skb); @@ -124,29 +141,32 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, additional_header_len = 0; required_headroom = sizeof(struct rmnet_map_header); + if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) { + additional_header_len = sizeof(struct rmnet_map_ul_csum_header); + required_headroom += additional_header_len; + } + if (skb_headroom(skb) < required_headroom) { if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) goto fail; } + if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) + rmnet_map_checksum_uplink_packet(skb, orig_dev); + map_header = rmnet_map_add_map_header(skb, additional_header_len, 0); if (!map_header) goto fail; - if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) { - if (mux_id == 0xff) - map_header->mux_id = 0; - else - map_header->mux_id = mux_id; - } + map_header->mux_id = mux_id; skb->protocol = htons(ETH_P_MAP); - return RMNET_MAP_SUCCESS; + return 0; fail: kfree_skb(skb); - return RMNET_MAP_CONSUMED; + return -ENOMEM; } static void @@ -178,8 +198,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) switch (port->rmnet_mode) { case 
RMNET_EPMODE_VND: - if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) - rmnet_map_ingress_handler(skb, port); + rmnet_map_ingress_handler(skb, port); break; case RMNET_EPMODE_BRIDGE: rmnet_bridge_handler(skb, port->bridge_ep); @@ -201,6 +220,8 @@ void rmnet_egress_handler(struct sk_buff *skb) struct rmnet_priv *priv; u8 mux_id; + sk_pacing_shift_update(skb->sk, 8); + orig_dev = skb->dev; priv = netdev_priv(orig_dev); skb->dev = priv->real_dev; @@ -212,19 +233,8 @@ void rmnet_egress_handler(struct sk_buff *skb) return; } - if (port->egress_data_format & RMNET_EGRESS_FORMAT_MAP) { - switch (rmnet_map_egress_handler(skb, port, mux_id, orig_dev)) { - case RMNET_MAP_CONSUMED: - return; - - case RMNET_MAP_SUCCESS: - break; - - default: - kfree_skb(skb); - return; - } - } + if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev)) + return; rmnet_vnd_tx_fixup(skb, orig_dev); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h index 3af3fe7b5457..6ce31e29136d 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h @@ -30,15 +30,6 @@ struct rmnet_map_control_command { }; } __aligned(1); -enum rmnet_map_results { - RMNET_MAP_SUCCESS, - RMNET_MAP_CONSUMED, - RMNET_MAP_GENERAL_FAILURE, - RMNET_MAP_NOT_ENABLED, - RMNET_MAP_FAILED_AGGREGATION, - RMNET_MAP_FAILED_MUX -}; - enum rmnet_map_commands { RMNET_MAP_COMMAND_NONE, RMNET_MAP_COMMAND_FLOW_DISABLE, @@ -56,6 +47,22 @@ struct rmnet_map_header { u16 pkt_len; } __aligned(1); +struct rmnet_map_dl_csum_trailer { + u8 reserved1; + u8 valid:1; + u8 reserved2:7; + u16 csum_start_offset; + u16 csum_length; + __be16 csum_value; +} __aligned(1); + +struct rmnet_map_ul_csum_header { + __be16 csum_start_offset; + u16 csum_insert_offset:14; + u16 udp_ip4_ind:1; + u16 csum_enabled:1; +} __aligned(1); + #define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \ (Y)->data)->mux_id) #define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \ @@ -76,10 +83,13 @@ struct rmnet_map_header { #define RMNET_MAP_NO_PAD_BYTES 0 #define RMNET_MAP_ADD_PAD_BYTES 1 -u8 rmnet_map_demultiplex(struct sk_buff *skb); -struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb); +struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, + struct rmnet_port *port); struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, int hdrlen, int pad); void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port); +int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len); +void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev); #endif /* _RMNET_MAP_H_ */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index 51e604923ac1..6bc328fb88e1 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -58,11 +58,24 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb, } static void rmnet_map_send_ack(struct sk_buff *skb, - unsigned char type) + unsigned char type, + struct rmnet_port *port) { struct rmnet_map_control_command *cmd; int xmit_status; + if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) { + if (skb->len < sizeof(struct rmnet_map_header) + + RMNET_MAP_GET_LENGTH(skb) + + sizeof(struct rmnet_map_dl_csum_trailer)) { + kfree_skb(skb); + return; + } + + skb_trim(skb, skb->len - + sizeof(struct rmnet_map_dl_csum_trailer)); + } + 
skb->protocol = htons(ETH_P_MAP); cmd = RMNET_MAP_GET_CMD_START(skb); @@ -100,5 +113,5 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port) break; } if (rc == RMNET_MAP_COMMAND_ACK) - rmnet_map_send_ack(skb, rc); + rmnet_map_send_ack(skb, rc, port); } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index 86b8c758f94e..c74a6c56d315 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -14,6 +14,9 @@ */ #include <linux/netdevice.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <net/ip6_checksum.h> #include "rmnet_config.h" #include "rmnet_map.h" #include "rmnet_private.h" @@ -21,6 +24,233 @@ #define RMNET_MAP_DEAGGR_SPACING 64 #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2) +static __sum16 *rmnet_map_get_csum_field(unsigned char protocol, + const void *txporthdr) +{ + __sum16 *check = NULL; + + switch (protocol) { + case IPPROTO_TCP: + check = &(((struct tcphdr *)txporthdr)->check); + break; + + case IPPROTO_UDP: + check = &(((struct udphdr *)txporthdr)->check); + break; + + default: + check = NULL; + break; + } + + return check; +} + +static int +rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb, + struct rmnet_map_dl_csum_trailer *csum_trailer) +{ + __sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum; + u16 csum_value, csum_value_final; + struct iphdr *ip4h; + void *txporthdr; + __be16 addend; + + ip4h = (struct iphdr *)(skb->data); + if ((ntohs(ip4h->frag_off) & IP_MF) || + ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) + return -EOPNOTSUPP; + + txporthdr = skb->data + ip4h->ihl * 4; + + csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr); + + if (!csum_field) + return -EPROTONOSUPPORT; + + /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */ + if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) + return 0; + + csum_value = ~ntohs(csum_trailer->csum_value); + hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl); + ip_payload_csum = csum16_sub((__force __sum16)csum_value, + (__force __be16)hdr_csum); + + pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr, + ntohs(ip4h->tot_len) - ip4h->ihl * 4, + ip4h->protocol, 0); + addend = (__force __be16)ntohs((__force __be16)pseudo_csum); + pseudo_csum = csum16_add(ip_payload_csum, addend); + + addend = (__force __be16)ntohs((__force __be16)*csum_field); + csum_temp = ~csum16_sub(pseudo_csum, addend); + csum_value_final = (__force u16)csum_temp; + + if (unlikely(csum_value_final == 0)) { + switch (ip4h->protocol) { + case IPPROTO_UDP: + /* RFC 768 - DL4 1's complement rule for UDP csum 0 */ + csum_value_final = ~csum_value_final; + break; + + case IPPROTO_TCP: + /* DL4 Non-RFC compliant TCP checksum found */ + if (*csum_field == (__force __sum16)0xFFFF) + csum_value_final = ~csum_value_final; + break; + } + } + + if (csum_value_final == ntohs((__force __be16)*csum_field)) + return 0; + else + return -EINVAL; +} + +#if IS_ENABLED(CONFIG_IPV6) +static int +rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb, + struct rmnet_map_dl_csum_trailer *csum_trailer) +{ + __sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp; + u16 csum_value, csum_value_final; + __be16 ip6_hdr_csum, addend; + struct ipv6hdr *ip6h; + void *txporthdr; + u32 length; + + ip6h = (struct ipv6hdr *)(skb->data); + + txporthdr = skb->data + sizeof(struct ipv6hdr); + csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr); + + if 
(!csum_field) + return -EPROTONOSUPPORT; + + csum_value = ~ntohs(csum_trailer->csum_value); + ip6_hdr_csum = (__force __be16) + ~ntohs((__force __be16)ip_compute_csum(ip6h, + (int)(txporthdr - (void *)(skb->data)))); + ip6_payload_csum = csum16_sub((__force __sum16)csum_value, + ip6_hdr_csum); + + length = (ip6h->nexthdr == IPPROTO_UDP) ? + ntohs(((struct udphdr *)txporthdr)->len) : + ntohs(ip6h->payload_len); + pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + length, ip6h->nexthdr, 0)); + addend = (__force __be16)ntohs((__force __be16)pseudo_csum); + pseudo_csum = csum16_add(ip6_payload_csum, addend); + + addend = (__force __be16)ntohs((__force __be16)*csum_field); + csum_temp = ~csum16_sub(pseudo_csum, addend); + csum_value_final = (__force u16)csum_temp; + + if (unlikely(csum_value_final == 0)) { + switch (ip6h->nexthdr) { + case IPPROTO_UDP: + /* RFC 2460 section 8.1 + * DL6 One's complement rule for UDP checksum 0 + */ + csum_value_final = ~csum_value_final; + break; + + case IPPROTO_TCP: + /* DL6 Non-RFC compliant TCP checksum found */ + if (*csum_field == (__force __sum16)0xFFFF) + csum_value_final = ~csum_value_final; + break; + } + } + + if (csum_value_final == ntohs((__force __be16)*csum_field)) + return 0; + else + return -EINVAL; +} +#endif + +static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr) +{ + struct iphdr *ip4h = (struct iphdr *)iphdr; + void *txphdr; + u16 *csum; + + txphdr = iphdr + ip4h->ihl * 4; + + if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) { + csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr); + *csum = ~(*csum); + } +} + +static void +rmnet_map_ipv4_ul_csum_header(void *iphdr, + struct rmnet_map_ul_csum_header *ul_header, + struct sk_buff *skb) +{ + struct iphdr *ip4h = (struct iphdr *)iphdr; + __be16 *hdr = (__be16 *)ul_header, offset; + + offset = htons((__force u16)(skb_transport_header(skb) - + (unsigned char *)iphdr)); + ul_header->csum_start_offset = offset; + ul_header->csum_insert_offset = skb->csum_offset; + ul_header->csum_enabled = 1; + if (ip4h->protocol == IPPROTO_UDP) + ul_header->udp_ip4_ind = 1; + else + ul_header->udp_ip4_ind = 0; + + /* Changing remaining fields to network order */ + hdr++; + *hdr = htons((__force u16)*hdr); + + skb->ip_summed = CHECKSUM_NONE; + + rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr); +} + +#if IS_ENABLED(CONFIG_IPV6) +static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr) +{ + struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr; + void *txphdr; + u16 *csum; + + txphdr = ip6hdr + sizeof(struct ipv6hdr); + + if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) { + csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr); + *csum = ~(*csum); + } +} + +static void +rmnet_map_ipv6_ul_csum_header(void *ip6hdr, + struct rmnet_map_ul_csum_header *ul_header, + struct sk_buff *skb) +{ + __be16 *hdr = (__be16 *)ul_header, offset; + + offset = htons((__force u16)(skb_transport_header(skb) - + (unsigned char *)ip6hdr)); + ul_header->csum_start_offset = offset; + ul_header->csum_insert_offset = skb->csum_offset; + ul_header->csum_enabled = 1; + ul_header->udp_ip4_ind = 0; + + /* Changing remaining fields to network order */ + hdr++; + *hdr = htons((__force u16)*hdr); + + skb->ip_summed = CHECKSUM_NONE; + + rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr); +} +#endif + /* Adds MAP header to front of skb->data * Padding is calculated and set appropriately in MAP header. Mux ID is * initialized to 0. 
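Both trailer validators above lean on 16-bit one's-complement arithmetic: csum16_sub() peels the IP header's contribution out of the hardware sum, and csum16_add() folds in the pseudo-header. A self-contained sketch of those primitives (userspace stand-ins, not the kernel's implementations):

#include <stdio.h>
#include <stdint.h>

/* One's-complement 16-bit add with end-around carry. */
static uint16_t csum16_add(uint16_t a, uint16_t b)
{
	uint32_t sum = (uint32_t)a + b;

	return (uint16_t)((sum & 0xffff) + (sum >> 16));
}

/* Subtraction is addition of the one's complement. */
static uint16_t csum16_sub(uint16_t a, uint16_t b)
{
	return csum16_add(a, (uint16_t)~b);
}

int main(void)
{
	/* If the hardware sum covers IP header + payload, removing the
	 * header's contribution leaves the payload-only sum, which is
	 * exactly what the trailer verification needs.
	 */
	uint16_t hdr = 0x1234, payload = 0xabcd;
	uint16_t whole = csum16_add(hdr, payload);

	printf("payload sum recovered: %#x (expect 0xabcd)\n",
	       csum16_sub(whole, hdr));
	return 0;
}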
@@ -32,9 +262,6 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, u32 padding, map_datalen; u8 *padbytes; - if (skb_headroom(skb) < sizeof(struct rmnet_map_header)) - return NULL; - map_datalen = skb->len - hdrlen; map_header = (struct rmnet_map_header *) skb_push(skb, sizeof(struct rmnet_map_header)); @@ -69,7 +296,8 @@ done: * returned, indicating that there are no more packets to deaggregate. Caller * is responsible for freeing the original skb. */ -struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb) +struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, + struct rmnet_port *port) { struct rmnet_map_header *maph; struct sk_buff *skbn; @@ -81,6 +309,9 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb) maph = (struct rmnet_map_header *)skb->data; packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header); + if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) + packet_len += sizeof(struct rmnet_map_dl_csum_trailer); + if (((int)skb->len - (int)packet_len) < 0) return NULL; @@ -100,3 +331,73 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb) return skbn; } + +/* Validates packet checksums. Function takes a pointer to + * the beginning of a buffer which contains the IP payload + + * padding + checksum trailer. + * Only IPv4 and IPv6 are supported along with TCP & UDP. + * Fragmented or tunneled packets are not supported. + */ +int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len) +{ + struct rmnet_map_dl_csum_trailer *csum_trailer; + + if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) + return -EOPNOTSUPP; + + csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len); + + if (!csum_trailer->valid) + return -EINVAL; + + if (skb->protocol == htons(ETH_P_IP)) + return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer); + else if (skb->protocol == htons(ETH_P_IPV6)) +#if IS_ENABLED(CONFIG_IPV6) + return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer); +#else + return -EPROTONOSUPPORT; +#endif + + return 0; +} + +/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP + * packets that are supported for UL checksum offload. 
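rmnet_map_deaggregate() above now charges each carved-out packet for the trailer as well, so a short frame fails the length test instead of mis-parsing the next header. A sketch of that walk over a flat byte buffer, with the offsets and sizes assumed for illustration:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define MAP_HDR_LEN    4   /* assumed MAP header size */
#define DL_TRAILER_LEN 8   /* assumed DL checksum trailer size */

/* Count MAP packets in an aggregate; returns -1 on a truncated frame. */
static int count_map_packets(const uint8_t *buf, size_t len, int cksumv4)
{
	int n = 0;

	while (len >= MAP_HDR_LEN) {
		uint16_t pkt_len;
		size_t total;

		memcpy(&pkt_len, buf + 2, 2);   /* pkt_len at offset 2, assumed */
		total = MAP_HDR_LEN + ntohs(pkt_len);
		if (cksumv4)
			total += DL_TRAILER_LEN; /* trailer follows the payload */
		if (total > len)
			return -1;               /* short frame: stop and drop */
		buf += total;
		len -= total;
		n++;
	}
	return n;
}

int main(void)
{
	uint8_t agg[24] = {0};
	uint16_t l = htons(8);

	memcpy(agg + 2, &l, 2);                          /* one 4+8 byte packet */
	printf("%d\n", count_map_packets(agg, 12, 0));   /* 1 */
	printf("%d\n", count_map_packets(agg, 12, 1));   /* -1: trailer missing */
	return 0;
}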
+ */ +void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev) +{ + struct rmnet_map_ul_csum_header *ul_header; + void *iphdr; + + ul_header = (struct rmnet_map_ul_csum_header *) + skb_push(skb, sizeof(struct rmnet_map_ul_csum_header)); + + if (unlikely(!(orig_dev->features & + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))) + goto sw_csum; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + iphdr = (char *)ul_header + + sizeof(struct rmnet_map_ul_csum_header); + + if (skb->protocol == htons(ETH_P_IP)) { + rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb); + return; + } else if (skb->protocol == htons(ETH_P_IPV6)) { +#if IS_ENABLED(CONFIG_IPV6) + rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb); + return; +#else + goto sw_csum; +#endif + } + } + +sw_csum: + ul_header->csum_start_offset = 0; + ul_header->csum_insert_offset = 0; + ul_header->csum_enabled = 0; + ul_header->udp_ip4_ind = 0; +} diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h index 49102f922b31..de0143eaa05a 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h @@ -19,14 +19,10 @@ #define RMNET_TX_QUEUE_LEN 1000 /* Constants */ -#define RMNET_EGRESS_FORMAT_MAP BIT(1) -#define RMNET_EGRESS_FORMAT_AGGREGATION BIT(2) -#define RMNET_EGRESS_FORMAT_MUXING BIT(3) - -#define RMNET_INGRESS_FORMAT_MAP BIT(1) -#define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(2) -#define RMNET_INGRESS_FORMAT_DEMUXING BIT(3) -#define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(4) +#define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(0) +#define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(1) +#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4 BIT(2) +#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4 BIT(3) /* Replace skb->dev to a virtual rmnet device and pass up the stack */ #define RMNET_EPMODE_VND (1) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 9caa5e387450..570a227acdd8 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -185,6 +185,13 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, if (ep->egress_dev) return -EINVAL; + if (rmnet_get_endpoint(port, id)) + return -EBUSY; + + rmnet_dev->hw_features = NETIF_F_RXCSUM; + rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + rmnet_dev->hw_features |= NETIF_F_SG; + rc = register_netdevice(rmnet_dev); if (!rc) { ep->egress_dev = rmnet_dev; diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index e7ab23e87de2..81045dfa1cd8 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -748,8 +748,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, mss = skb_shinfo(skb)->gso_size; if (mss > MSSMask) { - WARN_ONCE(1, "Net bug: GSO size %d too large for 8139CP\n", - mss); + netdev_WARN_ONCE(dev, "Net bug: GSO size %d too large for 8139CP\n", + mss); goto out_dma_error; } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 734286ebe5ef..0bf7d1759250 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1395,7 +1395,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond) { void __iomem *ioaddr = tp->mmio_addr; - return RTL_R8(IBISR0) & 0x02; + return RTL_R8(IBISR0) & 0x20; } static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) @@ -1403,7 +1403,7 @@ static void 
rtl8168ep_stop_cmac(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); - rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000); + rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000); RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); } @@ -1675,33 +1675,24 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) } } -static void __rtl8169_check_link_status(struct net_device *dev, - struct rtl8169_private *tp, - void __iomem *ioaddr, bool pm) +static void rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr) { if (tp->link_ok(ioaddr)) { rtl_link_chg_patch(tp); /* This is to cancel a scheduled suspend if there's one. */ - if (pm) - pm_request_resume(&tp->pci_dev->dev); + pm_request_resume(&tp->pci_dev->dev); netif_carrier_on(dev); if (net_ratelimit()) netif_info(tp, ifup, dev, "link up\n"); } else { netif_carrier_off(dev); netif_info(tp, ifdown, dev, "link down\n"); - if (pm) - pm_schedule_suspend(&tp->pci_dev->dev, 5000); + pm_runtime_idle(&tp->pci_dev->dev); } } -static void rtl8169_check_link_status(struct net_device *dev, - struct rtl8169_private *tp, - void __iomem *ioaddr) -{ - __rtl8169_check_link_status(dev, tp, ioaddr, false); -} - #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) static u32 __rtl8169_get_wol(struct rtl8169_private *tp) @@ -4638,16 +4629,6 @@ static void rtl8169_phy_timer(struct timer_list *t) rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING); } -static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev, - void __iomem *ioaddr) -{ - iounmap(ioaddr); - pci_release_regions(pdev); - pci_clear_mwi(pdev); - pci_disable_device(pdev); - free_netdev(dev); -} - DECLARE_RTL_COND(rtl_phy_reset_cond) { return tp->phy_reset_pending(tp); @@ -4779,14 +4760,6 @@ static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data return -EOPNOTSUPP; } -static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp) -{ - if (tp->features & RTL_FEATURE_MSI) { - pci_disable_msi(pdev); - tp->features &= ~RTL_FEATURE_MSI; - } -} - static void rtl_init_mdio_ops(struct rtl8169_private *tp) { struct mdio_ops *ops = &tp->mdio_ops; @@ -7759,7 +7732,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp) rtl8169_pcierr_interrupt(dev); if (status & LinkChg) - __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); + rtl8169_check_link_status(dev, tp, tp->mmio_addr); rtl_irq_enable_all(tp); } @@ -7972,7 +7945,7 @@ static int rtl_open(struct net_device *dev) rtl_unlock_work(tp); tp->saved_wolopts = 0; - pm_runtime_put_noidle(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); rtl8169_check_link_status(dev, tp, ioaddr); out: @@ -8116,8 +8089,10 @@ static int rtl8169_runtime_suspend(struct device *device) struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); - if (!tp->TxDescArray) + if (!tp->TxDescArray) { + rtl_pll_power_down(tp); return 0; + } rtl_lock_work(tp); tp->saved_wolopts = __rtl8169_get_wol(tp); @@ -8159,9 +8134,11 @@ static int rtl8169_runtime_idle(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - return tp->TxDescArray ? 
-EBUSY : 0; + if (!netif_running(dev) || !netif_carrier_ok(dev)) + pm_schedule_suspend(device, 10000); + + return -EBUSY; } static const struct dev_pm_ops rtl8169_pm_ops = { @@ -8208,9 +8185,6 @@ static void rtl_shutdown(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = &pdev->dev; - - pm_runtime_get_sync(d); rtl8169_net_suspend(dev); @@ -8228,8 +8202,6 @@ static void rtl_shutdown(struct pci_dev *pdev) pci_wake_from_d3(pdev, true); pci_set_power_state(pdev, PCI_D3hot); } - - pm_runtime_put_noidle(d); } static void rtl_remove_one(struct pci_dev *pdev) @@ -8251,9 +8223,6 @@ static void rtl_remove_one(struct pci_dev *pdev) unregister_netdev(dev); - dma_free_coherent(&tp->pci_dev->dev, sizeof(*tp->counters), - tp->counters, tp->counters_phys_addr); - rtl_release_firmware(tp); if (pci_dev_run_wake(pdev)) @@ -8261,9 +8230,6 @@ static void rtl_remove_one(struct pci_dev *pdev) /* restore original MAC address */ rtl_rar_set(tp, dev->perm_addr); - - rtl_disable_msi(pdev, tp); - rtl8169_release_board(pdev, dev, tp->mmio_addr); } static const struct net_device_ops rtl_netdev_ops = { @@ -8440,11 +8406,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) MODULENAME, RTL8169_VERSION); } - dev = alloc_etherdev(sizeof (*tp)); - if (!dev) { - rc = -ENOMEM; - goto out; - } + dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp)); + if (!dev) + return -ENOMEM; SET_NETDEV_DEV(dev, &pdev->dev); dev->netdev_ops = &rtl_netdev_ops; @@ -8467,13 +8431,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) PCIE_LINK_STATE_CLKPM); /* enable device (incl. PCI PM wakeup and hotplug setup) */ - rc = pci_enable_device(pdev); + rc = pcim_enable_device(pdev); if (rc < 0) { netif_err(tp, probe, dev, "enable failure\n"); - goto err_out_free_dev_1; + return rc; } - if (pci_set_mwi(pdev) < 0) + if (pcim_set_mwi(pdev) < 0) netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n"); /* make sure PCI base addr 1 is MMIO */ @@ -8481,30 +8445,28 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netif_err(tp, probe, dev, "region #%d not an MMIO resource, aborting\n", region); - rc = -ENODEV; - goto err_out_mwi_2; + return -ENODEV; } /* check for weird/broken PCI region reporting */ if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { netif_err(tp, probe, dev, "Invalid PCI region size(s), aborting\n"); - rc = -ENODEV; - goto err_out_mwi_2; + return -ENODEV; } rc = pci_request_regions(pdev, MODULENAME); if (rc < 0) { netif_err(tp, probe, dev, "could not request regions\n"); - goto err_out_mwi_2; + return rc; } /* ioremap MMIO region */ - ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); + ioaddr = devm_ioremap(&pdev->dev, pci_resource_start(pdev, region), + R8169_REGS_SIZE); if (!ioaddr) { netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); - rc = -EIO; - goto err_out_free_res_3; + return -EIO; } tp->mmio_addr = ioaddr; @@ -8530,7 +8492,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc < 0) { netif_err(tp, probe, dev, "DMA configuration failed\n"); - goto err_out_unmap_4; + return rc; } } @@ -8692,16 +8654,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; - tp->counters = dma_alloc_coherent (&pdev->dev, sizeof(*tp->counters), - &tp->counters_phys_addr, GFP_KERNEL); - if 
(!tp->counters) { - rc = -ENOMEM; - goto err_out_msi_5; - } + tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), + &tp->counters_phys_addr, + GFP_KERNEL); + if (!tp->counters) + return -ENOMEM; rc = register_netdev(dev); if (rc < 0) - goto err_out_cnt_6; + return rc; pci_set_drvdata(pdev, dev); @@ -8725,30 +8686,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl8168_driver_start(tp); } - if (pci_dev_run_wake(pdev)) - pm_runtime_put_noidle(&pdev->dev); - netif_carrier_off(dev); -out: - return rc; + if (pci_dev_run_wake(pdev)) + pm_runtime_put_sync(&pdev->dev); -err_out_cnt_6: - dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters, - tp->counters_phys_addr); -err_out_msi_5: - netif_napi_del(&tp->napi); - rtl_disable_msi(pdev, tp); -err_out_unmap_4: - iounmap(ioaddr); -err_out_free_res_3: - pci_release_regions(pdev); -err_out_mwi_2: - pci_clear_mwi(pdev); - pci_disable_device(pdev); -err_out_free_dev_1: - free_netdev(dev); - goto out; + return 0; } static struct pci_driver rtl8169_pci_driver = { diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 009780df664b..c87f57ca4437 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2205,8 +2205,7 @@ out_dma_free: if (chip_id != RCAR_GEN2) ravb_ptp_stop(ndev); out_release: - if (ndev) - free_netdev(ndev); + free_netdev(ndev); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 53924a4fc31c..a197e11f3a56 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3125,7 +3125,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) const struct platform_device_id *id = platform_get_device_id(pdev); struct sh_eth_private *mdp; struct net_device *ndev; - int ret, devno; + int ret; /* get base addr */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -3137,10 +3137,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); - devno = pdev->id; - if (devno < 0) - devno = 0; - ret = platform_get_irq(pdev, 0); if (ret < 0) goto out_release; @@ -3222,8 +3218,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev) eth_hw_addr_random(ndev); } - /* ioremap the TSU registers */ if (mdp->cd->tsu) { + int port = pdev->id < 0 ? 0 : pdev->id % 2; struct resource *rtsu; rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); @@ -3235,7 +3231,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) /* We can only request the TSU region for the first port * of the two sharing this TSU for the probe to succeed... 
*/ - if (devno % 2 == 0 && + if (port == 0 && !devm_request_mem_region(&pdev->dev, rtsu->start, resource_size(rtsu), dev_name(&pdev->dev))) { @@ -3243,6 +3239,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) ret = -EBUSY; goto out_release; } + /* ioremap the TSU registers */ mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, resource_size(rtsu)); if (!mdp->tsu_addr) { @@ -3250,16 +3247,14 @@ static int sh_eth_drv_probe(struct platform_device *pdev) ret = -ENOMEM; goto out_release; } - mdp->port = devno % 2; + mdp->port = port; ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; - } - /* Need to init only the first port of the two sharing a TSU */ - if (devno % 2 == 0) { - if (mdp->cd->chip_reset) - mdp->cd->chip_reset(ndev); + /* Need to init only the first port of the two sharing a TSU */ + if (port == 0) { + if (mdp->cd->chip_reset) + mdp->cd->chip_reset(ndev); - if (mdp->cd->tsu) { /* TSU init (Init only)*/ sh_eth_tsu_init(mdp); } @@ -3301,8 +3296,7 @@ out_napi_del: out_release: /* net_dev free */ - if (ndev) - free_netdev(ndev); + free_netdev(ndev); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 6d6fb8cf3e7c..6473cc68c2d5 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -789,7 +789,6 @@ static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port, ofdpa_flags_nowait(flags), ofdpa_cmd_flow_tbl_add, found, NULL, NULL); - return 0; } static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port, diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index e566dbb3343d..75fbf58e421c 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -160,11 +160,31 @@ static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO; } +/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for + * I/O space and BAR 2(&3) for memory. On SFC9250 (Medford2), there is no I/O + * bar; PFs use BAR 0/1 for memory. 
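The comment above belongs to the change that turns mem_bar from a constant into a per-NIC-type method, so Medford2's BAR layout can be chosen by PCI device ID at probe time. A toy model of that ops-table pattern (all names and IDs here are illustrative except 0x0b03, which the hunk itself names):

#include <stdio.h>

struct nic;
struct nic_type { unsigned int (*mem_bar)(const struct nic *); };
struct nic { unsigned short pci_device; const struct nic_type *type; };

static unsigned int pf_mem_bar(const struct nic *n)
{
	return n->pci_device == 0x0b03 ? 0 : 2;  /* SFC9250 PF uses BAR 0 */
}

static unsigned int vf_mem_bar(const struct nic *n)
{
	(void)n;
	return 0;                                /* all VFs use BAR 0/1 */
}

static const struct nic_type pf_type = { .mem_bar = pf_mem_bar };
static const struct nic_type vf_type = { .mem_bar = vf_mem_bar };

int main(void)
{
	struct nic medford2 = { 0x0b03, &pf_type };
	struct nic hunt     = { 0x0903, &pf_type };
	struct nic vf       = { 0x1b03, &vf_type };

	printf("%u %u %u\n", medford2.type->mem_bar(&medford2),
	       hunt.type->mem_bar(&hunt), vf.type->mem_bar(&vf)); /* 0 2 0 */
	return 0;
}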
+ */ +static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx) +{ + switch (efx->pci_dev->device) { + case 0x0b03: /* SFC9250 PF */ + return 0; + default: + return 2; + } +} + +/* All VFs use BAR 0/1 for memory */ +static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx) +{ + return 0; +} + static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) { int bar; - bar = efx->type->mem_bar; + bar = efx->type->mem_bar(efx); return resource_size(&efx->pci_dev->resource[bar]); } @@ -213,7 +233,7 @@ static int efx_ef10_get_vf_index(struct efx_nic *efx) static int efx_ef10_init_datapath_caps(struct efx_nic *efx) { - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN); struct efx_ef10_nic_data *nic_data = efx->nic_data; size_t outlen; int rc; @@ -257,9 +277,70 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx) return -ENODEV; } + if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) { + u8 vi_window_mode = MCDI_BYTE(outbuf, + GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE); + + switch (vi_window_mode) { + case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K: + efx->vi_stride = 8192; + break; + case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K: + efx->vi_stride = 16384; + break; + case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K: + efx->vi_stride = 65536; + break; + default: + netif_err(efx, probe, efx->net_dev, + "Unrecognised VI window mode %d\n", + vi_window_mode); + return -EIO; + } + netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n", + efx->vi_stride); + } else { + /* keep default VI stride */ + netif_dbg(efx, probe, efx->net_dev, + "firmware did not report VI window mode, assuming vi_stride = %u\n", + efx->vi_stride); + } + + if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) { + efx->num_mac_stats = MCDI_WORD(outbuf, + GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS); + netif_dbg(efx, probe, efx->net_dev, + "firmware reports num_mac_stats = %u\n", + efx->num_mac_stats); + } else { + /* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */ + netif_dbg(efx, probe, efx->net_dev, + "firmware did not report num_mac_stats, assuming %u\n", + efx->num_mac_stats); + } + return 0; } +static void efx_ef10_read_licensed_features(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP, + MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN)) + return; + + nic_data->licensed_features = MCDI_QWORD(outbuf, + LICENSING_V3_OUT_LICENSED_FEATURES); +} + static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN); @@ -589,17 +670,6 @@ static int efx_ef10_probe(struct efx_nic *efx) struct efx_ef10_nic_data *nic_data; int i, rc; - /* We can have one VI for each 8K region. However, until we - * use TX option descriptors we need two TX queues per channel. 
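That removed comment pairs with the computation relocated below, which now has to run after the capability probe because the divisor depends on vi_stride. The arithmetic itself, as a hedged standalone sketch (constants illustrative, not the kernel's):

#include <stdio.h>

#define EFX_MAX_CHANNELS 32  /* illustrative cap */
#define EFX_TXQ_TYPES    4   /* illustrative; kernel value may differ */

static unsigned int max_channels(unsigned int mem_map_size,
				 unsigned int vi_stride)
{
	unsigned int n = mem_map_size / (vi_stride * EFX_TXQ_TYPES);

	return n < EFX_MAX_CHANNELS ? n : EFX_MAX_CHANNELS;
}

int main(void)
{
	/* Doubling the VI stride halves the channel budget for the same
	 * memory map, which is why the stride must be known first.
	 */
	printf("%u\n", max_channels(1 << 20, 8192));   /* 32 */
	printf("%u\n", max_channels(1 << 20, 16384));  /* 16 */
	return 0;
}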
- */ - efx->max_channels = min_t(unsigned int, - EFX_MAX_CHANNELS, - efx_ef10_mem_map_size(efx) / - (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); - efx->max_tx_channels = efx->max_channels; - if (WARN_ON(efx->max_channels == 0)) - return -EIO; - nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); if (!nic_data) return -ENOMEM; @@ -671,6 +741,22 @@ static int efx_ef10_probe(struct efx_nic *efx) if (rc < 0) goto fail5; + efx_ef10_read_licensed_features(efx); + + /* We can have one VI for each vi_stride-byte region. + * However, until we use TX option descriptors we need two TX queues + * per channel. + */ + efx->max_channels = min_t(unsigned int, + EFX_MAX_CHANNELS, + efx_ef10_mem_map_size(efx) / + (efx->vi_stride * EFX_TXQ_TYPES)); + efx->max_tx_channels = efx->max_channels; + if (WARN_ON(efx->max_channels == 0)) { + rc = -EIO; + goto fail5; + } + efx->rx_packet_len_offset = ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; @@ -695,7 +781,7 @@ static int efx_ef10_probe(struct efx_nic *efx) if (rc && rc != -EPERM) goto fail5; - efx_ptp_probe(efx, NULL); + efx_ptp_defer_probe_with_channel(efx); #ifdef CONFIG_SFC_SRIOV if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) { @@ -865,6 +951,11 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx) /* Link a buffer to each TX queue */ efx_for_each_channel(channel, efx) { + /* Extra channels, even those with TXQs (PTP), do not require + * PIO resources. + */ + if (!channel->type->want_pio) + continue; efx_for_each_channel_tx_queue(tx_queue, channel) { /* We assign the PIO buffers to queues in * reverse order to allow for the following @@ -907,7 +998,7 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx) } else { tx_queue->piobuf = nic_data->pio_write_base + - index * EFX_VI_PAGE_SIZE + offset; + index * efx->vi_stride + offset; tx_queue->piobuf_offset = offset; netif_dbg(efx, probe, efx->net_dev, "linked VI %u to PIO buffer %u offset %x addr %p\n", @@ -1212,7 +1303,9 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) void __iomem *membase; int rc; - channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); + channel_vis = max(efx->n_channels, + (efx->n_tx_channels + efx->n_extra_tx_channels) * + EFX_TXQ_TYPES); #ifdef EFX_USE_PIO /* Try to allocate PIO buffers if wanted and if the full @@ -1253,19 +1346,19 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) * for writing PIO buffers through. * * The UC mapping contains (channel_vis - 1) complete VIs and the - * first half of the next VI. Then the WC mapping begins with - * the second half of this last VI. + * first 4K of the next VI. Then the WC mapping begins with + * the remainder of this last VI. */ - uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE + + uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride + ER_DZ_TX_PIOBUF); if (nic_data->n_piobufs) { /* pio_write_vi_base rounds down to give the number of complete * VIs inside the UC mapping. 
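The UC/WC boundary described above is straight offset arithmetic once the VI stride is a variable. A sketch of how uc_mem_map_size and pio_write_vi_base fall out (PAGE_ALIGN and the PIO doorbell offset are assumptions here):

#include <stdio.h>

#define PAGE_SIZE       4096u
#define ER_DZ_TX_PIOBUF 0xa00u  /* assumed PIO doorbell offset within a VI */

static unsigned int page_align(unsigned int x)
{
	return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	unsigned int vi_stride = 8192, channel_vis = 10, n_piobufs = 2;

	/* UC mapping: all but the last VI, plus the start of the last one
	 * up to the PIO doorbell.
	 */
	unsigned int uc = page_align((channel_vis - 1) * vi_stride +
				     ER_DZ_TX_PIOBUF);
	/* Rounds down: the number of whole VIs inside the UC mapping. */
	unsigned int pio_base = uc / vi_stride;
	unsigned int wc = page_align((pio_base + n_piobufs) * vi_stride) - uc;

	printf("uc=%u pio_write_vi_base=%u wc=%u\n", uc, pio_base, wc);
	return 0;
}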
*/ - pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE; + pio_write_vi_base = uc_mem_map_size / efx->vi_stride; wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + nic_data->n_piobufs) * - EFX_VI_PAGE_SIZE) - + efx->vi_stride) - uc_mem_map_size); max_vis = pio_write_vi_base + nic_data->n_piobufs; } else { @@ -1337,7 +1430,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) nic_data->pio_write_vi_base = pio_write_vi_base; nic_data->pio_write_base = nic_data->wc_membase + - (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF - + (pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF - uc_mem_map_size); rc = efx_ef10_link_piobufs(efx); @@ -1571,6 +1664,29 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS), EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES), EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW), + EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS), + EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS), + EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0), + EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1), + EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2), + EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3), + EF10_DMA_STAT(ctpio_dmabuf_start, CTPIO_DMABUF_START), + EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK), + EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS), + EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL), + EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL), + EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL), + EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL), + EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL), + EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL), + EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL), + EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK), + EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK), + EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK), + EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS), + EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK), + EF10_DMA_STAT(ctpio_poison, CTPIO_POISON), + EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE), }; #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \ @@ -1646,6 +1762,43 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \ (1ULL << EF10_STAT_port_rx_dp_hlb_wait)) +/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2, + * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in + * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. + * These bits are in the second u64 of the raw mask. + */ +#define EF10_FEC_STAT_MASK ( \ + (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) | \ + (1ULL << (EF10_STAT_fec_corrected_errors - 64)) | \ + (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) | \ + (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) | \ + (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) | \ + (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64))) + +/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3, + * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in + * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. + * These bits are in the second u64 of the raw mask. 
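With EF10_STAT_COUNT past 64, the raw mask spans two u64 words, which is why every FEC and CTPIO bit above and below is written as 1ULL << (stat - 64). A tiny sketch of that split-bitmap indexing:

#include <stdio.h>
#include <stdint.h>

/* Set/test a stat bit in a mask spread over two 64-bit words. */
static void mask_set(uint64_t m[2], unsigned int stat)
{
	m[stat / 64] |= 1ULL << (stat % 64);
}

static int mask_test(const uint64_t m[2], unsigned int stat)
{
	return !!(m[stat / 64] & (1ULL << (stat % 64)));
}

int main(void)
{
	uint64_t mask[2] = {0, 0};
	unsigned int fec_uncorrected = 70;  /* illustrative enum value >= 64 */

	mask_set(mask, fec_uncorrected);    /* lands in mask[1], bit 6 */
	printf("word1=%#llx test=%d\n",
	       (unsigned long long)mask[1], mask_test(mask, fec_uncorrected));
	return 0;
}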
+ */ +#define EF10_CTPIO_STAT_MASK ( \ + (1ULL << (EF10_STAT_ctpio_dmabuf_start - 64)) | \ + (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \ + (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \ + (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \ + (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) | \ + (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) | \ + (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) | \ + (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) | \ + (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) | \ + (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) | \ + (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) | \ + (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) | \ + (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) | \ + (1ULL << (EF10_STAT_ctpio_success - 64)) | \ + (1ULL << (EF10_STAT_ctpio_fallback - 64)) | \ + (1ULL << (EF10_STAT_ctpio_poison - 64)) | \ + (1ULL << (EF10_STAT_ctpio_erase - 64))) + static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) { u64 raw_mask = HUNT_COMMON_STAT_MASK; @@ -1684,10 +1837,22 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) if (nic_data->datapath_caps & (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) { raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); - raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1; + raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1; } else { raw_mask[1] = 0; } + /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */ + if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2) + raw_mask[1] |= EF10_FEC_STAT_MASK; + + /* CTPIO stats appear in V3. Only show them on devices that actually + * support CTPIO. Although this driver doesn't use CTPIO others might, + * and we may be reporting the stats for the underlying port. + */ + if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 && + (nic_data->datapath_caps2 & + (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN))) + raw_mask[1] |= EF10_CTPIO_STAT_MASK; #if BITS_PER_LONG == 64 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2); @@ -1791,7 +1956,7 @@ static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx) dma_stats = efx->stats_buffer.addr; - generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; + generation_end = dma_stats[efx->num_mac_stats - 1]; if (generation_end == EFX_MC_STATS_GENERATION_INVALID) return 0; rmb(); @@ -1839,7 +2004,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) DECLARE_BITMAP(mask, EF10_STAT_COUNT); __le64 generation_start, generation_end; u64 *stats = nic_data->stats; - u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64); + u32 dma_len = efx->num_mac_stats * sizeof(u64); struct efx_buffer stats_buf; __le64 *dma_stats; int rc; @@ -1864,7 +2029,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) } dma_stats = stats_buf.addr; - dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; + dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, @@ -1883,7 +2048,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) goto out; } - generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; + generation_end = dma_stats[efx->num_mac_stats - 1]; if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { WARN_ON_ONCE(1); goto out; @@ -1951,8 +2116,9 @@ static void efx_ef10_push_irq_moderation(struct efx_channel *channel) } else { unsigned int ticks = efx_usecs_to_ticks(efx, usecs); - EFX_POPULATE_DWORD_2(timer_cmd, 
ERF_DZ_TC_TIMER_MODE, mode, - ERF_DZ_TC_TIMER_VAL, ticks); + EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, + ERF_DZ_TC_TIMER_VAL, ticks, + ERF_FZ_TC_TMR_REL_VAL, ticks); efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, channel->channel); } @@ -2263,12 +2429,25 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) int i; BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0); + /* Only attempt to enable TX timestamping if we have the license for it, + * otherwise TXQ init will fail + */ + if (!(nic_data->licensed_features & + (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) { + tx_queue->timestamping = false; + /* Disable sync events on this channel. */ + if (efx->type->ptp_set_ts_sync_events) + efx->type->ptp_set_ts_sync_events(efx, false, false); + } + /* TSOv2 is a limited resource that can only be configured on a limited * number of queues. TSO without checksum offload is not really a thing, * so we only enable it for those queues. + * TSOv2 cannot be used with Hardware timestamping. */ if (csum_offload && (nic_data->datapath_caps2 & - (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))) { + (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) && + !tx_queue->timestamping) { tso_v2 = true; netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n", channel->channel); @@ -2294,14 +2473,16 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); do { - MCDI_POPULATE_DWORD_3(inbuf, INIT_TXQ_IN_FLAGS, + MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS, /* This flag was removed from mcdi_pcol.h for * the non-_EXT version of INIT_TXQ. However, * firmware still honours it. */ INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2, INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload, - INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload); + INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload, + INIT_TXQ_EXT_IN_FLAG_TIMESTAMP, + tx_queue->timestamping); rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen, NULL, 0, NULL); @@ -2327,12 +2508,13 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; tx_queue->insert_count = 1; txd = efx_tx_desc(tx_queue, 0); - EFX_POPULATE_QWORD_4(*txd, + EFX_POPULATE_QWORD_5(*txd, ESF_DZ_TX_DESC_IS_OPT, true, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM, ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, - ESF_DZ_TX_OPTION_IP_CSUM, csum_offload); + ESF_DZ_TX_OPTION_IP_CSUM, csum_offload, + ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping); tx_queue->write_count = 1; if (tso_v2) { @@ -3233,8 +3415,8 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel, if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && rx_l3_class != ESE_DZ_L3_CLASS_IP6) || - (rx_l4_class != ESE_DZ_L4_CLASS_TCP && - rx_l4_class != ESE_DZ_L4_CLASS_UDP)))) + (rx_l4_class != ESE_FZ_L4_CLASS_TCP && + rx_l4_class != ESE_FZ_L4_CLASS_UDP)))) netdev_WARN(efx->net_dev, "invalid class for RX_TCPUDP_CKSUM_ERR: event=" EFX_QWORD_FMT "\n", @@ -3271,8 +3453,8 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel, EFX_QWORD_VAL(*event)); else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && rx_l3_class != ESE_DZ_L3_CLASS_IP6) || - (rx_l4_class != ESE_DZ_L4_CLASS_TCP && - rx_l4_class != ESE_DZ_L4_CLASS_UDP))) + (rx_l4_class != ESE_FZ_L4_CLASS_TCP && + rx_l4_class != ESE_FZ_L4_CLASS_UDP))) netdev_WARN(efx->net_dev, "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" EFX_QWORD_FMT "\n", @@ -3307,7 +3489,7 @@ static int 
efx_ef10_handle_rx_event(struct efx_channel *channel, next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS); - rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS); + rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS); rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); rx_encap_hdr = nic_data->datapath_caps & @@ -3385,8 +3567,8 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, rx_l3_class, rx_l4_class, event); } else { - bool tcpudp = rx_l4_class == ESE_DZ_L4_CLASS_TCP || - rx_l4_class == ESE_DZ_L4_CLASS_UDP; + bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP || + rx_l4_class == ESE_FZ_L4_CLASS_UDP; switch (rx_encap_hdr) { case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */ @@ -3407,7 +3589,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, } } - if (rx_l4_class == ESE_DZ_L4_CLASS_TCP) + if (rx_l4_class == ESE_FZ_L4_CLASS_TCP) flags |= EFX_RX_PKT_TCP; channel->irq_mod_score += 2 * n_packets; @@ -3427,31 +3609,92 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, return n_packets; } -static int +static u32 efx_ef10_extract_event_ts(efx_qword_t *event) +{ + u32 tstamp; + + tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI); + tstamp <<= 16; + tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO); + + return tstamp; +} + +static void efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) { struct efx_nic *efx = channel->efx; struct efx_tx_queue *tx_queue; unsigned int tx_ev_desc_ptr; unsigned int tx_ev_q_label; - int tx_descs = 0; + unsigned int tx_ev_type; + u64 ts_part; if (unlikely(READ_ONCE(efx->reset_pending))) - return 0; + return; if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) - return 0; + return; - /* Transmit completion */ - tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); + /* Get the transmit queue */ tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); tx_queue = efx_channel_get_tx_queue(channel, tx_ev_q_label % EFX_TXQ_TYPES); - tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) & - tx_queue->ptr_mask); - efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); - return tx_descs; + if (!tx_queue->timestamping) { + /* Transmit completion */ + tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); + efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); + return; + } + + /* Transmit timestamps are only available for 8XXX series. They result + * in three events per packet. These occur in order, and are: + * - the normal completion event + * - the low part of the timestamp + * - the high part of the timestamp + * + * Each part of the timestamp is itself split across two 16 bit + * fields in the event. + */ + tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1); + + switch (tx_ev_type) { + case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION: + /* In case of Queue flush or FLR, we might have received + * the previous TX completion event but not the Timestamp + * events. 
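Each timestamp half arrives as two 16-bit fields in the event, so the driver shifts and ORs them back together and keeps the result as the minor (LO) or major (HI) word. A sketch of the reassembly; how the pair is later converted to a ktime is outside this hunk, so the final combination shown is an assumption:

#include <stdio.h>
#include <stdint.h>

/* Rebuild one 32-bit timestamp part from its two 16-bit event fields. */
static uint32_t extract_event_ts(uint16_t data_hi, uint16_t data_lo)
{
	return ((uint32_t)data_hi << 16) | data_lo;
}

int main(void)
{
	/* Three events per packet: completion, then TSTAMP_LO, then
	 * TSTAMP_HI; only after HI is the descriptor reported done.
	 */
	uint32_t minor = extract_event_ts(0x1234, 0x5678); /* low word  */
	uint32_t major = extract_event_ts(0x0000, 0x002a); /* high word */
	uint64_t ts = ((uint64_t)major << 32) | minor;     /* assumed pairing */

	printf("timestamp = %#llx\n", (unsigned long long)ts);
	return 0;
}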
+ */ + if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask) + efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr); + + tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, + ESF_DZ_TX_DESCR_INDX); + tx_queue->completed_desc_ptr = + tx_ev_desc_ptr & tx_queue->ptr_mask; + break; + + case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO: + ts_part = efx_ef10_extract_event_ts(event); + tx_queue->completed_timestamp_minor = ts_part; + break; + + case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI: + ts_part = efx_ef10_extract_event_ts(event); + tx_queue->completed_timestamp_major = ts_part; + + efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr); + tx_queue->completed_desc_ptr = tx_queue->ptr_mask; + break; + + default: + netif_err(efx, hw, efx->net_dev, + "channel %d unknown tx event type %d (data " + EFX_QWORD_FMT ")\n", + channel->channel, tx_ev_type, + EFX_QWORD_VAL(*event)); + break; + } } static void @@ -3513,7 +3756,6 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota) efx_qword_t event, *p_event; unsigned int read_ptr; int ev_code; - int tx_descs = 0; int spent = 0; if (quota <= 0) @@ -3553,13 +3795,7 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota) } break; case ESE_DZ_EV_CODE_TX_EV: - tx_descs += efx_ef10_handle_tx_event(channel, &event); - if (tx_descs > efx->txq_entries) { - spent = quota; - goto out; - } else if (++spent == quota) { - goto out; - } + efx_ef10_handle_tx_event(channel, &event); break; case ESE_DZ_EV_CODE_DRIVER_EV: efx_ef10_handle_driver_event(channel, &event); @@ -6034,7 +6270,8 @@ static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en, efx_ef10_rx_enable_timestamping : efx_ef10_rx_disable_timestamping; - efx_for_each_channel(channel, efx) { + channel = efx_ptp_channel(efx); + if (channel) { int rc = set(channel, temp); if (en && rc != 0) { efx_ef10_ptp_set_ts_sync_events(efx, false, temp); @@ -6392,7 +6629,7 @@ out_unlock: const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .is_vf = true, - .mem_bar = EFX_MEM_VF_BAR, + .mem_bar = efx_ef10_vf_mem_bar, .mem_map_size = efx_ef10_mem_map_size, .probe = efx_ef10_probe_vf, .remove = efx_ef10_remove, @@ -6500,7 +6737,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { const struct efx_nic_type efx_hunt_a0_nic_type = { .is_vf = false, - .mem_bar = EFX_MEM_BAR, + .mem_bar = efx_ef10_pf_mem_bar, .mem_map_size = efx_ef10_mem_map_size, .probe = efx_ef10_probe_pf, .remove = efx_ef10_remove, diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h index 2c4bf9476c37..6a56778cf06c 100644 --- a/drivers/net/ethernet/sfc/ef10_regs.h +++ b/drivers/net/ethernet/sfc/ef10_regs.h @@ -1,6 +1,6 @@ /**************************************************************************** * Driver for Solarflare network controllers and boards - * Copyright 2012-2015 Solarflare Communications Inc. + * Copyright 2012-2017 Solarflare Communications Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -79,6 +79,8 @@ #define ER_DZ_EVQ_TMR 0x00000420 #define ER_DZ_EVQ_TMR_STEP 8192 #define ER_DZ_EVQ_TMR_ROWS 2048 +#define ERF_FZ_TC_TMR_REL_VAL_LBN 16 +#define ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 #define ERF_DZ_TC_TIMER_MODE_LBN 14 #define ERF_DZ_TC_TIMER_MODE_WIDTH 2 #define ERF_DZ_TC_TIMER_VAL_LBN 0 @@ -159,16 +161,24 @@ #define ESF_DZ_RX_EV_SOFT2_WIDTH 2 #define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48 #define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4 -#define ESF_DZ_RX_L4_CLASS_LBN 45 -#define ESF_DZ_RX_L4_CLASS_WIDTH 3 -#define ESE_DZ_L4_CLASS_RSVD7 7 -#define ESE_DZ_L4_CLASS_RSVD6 6 -#define ESE_DZ_L4_CLASS_RSVD5 5 -#define ESE_DZ_L4_CLASS_RSVD4 4 -#define ESE_DZ_L4_CLASS_RSVD3 3 -#define ESE_DZ_L4_CLASS_UDP 2 -#define ESE_DZ_L4_CLASS_TCP 1 -#define ESE_DZ_L4_CLASS_UNKNOWN 0 +#define ESF_DE_RX_L4_CLASS_LBN 45 +#define ESF_DE_RX_L4_CLASS_WIDTH 3 +#define ESE_DE_L4_CLASS_RSVD7 7 +#define ESE_DE_L4_CLASS_RSVD6 6 +#define ESE_DE_L4_CLASS_RSVD5 5 +#define ESE_DE_L4_CLASS_RSVD4 4 +#define ESE_DE_L4_CLASS_RSVD3 3 +#define ESE_DE_L4_CLASS_UDP 2 +#define ESE_DE_L4_CLASS_TCP 1 +#define ESE_DE_L4_CLASS_UNKNOWN 0 +#define ESF_FZ_RX_FASTPD_INDCTR_LBN 47 +#define ESF_FZ_RX_FASTPD_INDCTR_WIDTH 1 +#define ESF_FZ_RX_L4_CLASS_LBN 45 +#define ESF_FZ_RX_L4_CLASS_WIDTH 2 +#define ESE_FZ_L4_CLASS_RSVD3 3 +#define ESE_FZ_L4_CLASS_UDP 2 +#define ESE_FZ_L4_CLASS_TCP 1 +#define ESE_FZ_L4_CLASS_UNKNOWN 0 #define ESF_DZ_RX_L3_CLASS_LBN 42 #define ESF_DZ_RX_L3_CLASS_WIDTH 3 #define ESE_DZ_L3_CLASS_RSVD7 7 @@ -215,6 +225,8 @@ #define ESF_EZ_RX_ABORT_WIDTH 1 #define ESF_DZ_RX_ECC_ERR_LBN 29 #define ESF_DZ_RX_ECC_ERR_WIDTH 1 +#define ESF_DZ_RX_TRUNC_ERR_LBN 29 +#define ESF_DZ_RX_TRUNC_ERR_WIDTH 1 #define ESF_DZ_RX_CRC1_ERR_LBN 28 #define ESF_DZ_RX_CRC1_ERR_WIDTH 1 #define ESF_DZ_RX_CRC0_ERR_LBN 27 @@ -332,6 +344,8 @@ #define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 #define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56 #define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2 #define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1 #define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0 #define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48 @@ -341,7 +355,7 @@ #define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0 #define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32 -/* TX_TSO_FATSO2A_DESC */ +/* TX_TSO_V2_DESC_A */ #define ESF_DZ_TX_DESC_IS_OPT_LBN 63 #define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 #define ESF_DZ_TX_OPTION_TYPE_LBN 60 @@ -360,8 +374,7 @@ #define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0 #define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32 - -/* TX_TSO_FATSO2B_DESC */ +/* TX_TSO_V2_DESC_B */ #define ESF_DZ_TX_DESC_IS_OPT_LBN 63 #define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 #define ESF_DZ_TX_OPTION_TYPE_LBN 60 @@ -375,11 +388,10 @@ #define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2 #define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1 #define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0 -#define ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 0 -#define ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16 #define ESF_DZ_TX_TSO_TCP_MSS_LBN 32 #define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16 - +#define ESF_DZ_TX_TSO_OUTER_IPID_LBN 0 +#define ESF_DZ_TX_TSO_OUTER_IPID_WIDTH 16 /*************************************************************************/ diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index e3c492fcaff0..16757cfc5b29 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -27,6 +27,7 @@ #include <net/udp_tunnel.h> #include "efx.h" #include 
"nic.h" +#include "io.h" #include "selftest.h" #include "sriov.h" @@ -895,12 +896,20 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100)); } +static bool efx_default_channel_want_txqs(struct efx_channel *channel) +{ + return channel->channel - channel->efx->tx_channel_offset < + channel->efx->n_tx_channels; +} + static const struct efx_channel_type efx_default_channel_type = { .pre_probe = efx_channel_dummy_op_int, .post_remove = efx_channel_dummy_op_void, .get_name = efx_get_channel_name, .copy = efx_copy_channel, + .want_txqs = efx_default_channel_want_txqs, .keep_eventq = false, + .want_pio = true, }; int efx_channel_dummy_op_int(struct efx_channel *channel) @@ -952,31 +961,42 @@ void efx_link_status_changed(struct efx_nic *efx) netif_info(efx, link, efx->net_dev, "link down\n"); } -void efx_link_set_advertising(struct efx_nic *efx, u32 advertising) +void efx_link_set_advertising(struct efx_nic *efx, + const unsigned long *advertising) { - efx->link_advertising = advertising; - if (advertising) { - if (advertising & ADVERTISED_Pause) - efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); - else - efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); - if (advertising & ADVERTISED_Asym_Pause) - efx->wanted_fc ^= EFX_FC_TX; - } + memcpy(efx->link_advertising, advertising, + sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK())); + + efx->link_advertising[0] |= ADVERTISED_Autoneg; + if (advertising[0] & ADVERTISED_Pause) + efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); + else + efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); + if (advertising[0] & ADVERTISED_Asym_Pause) + efx->wanted_fc ^= EFX_FC_TX; +} + +/* Equivalent to efx_link_set_advertising with all-zeroes, except does not + * force the Autoneg bit on. + */ +void efx_link_clear_advertising(struct efx_nic *efx) +{ + bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); + efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); } void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) { efx->wanted_fc = wanted_fc; - if (efx->link_advertising) { + if (efx->link_advertising[0]) { if (wanted_fc & EFX_FC_RX) - efx->link_advertising |= (ADVERTISED_Pause | - ADVERTISED_Asym_Pause); + efx->link_advertising[0] |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); else - efx->link_advertising &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); + efx->link_advertising[0] &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); if (wanted_fc & EFX_FC_TX) - efx->link_advertising ^= ADVERTISED_Asym_Pause; + efx->link_advertising[0] ^= ADVERTISED_Asym_Pause; } } @@ -1248,7 +1268,7 @@ static int efx_init_io(struct efx_nic *efx) netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); - bar = efx->type->mem_bar; + bar = efx->type->mem_bar(efx); rc = pci_enable_device(pci_dev); if (rc) { @@ -1323,7 +1343,7 @@ static void efx_fini_io(struct efx_nic *efx) } if (efx->membase_phys) { - bar = efx->type->mem_bar; + bar = efx->type->mem_bar(efx); pci_release_region(efx->pci_dev, bar); efx->membase_phys = 0; } @@ -1489,6 +1509,7 @@ static int efx_probe_interrupts(struct efx_nic *efx) } /* Assign extra channels if possible */ + efx->n_extra_tx_channels = 0; j = efx->n_channels; for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) { if (!efx->extra_channel_type[i]) @@ -1500,6 +1521,8 @@ static int efx_probe_interrupts(struct efx_nic *efx) --j; efx_get_channel(efx, j)->type = efx->extra_channel_type[i]; + if (efx_channel_has_tx_queues(efx_get_channel(efx, j))) + efx->n_extra_tx_channels++; } } @@ -2909,6 +2932,10 @@ static const struct 
pci_device_id efx_pci_table[] = { .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */ .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03), /* SFC9250 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */ + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, {0} /* end of list */ }; @@ -2977,6 +3004,9 @@ static int efx_init_struct(struct efx_nic *efx, efx->rx_packet_ts_offset = efx->type->rx_ts_offset - efx->type->rx_prefix_size; spin_lock_init(&efx->stats_lock); + efx->vi_stride = EFX_DEFAULT_VI_STRIDE; + efx->num_mac_stats = MC_CMD_MAC_NSTATS; + BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END); mutex_init(&efx->mac_lock); efx->phy_op = &efx_dummy_phy_operations; efx->mdio.dev = net_dev; diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 52c84b782901..0cddc5ad77b1 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -14,11 +14,6 @@ #include "net_driver.h" #include "filter.h" -/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */ -/* All VFs use BAR 0/1 for memory */ -#define EFX_MEM_BAR 2 -#define EFX_MEM_VF_BAR 0 - int efx_net_open(struct net_device *net_dev); int efx_net_stop(struct net_device *net_dev); @@ -263,7 +258,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel) } void efx_link_status_changed(struct efx_nic *efx); -void efx_link_set_advertising(struct efx_nic *efx, u32); +void efx_link_set_advertising(struct efx_nic *efx, + const unsigned long *advertising); +void efx_link_clear_advertising(struct efx_nic *efx); void efx_link_set_wanted_fc(struct efx_nic *efx, u8); static inline void efx_device_detach_sync(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 3747b5644110..4db2dc2bf52f 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -720,7 +720,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev, goto out; } - if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) { + if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) { netif_dbg(efx, drv, efx->net_dev, "Autonegotiation is disabled\n"); rc = -EINVAL; @@ -732,10 +732,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev, (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX)) efx->type->prepare_enable_fc_tx(efx); - old_adv = efx->link_advertising; + old_adv = efx->link_advertising[0]; old_fc = efx->wanted_fc; efx_link_set_wanted_fc(efx, wanted_fc); - if (efx->link_advertising != old_adv || + if (efx->link_advertising[0] != old_adv || (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { rc = efx->phy_op->reconfigure(efx); if (rc) { diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 5334dc83d926..266b9bee1f3a 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -818,17 +818,16 @@ static void efx_farch_magic_event(struct efx_channel *channel, u32 magic) * The NIC batches TX completion events; the message we receive is of * the form "complete all TX events up to this index". 
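The accounting being deleted just below computed how many descriptors one batched event retired; since the event only says "done up to this index", that count is ring arithmetic modulo the queue size. A sketch of the removed formula:

#include <stdio.h>

/* Descriptors retired by a completion event that says "done up to ptr",
 * on a ring whose size is ptr_mask + 1 (a power of two).
 */
static unsigned int completed(unsigned int ev_desc_ptr,
			      unsigned int read_count,
			      unsigned int ptr_mask)
{
	return (ev_desc_ptr - read_count) & ptr_mask;
}

int main(void)
{
	/* 256-entry ring: a completion at index 5 after reading up to 250
	 * wraps around and retires 11 descriptors.
	 */
	printf("%u\n", completed(5, 250, 255));  /* 11 */
	return 0;
}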
*/ -static int +static void efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) { unsigned int tx_ev_desc_ptr; unsigned int tx_ev_q_label; struct efx_tx_queue *tx_queue; struct efx_nic *efx = channel->efx; - int tx_packets = 0; if (unlikely(READ_ONCE(efx->reset_pending))) - return 0; + return; if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { /* Transmit completion */ @@ -836,8 +835,6 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); tx_queue = efx_channel_get_tx_queue( channel, tx_ev_q_label % EFX_TXQ_TYPES); - tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & - tx_queue->ptr_mask); efx_xmit_done(tx_queue, tx_ev_desc_ptr); } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { /* Rewrite the FIFO write pointer */ @@ -856,8 +853,6 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) EFX_QWORD_FMT"\n", channel->channel, EFX_QWORD_VAL(*event)); } - - return tx_packets; } /* Detect errors included in the rx_evt_pkt_ok bit. */ @@ -1090,7 +1085,7 @@ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) int qid; qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); - if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) { + if (qid < EFX_TXQ_TYPES * (efx->n_tx_channels + efx->n_extra_tx_channels)) { tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES, qid % EFX_TXQ_TYPES); if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) { @@ -1270,7 +1265,6 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget) unsigned int read_ptr; efx_qword_t event, *p_event; int ev_code; - int tx_packets = 0; int spent = 0; if (budget <= 0) @@ -1304,12 +1298,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget) goto out; break; case FSE_AZ_EV_CODE_TX_EV: - tx_packets += efx_farch_handle_tx_event(channel, - &event); - if (tx_packets > efx->txq_entries) { - spent = budget; - goto out; - } + efx_farch_handle_tx_event(channel, &event); break; case FSE_AZ_EV_CODE_DRV_GEN_EV: efx_farch_handle_generated_event(channel, &event); @@ -1680,20 +1669,21 @@ void efx_farch_rx_pull_indir_table(struct efx_nic *efx) */ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) { - unsigned vi_count, buftbl_min; + unsigned vi_count, buftbl_min, total_tx_channels; #ifdef CONFIG_SFC_SRIOV struct siena_nic_data *nic_data = efx->nic_data; #endif + total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels; /* Account for the buffer table entries backing the datapath channels * and the descriptor caches for those channels. 
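The sizing this comment introduces charges each channel for its RX ring, its EFX_TXQ_TYPES TX rings (now counting extra-channel TXQs too), and its event ring, all in buffer-table units. A standalone sketch of the formula, with the constants assumed for illustration:

#include <stdio.h>

#define EFX_MAX_DMAQ_SIZE 4096u
#define EFX_MAX_EVQ_SIZE  4096u
#define EFX_TXQ_TYPES     4u
#define EFX_BUF_SIZE      4096u
#define QWORD_BYTES       8u   /* sizeof(efx_qword_t) */

static unsigned int buftbl_min(unsigned int rx_ch, unsigned int tx_ch,
			       unsigned int all_ch)
{
	return (rx_ch * EFX_MAX_DMAQ_SIZE +
		tx_ch * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
		all_ch * EFX_MAX_EVQ_SIZE) * QWORD_BYTES / EFX_BUF_SIZE;
}

int main(void)
{
	/* 4 RX channels, 4 TX channels plus 1 extra (PTP), 5 channels. */
	printf("%u buffer-table entries\n", buftbl_min(4, 4 + 1, 5));
	return 0;
}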
*/ buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + - efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE + + total_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE + efx->n_channels * EFX_MAX_EVQ_SIZE) * sizeof(efx_qword_t) / EFX_BUF_SIZE); - vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); + vi_count = max(efx->n_channels, total_tx_channels * EFX_TXQ_TYPES); #ifdef CONFIG_SFC_SRIOV if (efx->type->sriov_wanted) { diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h index afb94aa2c15e..89563170af52 100644 --- a/drivers/net/ethernet/sfc/io.h +++ b/drivers/net/ethernet/sfc/io.h @@ -222,18 +222,21 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); } -/* Page size used as step between per-VI registers */ -#define EFX_VI_PAGE_SIZE 0x2000 +/* default VI stride (step between per-VI registers) is 8K */ +#define EFX_DEFAULT_VI_STRIDE 0x2000 /* Calculate offset to page-mapped register */ -#define EFX_PAGED_REG(page, reg) \ - ((page) * EFX_VI_PAGE_SIZE + (reg)) +static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page, + unsigned int reg) +{ + return page * efx->vi_stride + reg; +} /* Write the whole of RX_DESC_UPD or TX_DESC_UPD */ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, unsigned int reg, unsigned int page) { - reg = EFX_PAGED_REG(page, reg); + reg = efx_paged_reg(efx, page, reg); netif_vdbg(efx, hw, efx->net_dev, "writing register %x with " EFX_OWORD_FMT "\n", reg, @@ -262,7 +265,7 @@ static inline void _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value, unsigned int reg, unsigned int page) { - efx_writed(efx, value, EFX_PAGED_REG(page, reg)); + efx_writed(efx, value, efx_paged_reg(efx, page, reg)); } #define efx_writed_page(efx, value, reg, page) \ _efx_writed_page(efx, value, \ @@ -288,10 +291,10 @@ static inline void _efx_writed_page_locked(struct efx_nic *efx, if (page == 0) { spin_lock_irqsave(&efx->biu_lock, flags); - efx_writed(efx, value, EFX_PAGED_REG(page, reg)); + efx_writed(efx, value, efx_paged_reg(efx, page, reg)); spin_unlock_irqrestore(&efx->biu_lock, flags); } else { - efx_writed(efx, value, EFX_PAGED_REG(page, reg)); + efx_writed(efx, value, efx_paged_reg(efx, page, reg)); } } #define efx_writed_page_locked(efx, value, reg, page) \ diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 154ef41d1927..ebd95972ae7b 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -208,6 +208,9 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); #define _MCDI_DWORD(_buf, _field) \ ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2)) +#define MCDI_BYTE(_buf, _field) \ + ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \ + *MCDI_PTR(_buf, _field)) #define MCDI_WORD(_buf, _field) \ ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field))) diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 91fb54fd03d9..869d76f8f589 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -114,6 +114,8 @@ #define MCDI_HEADER_XFLAGS_WIDTH 8 /* Request response using event */ #define MCDI_HEADER_XFLAGS_EVREQ 0x01 +/* Request (and signal) early doorbell return */ +#define MCDI_HEADER_XFLAGS_DBRET 0x02 /* Maximum number of payload bytes */ #define MCDI_CTL_SDU_LEN_MAX_V1 
0xfc @@ -123,7 +125,7 @@ /* The MC can generate events for two reasons: - * - To complete a shared memory request if XFLAGS_EVREQ was set + * - To advance a shared memory request if XFLAGS_EVREQ was set * - As a notification (link state, i2c event), controlled * via MC_CMD_LOG_CTRL * @@ -279,6 +281,17 @@ /* Returned by MC_CMD_TESTASSERT if the action that should * have caused an assertion failed to do so. */ #define MC_CMD_ERR_UNREACHABLE 0x1016 +/* This command needs to be processed in the background but there were no + * resources to do so. Send it again after a command has completed. */ +#define MC_CMD_ERR_QUEUE_FULL 0x1017 +/* The operation could not be completed because the PCIe link has gone + * away. This error code is never expected to be returned over the TLP + * transport. */ +#define MC_CMD_ERR_NO_PCIE 0x1018 +/* The operation could not be completed because the datapath has gone + * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the + * datapath absence may be temporary. */ +#define MC_CMD_ERR_NO_DATAPATH 0x1019 #define MC_CMD_ERR_CODE_OFST 0 @@ -360,6 +373,7 @@ /* enum: Fatal. */ #define MCDI_EVENT_LEVEL_FATAL 0x3 #define MCDI_EVENT_DATA_OFST 0 +#define MCDI_EVENT_DATA_LEN 4 #define MCDI_EVENT_CMDDONE_SEQ_LBN 0 #define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8 #define MCDI_EVENT_CMDDONE_DATALEN_LBN 8 @@ -370,6 +384,8 @@ #define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16 #define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16 #define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4 +/* enum: Link is down or link speed could not be determined */ +#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0 /* enum: 100Mbs */ #define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 /* enum: 1Gbs */ @@ -378,6 +394,12 @@ #define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 /* enum: 40Gbs */ #define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4 +/* enum: 25Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5 +/* enum: 50Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6 +/* enum: 100Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7 #define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20 #define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4 #define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 @@ -456,8 +478,63 @@ #define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa /* enum: PTP status update */ #define MCDI_EVENT_AOE_PTP_STATUS 0xb +/* enum: FPGA header incorrect */ +#define MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc +/* enum: FPGA Powered Off due to error in powering up FPGA */ +#define MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd +/* enum: AOE FPGA load failed due to MC to MUM communication failure */ +#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe +/* enum: Notify that invalid flash type detected */ +#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf +/* enum: Notify that the attempt to run FPGA Controller firmware timed out */ +#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10 +/* enum: Failure to probe one or more FPGA boot flash chips */ +#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11 +/* enum: FPGA boot-flash contains an invalid image header */ +#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12 +/* enum: Failed to program clocks required by the FPGA */ +#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13 +/* enum: Notify that FPGA Controller is alive to serve MCDI requests */ +#define MCDI_EVENT_AOE_FC_RUNNING 0x14 #define MCDI_EVENT_AOE_ERR_DATA_LBN 8 #define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8 +/* enum: FC Assert happened, but the register information is not available */ +#define
MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0 +/* enum: The register information for FC Assert is ready for reading by driver + */ +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8 +/* enum: Reading from NV failed */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0 +/* enum: Invalid Magic Number in FPGA header */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1 +/* enum: Invalid Silicon type detected in header */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2 +/* enum: Unsupported VRatio */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3 +/* enum: Unsupported DDR Type */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4 +/* enum: DDR Voltage out of supported range */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5 +/* enum: Unsupported DDR speed */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6 +/* enum: Unsupported DDR size */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7 +/* enum: Unsupported DDR rank */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8 +#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8 +/* enum: Primary boot flash */ +#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0 +/* enum: Secondary boot flash */ +#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8 #define MCDI_EVENT_RX_ERR_RXQ_LBN 0 #define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12 #define MCDI_EVENT_RX_ERR_TYPE_LBN 12 @@ -480,6 +557,22 @@ #define MCDI_EVENT_MUM_WATCHDOG 0x3 #define MCDI_EVENT_MUM_ERR_DATA_LBN 8 #define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_DBRET_SEQ_LBN 0 +#define MCDI_EVENT_DBRET_SEQ_WIDTH 8 +#define MCDI_EVENT_SUC_ERR_TYPE_LBN 0 +#define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8 +/* enum: Corrupted or bad SUC application. */ +#define MCDI_EVENT_SUC_BAD_APP 0x1 +/* enum: SUC application reported an assert. */ +#define MCDI_EVENT_SUC_ASSERT 0x2 +/* enum: SUC application reported an exception. */ +#define MCDI_EVENT_SUC_EXCEPTION 0x3 +/* enum: SUC watchdog timer expired. */ +#define MCDI_EVENT_SUC_WATCHDOG 0x4 +#define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8 +#define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24 +#define MCDI_EVENT_SUC_ERR_DATA_LBN 8 +#define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24 #define MCDI_EVENT_DATA_LBN 0 #define MCDI_EVENT_DATA_WIDTH 32 #define MCDI_EVENT_SRC_LBN 36 @@ -552,73 +645,99 @@ * been processed and it may now resend the command */ #define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d +/* enum: MCDI command accepted. New commands can be issued but this command is + * not done yet. + */ +#define MCDI_EVENT_CODE_DBRET 0x1e +/* enum: The MC has detected a fault on the SUC */ +#define MCDI_EVENT_CODE_SUC 0x1f /* enum: Artificial event generated by host and posted via MC for test * purposes.
*/ #define MCDI_EVENT_CODE_TESTGEN 0xfa #define MCDI_EVENT_CMDDONE_DATA_OFST 0 +#define MCDI_EVENT_CMDDONE_DATA_LEN 4 #define MCDI_EVENT_CMDDONE_DATA_LBN 0 #define MCDI_EVENT_CMDDONE_DATA_WIDTH 32 #define MCDI_EVENT_LINKCHANGE_DATA_OFST 0 +#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4 #define MCDI_EVENT_LINKCHANGE_DATA_LBN 0 #define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32 #define MCDI_EVENT_SENSOREVT_DATA_OFST 0 +#define MCDI_EVENT_SENSOREVT_DATA_LEN 4 #define MCDI_EVENT_SENSOREVT_DATA_LBN 0 #define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32 #define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4 #define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0 #define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32 #define MCDI_EVENT_TX_ERR_DATA_OFST 0 +#define MCDI_EVENT_TX_ERR_DATA_LEN 4 #define MCDI_EVENT_TX_ERR_DATA_LBN 0 #define MCDI_EVENT_TX_ERR_DATA_WIDTH 32 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of * timestamp */ #define MCDI_EVENT_PTP_SECONDS_OFST 0 +#define MCDI_EVENT_PTP_SECONDS_LEN 4 #define MCDI_EVENT_PTP_SECONDS_LBN 0 #define MCDI_EVENT_PTP_SECONDS_WIDTH 32 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of * timestamp */ #define MCDI_EVENT_PTP_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_MAJOR_LEN 4 #define MCDI_EVENT_PTP_MAJOR_LBN 0 #define MCDI_EVENT_PTP_MAJOR_WIDTH 32 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field * of timestamp */ #define MCDI_EVENT_PTP_NANOSECONDS_OFST 0 +#define MCDI_EVENT_PTP_NANOSECONDS_LEN 4 #define MCDI_EVENT_PTP_NANOSECONDS_LBN 0 #define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of * timestamp */ #define MCDI_EVENT_PTP_MINOR_OFST 0 +#define MCDI_EVENT_PTP_MINOR_LEN 4 #define MCDI_EVENT_PTP_MINOR_LBN 0 #define MCDI_EVENT_PTP_MINOR_WIDTH 32 /* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet */ #define MCDI_EVENT_PTP_UUID_OFST 0 +#define MCDI_EVENT_PTP_UUID_LEN 4 #define MCDI_EVENT_PTP_UUID_LBN 0 #define MCDI_EVENT_PTP_UUID_WIDTH 32 #define MCDI_EVENT_RX_ERR_DATA_OFST 0 +#define MCDI_EVENT_RX_ERR_DATA_LEN 4 #define MCDI_EVENT_RX_ERR_DATA_LBN 0 #define MCDI_EVENT_RX_ERR_DATA_WIDTH 32 #define MCDI_EVENT_PAR_ERR_DATA_OFST 0 +#define MCDI_EVENT_PAR_ERR_DATA_LEN 4 #define MCDI_EVENT_PAR_ERR_DATA_LBN 0 #define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32 #define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4 #define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0 #define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32 #define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4 #define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0 #define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32 /* For CODE_PTP_TIME events, the major value of the PTP clock */ #define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_LEN 4 #define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0 #define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32 /* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */ #define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36 #define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8 +/* For CODE_PTP_TIME events, most significant bits of the minor value of the + * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19. 
+ */ +#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36 +#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8 /* For CODE_PTP_TIME events where report sync status is enabled, indicates * whether the NIC clock has ever been set */ @@ -634,10 +753,17 @@ */ #define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38 #define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6 +/* For CODE_PTP_TIME events, most significant bits of the minor value of the + * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21. + */ +#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38 +#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6 #define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4 #define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0 #define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32 #define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4 #define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0 #define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32 /* Zero means that the request has been completed or authorized, and the driver @@ -646,6 +772,10 @@ */ #define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36 #define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8 +#define MCDI_EVENT_DBRET_DATA_OFST 0 +#define MCDI_EVENT_DBRET_DATA_LEN 4 +#define MCDI_EVENT_DBRET_DATA_LBN 0 +#define MCDI_EVENT_DBRET_DATA_WIDTH 32 /* FCDI_EVENT structuredef */ #define FCDI_EVENT_LEN 8 @@ -662,6 +792,7 @@ /* enum: Fatal. */ #define FCDI_EVENT_LEVEL_FATAL 0x3 #define FCDI_EVENT_DATA_OFST 0 +#define FCDI_EVENT_DATA_LEN 4 #define FCDI_EVENT_LINK_STATE_STATUS_LBN 0 #define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1 #define FCDI_EVENT_LINK_DOWN 0x0 /* enum */ @@ -701,6 +832,7 @@ #define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */ #define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */ #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0 +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32 #define FCDI_EVENT_ASSERT_TYPE_LBN 36 @@ -708,12 +840,15 @@ #define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36 #define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8 #define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4 #define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0 #define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32 #define FCDI_EVENT_LINK_STATE_DATA_OFST 0 +#define FCDI_EVENT_LINK_STATE_DATA_LEN 4 #define FCDI_EVENT_LINK_STATE_DATA_LBN 0 #define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32 #define FCDI_EVENT_PTP_STATE_OFST 0 +#define FCDI_EVENT_PTP_STATE_LEN 4 #define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */ #define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */ #define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */ @@ -722,6 +857,7 @@ #define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36 #define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32 /* Index of MC port being referred to */ @@ -729,9 +865,11 @@ #define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8 /* FC Port index that matches the MC port index in SRC */ #define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0 +#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4 #define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0 #define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32 #define FCDI_EVENT_BOOT_RESULT_OFST 0 +#define FCDI_EVENT_BOOT_RESULT_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */ 
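The _LBN/_WIDTH pairs throughout these event definitions locate bitfields inside the 64-bit MCDI event word (low bit number plus width), while the _OFST/_LEN pairs this patch adds record each dword field's byte offset and length. Below is a minimal standalone sketch of the LBN/WIDTH convention, reusing the LINKCHANGE_SPEED values defined earlier in this hunk; the mcdi_field() helper is illustrative only, since the in-tree driver extracts these fields with its EFX_QWORD_FIELD macros from bitfield.h:

#include <stdint.h>
#include <stdio.h>

/* Values as defined in the mcdi_pcol.h hunk above */
#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7

/* Extract a WIDTH-bit field starting at low bit number LBN;
 * assumes width < 64.
 */
static uint64_t mcdi_field(uint64_t event, unsigned int lbn, unsigned int width)
{
	return (event >> lbn) & ((1ULL << width) - 1);
}

int main(void)
{
	uint64_t event = (uint64_t)MCDI_EVENT_LINKCHANGE_SPEED_100G
			 << MCDI_EVENT_LINKCHANGE_SPEED_LBN;

	if (mcdi_field(event, MCDI_EVENT_LINKCHANGE_SPEED_LBN,
		       MCDI_EVENT_LINKCHANGE_SPEED_WIDTH) ==
	    MCDI_EVENT_LINKCHANGE_SPEED_100G)
		printf("LINKCHANGE event reports 100G link speed\n");
	return 0;
}

Keeping LBN/WIDTH separate from OFST/LEN lets the same generated header describe both bit-granular event decoding and dword-granular MCDI payload marshalling.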
#define FCDI_EVENT_BOOT_RESULT_LBN 0 @@ -748,14 +886,17 @@ #define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num)) /* Number of timestamps following */ #define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0 +#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4 #define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0 #define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32 /* Seconds field of a timestamp record */ #define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4 #define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64 #define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32 /* Nanoseconds field of a timestamp record */ #define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12 +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4 #define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96 #define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32 /* Timestamp records comprising the event */ @@ -783,6 +924,7 @@ /* enum: Fatal. */ #define MUM_EVENT_LEVEL_FATAL 0x3 #define MUM_EVENT_DATA_OFST 0 +#define MUM_EVENT_DATA_LEN 4 #define MUM_EVENT_SENSOR_ID_LBN 0 #define MUM_EVENT_SENSOR_ID_WIDTH 8 /* Enum values, see field(s): */ @@ -820,18 +962,23 @@ /* enum: Link fault has been asserted, or has cleared. */ #define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4 #define MUM_EVENT_SENSOR_DATA_OFST 0 +#define MUM_EVENT_SENSOR_DATA_LEN 4 #define MUM_EVENT_SENSOR_DATA_LBN 0 #define MUM_EVENT_SENSOR_DATA_WIDTH 32 #define MUM_EVENT_PORT_PHY_FLAGS_OFST 0 +#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4 #define MUM_EVENT_PORT_PHY_FLAGS_LBN 0 #define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32 #define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4 #define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0 #define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32 #define MUM_EVENT_PORT_PHY_CAPS_OFST 0 +#define MUM_EVENT_PORT_PHY_CAPS_LEN 4 #define MUM_EVENT_PORT_PHY_CAPS_LBN 0 #define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32 #define MUM_EVENT_PORT_PHY_TECH_OFST 0 +#define MUM_EVENT_PORT_PHY_TECH_LEN 4 #define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */ #define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */ #define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */ @@ -864,7 +1011,9 @@ /* MC_CMD_READ32_IN msgrequest */ #define MC_CMD_READ32_IN_LEN 8 #define MC_CMD_READ32_IN_ADDR_OFST 0 +#define MC_CMD_READ32_IN_ADDR_LEN 4 #define MC_CMD_READ32_IN_NUMWORDS_OFST 4 +#define MC_CMD_READ32_IN_NUMWORDS_LEN 4 /* MC_CMD_READ32_OUT msgresponse */ #define MC_CMD_READ32_OUT_LENMIN 4 @@ -882,13 +1031,14 @@ */ #define MC_CMD_WRITE32 0x2 -#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_WRITE32_IN msgrequest */ #define MC_CMD_WRITE32_IN_LENMIN 8 #define MC_CMD_WRITE32_IN_LENMAX 252 #define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num)) #define MC_CMD_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_WRITE32_IN_ADDR_LEN 4 #define MC_CMD_WRITE32_IN_BUFFER_OFST 4 #define MC_CMD_WRITE32_IN_BUFFER_LEN 4 #define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1 @@ -915,6 +1065,7 @@ * is a bitfield, with each bit as documented below. 
*/ #define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 +#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4 /* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */ #define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000 /* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and @@ -940,9 +1091,12 @@ #define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1 /* Destination address */ #define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 +#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4 #define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 +#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4 /* Address of where to jump after copy. */ #define MC_CMD_COPYCODE_IN_JUMP_OFST 12 +#define MC_CMD_COPYCODE_IN_JUMP_LEN 4 /* enum: Control should return to the caller rather than jumping */ #define MC_CMD_COPYCODE_JUMP_NONE 0x1 @@ -956,12 +1110,13 @@ */ #define MC_CMD_SET_FUNC 0x4 -#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_SET_FUNC_IN msgrequest */ #define MC_CMD_SET_FUNC_IN_LEN 4 /* Set function */ #define MC_CMD_SET_FUNC_IN_FUNC_OFST 0 +#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4 /* MC_CMD_SET_FUNC_OUT msgresponse */ #define MC_CMD_SET_FUNC_OUT_LEN 0 @@ -973,7 +1128,7 @@ */ #define MC_CMD_GET_BOOT_STATUS 0x5 -#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_GET_BOOT_STATUS_IN msgrequest */ #define MC_CMD_GET_BOOT_STATUS_IN_LEN 0 @@ -982,9 +1137,11 @@ #define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8 /* ?? */ #define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0 +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4 /* enum: indicates that the MC wasn't flash booted */ #define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1 @@ -1007,11 +1164,13 @@ #define MC_CMD_GET_ASSERTS_IN_LEN 4 /* Set to clear assertion */ #define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0 +#define MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4 /* MC_CMD_GET_ASSERTS_OUT msgresponse */ #define MC_CMD_GET_ASSERTS_OUT_LEN 140 /* Assertion status flag. */ #define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4 /* enum: No assertions have failed. */ #define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 /* enum: A system-level assertion has failed. */ @@ -1024,6 +1183,7 @@ #define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 /* Failing PC value */ #define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4 /* Saved GP regs */ #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4 @@ -1034,7 +1194,9 @@ #define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 /* Failing thread address */ #define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4 #define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4 /***********************************/ @@ -1050,12 +1212,14 @@ #define MC_CMD_LOG_CTRL_IN_LEN 8 /* Log destination */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0 +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4 /* enum: UART. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum: Event queue. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 /* Legacy argument. Must be zero. 
*/ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4 /* MC_CMD_LOG_CTRL_OUT msgresponse */ #define MC_CMD_LOG_CTRL_OUT_LEN 0 @@ -1076,23 +1240,29 @@ #define MC_CMD_GET_VERSION_EXT_IN_LEN 4 /* placeholder, set to 0 */ #define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0 +#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4 /* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */ #define MC_CMD_GET_VERSION_V0_OUT_LEN 4 #define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 /* enum: Reserved version number to indicate "any" version. */ #define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff /* enum: Bootrom version value for Siena. */ #define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000 /* enum: Bootrom version value for Huntington. */ #define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001 +/* enum: Bootrom version value for Medford2. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002 /* MC_CMD_GET_VERSION_OUT msgresponse */ #define MC_CMD_GET_VERSION_OUT_LEN 32 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ /* Enum values, see field(s): */ /* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ #define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_OUT_PCOL_LEN 4 /* 128bit mask of functions supported by the current firmware */ #define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8 #define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16 @@ -1104,9 +1274,11 @@ /* MC_CMD_GET_VERSION_EXT_OUT msgresponse */ #define MC_CMD_GET_VERSION_EXT_OUT_LEN 48 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ /* Enum values, see field(s): */ /* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ #define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4 /* 128bit mask of functions supported by the current firmware */ #define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8 #define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16 @@ -1136,41 +1308,54 @@ #define MC_CMD_PTP_OP_ENABLE 0x1 /* enum: Disable PTP packet timestamping operation. */ #define MC_CMD_PTP_OP_DISABLE 0x2 -/* enum: Send a PTP packet. */ +/* enum: Send a PTP packet. This operation is used on Siena and Huntington. + * From Medford onwards it is not supported: on those platforms PTP transmit + * timestamping is done using the fast path. + */ #define MC_CMD_PTP_OP_TRANSMIT 0x3 /* enum: Read the current NIC time. */ #define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 -/* enum: Get the current PTP status. */ +/* enum: Get the current PTP status. Note that the clock frequency returned (in + * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666). + */ #define MC_CMD_PTP_OP_STATUS 0x5 /* enum: Adjust the PTP NIC's time. */ #define MC_CMD_PTP_OP_ADJUST 0x6 /* enum: Synchronize host and NIC time. */ #define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 -/* enum: Basic manufacturing tests. */ +/* enum: Basic manufacturing tests. Siena PTP adapters only. */ #define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 -/* enum: Packet based manufacturing tests. */ +/* enum: Packet based manufacturing tests. Siena PTP adapters only. */ #define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 /* enum: Reset some of the PTP related statistics */ #define MC_CMD_PTP_OP_RESET_STATS 0xa /* enum: Debug operations to MC. */ #define MC_CMD_PTP_OP_DEBUG 0xb -/* enum: Read an FPGA register */ +/* enum: Read an FPGA register. 
Siena PTP adapters only. */ #define MC_CMD_PTP_OP_FPGAREAD 0xc -/* enum: Write an FPGA register */ +/* enum: Write an FPGA register. Siena PTP adapters only. */ #define MC_CMD_PTP_OP_FPGAWRITE 0xd /* enum: Apply an offset to the NIC clock */ #define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe -/* enum: Change Apply an offset to the NIC clock */ +/* enum: Change the frequency correction applied to the NIC clock */ #define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf -/* enum: Set the MC packet filter VLAN tags for received PTP packets */ +/* enum: Set the MC packet filter VLAN tags for received PTP packets. + * Deprecated for Huntington onwards. + */ #define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10 -/* enum: Set the MC packet filter UUID for received PTP packets */ +/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for + * Huntington onwards. + */ #define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11 -/* enum: Set the MC packet filter Domain for received PTP packets */ +/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated + * for Huntington onwards. + */ #define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12 -/* enum: Set the clock source */ +/* enum: Set the clock source. Required for snapper tests on Huntington and + * Medford. Not implemented for Siena or Medford2. + */ #define MC_CMD_PTP_OP_SET_CLK_SRC 0x13 -/* enum: Reset value of Timer Reg. */ +/* enum: Reset value of Timer Reg. Not implemented. */ #define MC_CMD_PTP_OP_RST_CLK 0x14 /* enum: Enable the forwarding of PPS events to the host */ #define MC_CMD_PTP_OP_PPS_ENABLE 0x15 @@ -1191,7 +1376,7 @@ /* enum: Unsubscribe to stop receiving time events */ #define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19 /* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS - * input on the same NIC. + * input on the same NIC. Siena PTP adapters only. */ #define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a /* enum: Set the PTP sync status. Status is used by firmware to report to event @@ -1204,11 +1389,15 @@ /* MC_CMD_PTP_IN_ENABLE msgrequest */ #define MC_CMD_PTP_IN_ENABLE_LEN 16 #define MC_CMD_PTP_IN_CMD_OFST 0 +#define MC_CMD_PTP_IN_CMD_LEN 4 #define MC_CMD_PTP_IN_PERIPH_ID_OFST 4 -/* Event queue for PTP events */ +#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4 +/* Not used. Events are always sent to function relative queue 0. */ #define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8 -/* PTP timestamping mode */ +#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4 +/* PTP timestamping mode. Not used from Huntington onwards.
*/ #define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12 +#define MC_CMD_PTP_IN_ENABLE_MODE_LEN 4 /* enum: PTP, version 1 */ #define MC_CMD_PTP_MODE_V1 0x0 /* enum: PTP, version 1, with VLAN headers - deprecated */ @@ -1225,16 +1414,21 @@ /* MC_CMD_PTP_IN_DISABLE msgrequest */ #define MC_CMD_PTP_IN_DISABLE_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_TRANSMIT msgrequest */ #define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13 #define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252 #define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num)) /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Transmit packet length */ #define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8 +#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4 /* Transmit packet data */ #define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12 #define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1 @@ -1244,17 +1438,30 @@ /* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */ #define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */ +#define MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_STATUS msgrequest */ #define MC_CMD_PTP_IN_STATUS_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_ADJUST msgrequest */ #define MC_CMD_PTP_IN_ADJUST_LEN 24 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Frequency adjustment 40 bit fixed point ns */ #define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8 #define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8 @@ -1262,21 +1469,67 @@ #define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12 /* enum: Number of fractional bits in frequency adjustment */ #define MC_CMD_PTP_IN_ADJUST_BITS 0x28 +/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ + * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES + * field. 
+ */ +#define MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c /* Time adjustment in seconds */ #define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4 /* Time adjustment major value */ #define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4 /* Time adjustment in nanoseconds */ #define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4 /* Time adjustment minor value */ #define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4 + +/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */ +#define MC_CMD_PTP_IN_ADJUST_V2_LEN 28 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12 +/* enum: Number of fractional bits in frequency adjustment */ +/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */ +/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ + * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES + * field. + */ +/* MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4 +/* Upper 32bits of major time offset adjustment */ +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24 +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4 /* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */ #define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Number of time readings to capture */ #define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8 +#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4 /* Host address in which to write "synchronization started" indication (64 * bits) */ @@ -1288,42 +1541,59 @@ /* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */ #define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */ #define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Enable or disable packet testing */ #define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4 /* MC_CMD_PTP_IN_RESET_STATS msgrequest */ #define MC_CMD_PTP_IN_RESET_STATS_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* Reset PTP statistics */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_DEBUG msgrequest */ #define MC_CMD_PTP_IN_DEBUG_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Debug operations */ 
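The FREQ field of MC_CMD_PTP_IN_ADJUST and MC_CMD_PTP_IN_ADJUST_V2 above is a signed fixed-point rate offset: 40 fractional bits by default (MC_CMD_PTP_IN_ADJUST_BITS, 0x28), or 44 (MC_CMD_PTP_IN_ADJUST_BITS_FP44, 0x2c) when MC_CMD_PTP_OUT_GET_ATTRIBUTES advertises the FP44_FREQ_ADJ capability. Below is a hedged standalone sketch of that encoding; ppb_to_freq_word() is a hypothetical helper, not the driver's code, and a real implementation must bound |ppb| so the arithmetic cannot overflow 64 bits, as the driver's PTP code does:

#include <stdint.h>
#include <stdio.h>

#define MC_CMD_PTP_IN_ADJUST_BITS	0x28	/* 40 fractional bits */
#define MC_CMD_PTP_IN_ADJUST_BITS_FP44	0x2c	/* 44 fractional bits */

/* Encode a parts-per-billion rate correction as the fixed-point FREQ
 * word: a rate offset of ppb/1e9 with frac_bits fractional bits.
 * Assumes the caller keeps |ppb| small enough that the multiply fits
 * in 64 bits.
 */
static int64_t ppb_to_freq_word(int64_t ppb, unsigned int frac_bits)
{
	return ppb * (1LL << frac_bits) / 1000000000LL;
}

int main(void)
{
	printf("+100 ppb, fp40 word: %lld\n",
	       (long long)ppb_to_freq_word(100, MC_CMD_PTP_IN_ADJUST_BITS));
	printf("+100 ppb, fp44 word: %lld\n",
	       (long long)ppb_to_freq_word(100, MC_CMD_PTP_IN_ADJUST_BITS_FP44));
	return 0;
}

Since 1 ppb is a rate offset of 1e-9, the word is simply ppb * 2^frac_bits / 1e9; the four extra fractional bits of the FP44 format give sixteen times finer frequency resolution.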
#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8 +#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4 /* MC_CMD_PTP_IN_FPGAREAD msgrequest */ #define MC_CMD_PTP_IN_FPGAREAD_LEN 16 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ #define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4 #define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12 +#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4 /* MC_CMD_PTP_IN_FPGAWRITE msgrequest */ #define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13 #define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252 #define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num)) /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ #define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4 #define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12 #define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1 #define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1 @@ -1332,34 +1602,67 @@ /* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */ #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Time adjustment in seconds */ #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4 /* Time adjustment major value */ #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4 /* Time adjustment in nanoseconds */ #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4 /* Time adjustment minor value */ #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4 + +/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4 +/* Upper 32bits of major time offset adjustment */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4 /* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */ #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Frequency adjustment 40 bit fixed point ns */ #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8 #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8 #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8 #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12 -/* enum: Number of fractional bits in frequency adjustment */ -/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */ +/* Enum values, 
see field(s): */ +/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */ /* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */ #define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Number of VLAN tags, 0 if not VLAN */ #define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4 /* Set of VLAN tags to filter against */ #define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12 #define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4 @@ -1368,9 +1671,12 @@ /* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */ #define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* 1 to enable UUID filtering, 0 to disable */ #define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4 /* UUID to filter against */ #define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12 #define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8 @@ -1380,18 +1686,25 @@ /* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */ #define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* 1 to enable Domain filtering, 0 to disable */ #define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4 /* Domain number to filter against */ #define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4 /* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */ #define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Set the clock source. */ #define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8 +#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4 /* enum: Internal. */ #define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0 /* enum: External. */ @@ -1400,42 +1713,56 @@ /* MC_CMD_PTP_IN_RST_CLK msgrequest */ #define MC_CMD_PTP_IN_RST_CLK_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* Reset value of Timer Reg. */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */ #define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* Enable or disable */ #define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4 +#define MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4 /* enum: Enable */ #define MC_CMD_PTP_ENABLE_PPS 0x0 /* enum: Disable */ #define MC_CMD_PTP_DISABLE_PPS 0x1 -/* Queue id to send events back */ +/* Not used. Events are always sent to function relative queue 0. 
*/ #define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8 +#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4 /* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */ #define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */ #define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */ #define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */ #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Original field containing queue ID. Now extended to include flags. */ #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31 @@ -1444,29 +1771,39 @@ /* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Unsubscribe options */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4 /* enum: Unsubscribe a single queue */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0 /* enum: Unsubscribe all queues */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1 /* Event queue ID */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12 +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4 /* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */ #define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* 1 to enable PPS test mode, 0 to disable and return result. */ #define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4 /* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */ #define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* NIC - Host System Clock Synchronization status */ #define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4 /* enum: Host System clock and NIC clock are not in sync */ #define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0 /* enum: Host System clock and NIC clock are synchronized */ @@ -1475,8 +1812,11 @@ * no longer in sync. 
*/ #define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4 #define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4 #define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4 /* MC_CMD_PTP_OUT msgresponse */ #define MC_CMD_PTP_OUT_LEN 0 @@ -1485,12 +1825,16 @@ #define MC_CMD_PTP_OUT_TRANSMIT_LEN 8 /* Value of seconds timestamp */ #define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4 /* Timestamp major value */ #define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4 /* Value of nanoseconds timestamp */ #define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4 /* Timestamp minor value */ #define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4 /* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */ #define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0 @@ -1502,47 +1846,85 @@ #define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8 /* Value of seconds timestamp */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4 /* Timestamp major value */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4 /* Value of nanoseconds timestamp */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4 /* Timestamp minor value */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4 + +/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4 +/* Upper 32bits of major timestamp value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4 /* MC_CMD_PTP_OUT_STATUS msgresponse */ #define MC_CMD_PTP_OUT_STATUS_LEN 64 /* Frequency of NIC's hardware clock */ #define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0 +#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4 /* Number of packets transmitted and timestamped */ #define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4 +#define MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4 /* Number of packets received and timestamped */ #define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8 +#define MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4 /* Number of packets timestamped by the FPGA */ #define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12 +#define MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4 /* Number of packets filter matched */ #define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16 +#define MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4 /* Number of packets not filter matched */ #define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20 +#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4 /* Number of PPS overflows (noise on input?) 
*/ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4 /* Number of PPS bad periods */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4 /* Minimum period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4 /* Maximum period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4 /* Last period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4 /* Mean period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4 /* Minimum offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4 /* Maximum offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4 /* Last offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4 /* Mean offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4 /* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20 @@ -1555,23 +1937,31 @@ #define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12 /* Host time immediately before NIC's hardware clock read */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4 /* Value of seconds timestamp */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4 /* Timestamp major value */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4 /* Value of nanoseconds timestamp */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4 /* Timestamp minor value */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4 /* Host time immediately after NIC's hardware clock read */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4 /* Number of nanoseconds waited after reading NIC's hardware clock */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4 /* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */ #define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8 /* Results of testing */ #define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0 +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4 /* enum: Successful test */ #define MC_CMD_PTP_MANF_SUCCESS 0x0 /* enum: FPGA load failed */ @@ -1604,15 +1994,19 @@ #define MC_CMD_PTP_MANF_CLOCK_READ 0xe /* Presence of external oscillator */ #define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4 +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4 /* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */ #define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12 /* Results of testing */ #define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4 /* Number of packets received by 
FPGA */ #define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4 /* Number of packets received by Siena filters */ #define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4 /* MC_CMD_PTP_OUT_FPGAREAD msgresponse */ #define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1 @@ -1628,9 +2022,11 @@ /* Time format required/used by this NIC. Applies to all PTP MCDI * operations that pass times between the host and firmware. If this operation * is not supported (older firmware) a format of seconds and nanoseconds should - * be assumed. + * be assumed. Note this enum is deprecated. Do not add to it; use the + * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead. */ #define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4 /* enum: Times are in seconds and nanoseconds */ #define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0 /* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */ @@ -1646,12 +2042,16 @@ * be assumed. */ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4 /* enum: Times are in seconds and nanoseconds */ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0 /* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1 /* enum: Major register has units of seconds, minor 2^-27s per tick */ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2 +/* enum: Major register units are seconds, minor units are quarter nanoseconds + */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3 /* Minimum acceptable value for a corrected synchronization timeset. When * comparing host and NIC clock times, the MC returns a set of samples that * contain the host start and end time, the MC time when the host start was @@ -1660,46 +2060,66 @@ * end and start times minus the time that the MC waited for host end.
*/ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4 /* Various PTP capabilities */ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16 /* Uncorrected error on PTP transmit timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4 /* Uncorrected error on PTP receive timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4 /* Uncorrected error on PPS output in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4 /* Uncorrected error on PPS input in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24 /* Uncorrected error on PTP transmit timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4 /* Uncorrected error on PTP receive timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4 /* Uncorrected error on PPS output in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4 /* Uncorrected error on PPS input in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4 /* Uncorrected error on non-PTP transmit timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4 /* Uncorrected error on non-PTP receive timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4 /* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */ #define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4 /* Results of testing */ #define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0 +#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4 /* Enum values, 
see field(s): */ /* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */ @@ -1713,14 +2133,17 @@ */ #define MC_CMD_CSR_READ32 0xc -#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_CSR_READ32_IN msgrequest */ #define MC_CMD_CSR_READ32_IN_LEN 12 /* Address */ #define MC_CMD_CSR_READ32_IN_ADDR_OFST 0 +#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4 #define MC_CMD_CSR_READ32_IN_STEP_OFST 4 +#define MC_CMD_CSR_READ32_IN_STEP_LEN 4 #define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8 +#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4 /* MC_CMD_CSR_READ32_OUT msgresponse */ #define MC_CMD_CSR_READ32_OUT_LENMIN 4 @@ -1739,7 +2162,7 @@ */ #define MC_CMD_CSR_WRITE32 0xd -#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_CSR_WRITE32_IN msgrequest */ #define MC_CMD_CSR_WRITE32_IN_LENMIN 12 @@ -1747,7 +2170,9 @@ #define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num)) /* Address */ #define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4 #define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4 +#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4 #define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8 #define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4 #define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1 @@ -1756,6 +2181,7 @@ /* MC_CMD_CSR_WRITE32_OUT msgresponse */ #define MC_CMD_CSR_WRITE32_OUT_LEN 4 #define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0 +#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4 /***********************************/ @@ -1776,6 +2202,7 @@ * sensors. */ #define MC_CMD_HP_IN_SUBCMD_OFST 0 +#define MC_CMD_HP_IN_SUBCMD_LEN 4 /* enum: OCSD (Option Card Sensor Data) sub-command. */ #define MC_CMD_HP_IN_OCSD_SUBCMD 0x0 /* enum: Last known valid HP sub-command. */ @@ -1790,10 +2217,12 @@ * NULL.) */ #define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12 +#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4 /* MC_CMD_HP_OUT msgresponse */ #define MC_CMD_HP_OUT_LEN 4 #define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0 +#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4 /* enum: OCSD stopped for this card. */ #define MC_CMD_HP_OUT_OCSD_STOPPED 0x1 /* enum: OCSD was successfully started with the address provided. */ @@ -1838,29 +2267,35 @@ * external devices. */ #define MC_CMD_MDIO_READ_IN_BUS_OFST 0 +#define MC_CMD_MDIO_READ_IN_BUS_LEN 4 /* enum: Internal. */ #define MC_CMD_MDIO_BUS_INTERNAL 0x0 /* enum: External. */ #define MC_CMD_MDIO_BUS_EXTERNAL 0x1 /* Port address */ #define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4 +#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4 /* Device Address or clause 22. */ #define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8 +#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4 /* enum: By default all the MCDI MDIO operations perform clause45 mode. If you * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. */ #define MC_CMD_MDIO_CLAUSE22 0x20 /* Address */ #define MC_CMD_MDIO_READ_IN_ADDR_OFST 12 +#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4 /* MC_CMD_MDIO_READ_OUT msgresponse */ #define MC_CMD_MDIO_READ_OUT_LEN 8 /* Value */ #define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0 +#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4 /* Status the MDIO commands return the raw status bits from the MDIO block. A * "good" transaction should have the DONE bit set and all other bits clear. */ #define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4 +#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4 /* enum: Good. */ #define MC_CMD_MDIO_STATUS_GOOD 0x8 @@ -1879,22 +2314,27 @@ * external devices. */ #define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0 +#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4 /* enum: Internal. 
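#include <stdint.h>

/*
 * Editorial sketch, not part of this patch: building an MC_CMD_MDIO_READ
 * request for a clause 22 register. Per the comment above, clause 22 is
 * selected by passing DEVAD = MC_CMD_MDIO_CLAUSE22, and a "good" completion
 * has STATUS == MC_CMD_MDIO_STATUS_GOOD. mcdi_put_dword() is the
 * hypothetical little-endian mirror of the mcdi_get_dword() sketch above;
 * the 16-byte request size (four dwords) is an assumption.
 */
static void mcdi_put_dword(uint8_t *payload, unsigned int ofst, uint32_t v)
{
	payload[ofst] = v & 0xff;
	payload[ofst + 1] = (v >> 8) & 0xff;
	payload[ofst + 2] = (v >> 16) & 0xff;
	payload[ofst + 3] = (v >> 24) & 0xff;
}

static void build_mdio_c22_read(uint8_t req[16], uint32_t prtad, uint32_t reg)
{
	mcdi_put_dword(req, MC_CMD_MDIO_READ_IN_BUS_OFST,
		       MC_CMD_MDIO_BUS_EXTERNAL);
	mcdi_put_dword(req, MC_CMD_MDIO_READ_IN_PRTAD_OFST, prtad);
	mcdi_put_dword(req, MC_CMD_MDIO_READ_IN_DEVAD_OFST,
		       MC_CMD_MDIO_CLAUSE22);
	mcdi_put_dword(req, MC_CMD_MDIO_READ_IN_ADDR_OFST, reg);
}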
*/ /* MC_CMD_MDIO_BUS_INTERNAL 0x0 */ /* enum: External. */ /* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */ /* Port address */ #define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4 +#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4 /* Device Address or clause 22. */ #define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8 +#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4 /* enum: By default all the MCDI MDIO operations perform clause45 mode. If you * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. */ /* MC_CMD_MDIO_CLAUSE22 0x20 */ /* Address */ #define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12 +#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4 /* Value */ #define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16 +#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4 /* MC_CMD_MDIO_WRITE_OUT msgresponse */ #define MC_CMD_MDIO_WRITE_OUT_LEN 4 @@ -1902,6 +2342,7 @@ * "good" transaction should have the DONE bit set and all other bits clear. */ #define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0 +#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4 /* enum: Good. */ /* MC_CMD_MDIO_STATUS_GOOD 0x8 */ @@ -1912,7 +2353,7 @@ */ #define MC_CMD_DBI_WRITE 0x12 -#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DBI_WRITE_IN msgrequest */ #define MC_CMD_DBI_WRITE_IN_LENMIN 12 @@ -1932,9 +2373,11 @@ /* MC_CMD_DBIWROP_TYPEDEF structuredef */ #define MC_CMD_DBIWROP_TYPEDEF_LEN 12 #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4 #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0 #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32 #define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4 #define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16 #define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16 #define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15 @@ -1944,6 +2387,7 @@ #define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32 #define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32 #define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4 #define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64 #define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32 @@ -1959,13 +2403,16 @@ #define MC_CMD_PORT_READ32_IN_LEN 4 /* Address */ #define MC_CMD_PORT_READ32_IN_ADDR_OFST 0 +#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4 /* MC_CMD_PORT_READ32_OUT msgresponse */ #define MC_CMD_PORT_READ32_OUT_LEN 8 /* Value */ #define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0 +#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4 /* Status */ #define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4 +#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4 /***********************************/ @@ -1979,13 +2426,16 @@ #define MC_CMD_PORT_WRITE32_IN_LEN 8 /* Address */ #define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4 /* Value */ #define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4 +#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4 /* MC_CMD_PORT_WRITE32_OUT msgresponse */ #define MC_CMD_PORT_WRITE32_OUT_LEN 4 /* Status */ #define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0 +#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4 /***********************************/ @@ -1999,6 +2449,7 @@ #define MC_CMD_PORT_READ128_IN_LEN 4 /* Address */ #define MC_CMD_PORT_READ128_IN_ADDR_OFST 0 +#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4 /* MC_CMD_PORT_READ128_OUT msgresponse */ #define MC_CMD_PORT_READ128_OUT_LEN 20 @@ -2007,6 +2458,7 @@ #define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16 /* Status */ #define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16 +#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4 /***********************************/ @@ -2020,6 +2472,7 @@ #define MC_CMD_PORT_WRITE128_IN_LEN 20 /* Address */ #define 
MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0 +#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4 /* Value */ #define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4 #define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16 @@ -2028,6 +2481,7 @@ #define MC_CMD_PORT_WRITE128_OUT_LEN 4 /* Status */ #define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0 +#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4 /* MC_CMD_CAPABILITIES structuredef */ #define MC_CMD_CAPABILITIES_LEN 4 @@ -2072,24 +2526,54 @@ #define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136 #define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num)) #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0 +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4 #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4 #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32 -/* See MC_CMD_CAPABILITIES */ +/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on + * EF10 and later (use MC_CMD_GET_CAPABILITIES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36 -/* See MC_CMD_CAPABILITIES */ +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4 +/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on + * EF10 and later (use MC_CMD_GET_CAPABILITIES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40 +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4 +/* Base MAC address for Siena Port0. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6 +/* Base MAC address for Siena Port1. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6 +/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4 +/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4 +/* Increment between addresses in MAC address pool for Siena Port0. Unused on + * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4 +/* Increment between addresses in MAC address pool for Siena Port1. Unused on + * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68 -/* This field contains a 16-bit value for each of the types of NVRAM area. The - * values are defined in the firmware/mc/platform/.c file for a specific board - * type, but otherwise have no meaning to the MC; they are used by the driver - * to manage selection of appropriate firmware updates. +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4 +/* Siena only. This field contains a 16-bit value for each of the types of + * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a + * specific board type, but otherwise have no meaning to the MC; they are used + * by the driver to manage selection of appropriate firmware updates. Unused on + * EF10 and later (use MC_CMD_NVRAM_METADATA). 
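#include <stdint.h>

/*
 * Editorial sketch, not part of this patch: how the Siena-only BASE/COUNT/
 * STRIDE fields above could be used to derive the n-th address of a port's
 * MAC pool. Confining the increment to the low 24 bits (the NIC-specific
 * octets) is an editorial assumption; EF10 and later use
 * MC_CMD_GET_MAC_ADDRESSES instead, as the comments above note.
 */
static void mac_pool_nth(const uint8_t base[6], uint32_t stride, uint32_t n,
			 uint8_t out[6])
{
	uint32_t low = ((uint32_t)base[3] << 16 | (uint32_t)base[4] << 8 |
			base[5]) + n * stride;

	out[0] = base[0];
	out[1] = base[1];
	out[2] = base[2];
	out[3] = (low >> 16) & 0xff;
	out[4] = (low >> 8) & 0xff;
	out[5] = low & 0xff;
}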
*/ #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72 #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2 @@ -2103,7 +2587,7 @@ */ #define MC_CMD_DBI_READX 0x19 -#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DBI_READX_IN msgrequest */ #define MC_CMD_DBI_READX_IN_LENMIN 8 @@ -2130,9 +2614,11 @@ /* MC_CMD_DBIRDOP_TYPEDEF structuredef */ #define MC_CMD_DBIRDOP_TYPEDEF_LEN 8 #define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4 #define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0 #define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32 #define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4 #define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16 #define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16 #define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15 @@ -2149,7 +2635,7 @@ */ #define MC_CMD_SET_RAND_SEED 0x1a -#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_SET_RAND_SEED_IN msgrequest */ #define MC_CMD_SET_RAND_SEED_IN_LEN 16 @@ -2198,14 +2684,17 @@ #define MC_CMD_DRV_ATTACH_IN_LEN 12 /* new state to set if UPDATE=1 */ #define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4 #define MC_CMD_DRV_ATTACH_LBN 0 #define MC_CMD_DRV_ATTACH_WIDTH 1 #define MC_CMD_DRV_PREBOOT_LBN 1 #define MC_CMD_DRV_PREBOOT_WIDTH 1 /* 1 to set new state, or 0 to just report the existing state */ #define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 +#define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4 /* preferred datapath firmware (for Huntington; ignored for Siena) */ #define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8 +#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4 /* enum: Prefer to use full featured firmware */ #define MC_CMD_FW_FULL_FEATURED 0x0 /* enum: Prefer to use firmware with fewer features but lower latency */ @@ -2229,13 +2718,16 @@ #define MC_CMD_DRV_ATTACH_OUT_LEN 4 /* previous or existing state, see the bitmask at NEW_STATE */ #define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4 /* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */ #define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8 /* previous or existing state, see the bitmask at NEW_STATE */ #define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4 /* Flags associated with this function */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4 +#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4 /* enum: Labels the lowest-numbered function visible to the OS */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0 /* enum: The function can control the link state of the physical port it is @@ -2260,6 +2752,7 @@ #define MC_CMD_SHMUART_IN_LEN 4 /* ??? */ #define MC_CMD_SHMUART_IN_FLAG_OFST 0 +#define MC_CMD_SHMUART_IN_FLAG_LEN 4 /* MC_CMD_SHMUART_OUT msgresponse */ #define MC_CMD_SHMUART_OUT_LEN 0 @@ -2297,6 +2790,7 @@ * (TBD). */ #define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0 +#define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4 #define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0 #define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1 @@ -2314,8 +2808,10 @@ #define MC_CMD_PCIE_CREDITS_IN_LEN 8 /* poll period. 
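#include <stdint.h>

/*
 * Editorial sketch, not part of this patch: a minimal MC_CMD_DRV_ATTACH
 * request that attaches the driver (NEW_STATE bit MC_CMD_DRV_ATTACH_LBN),
 * asks the MC to apply the new state (UPDATE = 1) and prefers full-featured
 * datapath firmware. Reuses the hypothetical mcdi_put_dword() helper from
 * the MDIO sketch above.
 */
extern void mcdi_put_dword(uint8_t *payload, unsigned int ofst, uint32_t v);

static void build_drv_attach(uint8_t req[MC_CMD_DRV_ATTACH_IN_LEN])
{
	mcdi_put_dword(req, MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST,
		       1u << MC_CMD_DRV_ATTACH_LBN);
	mcdi_put_dword(req, MC_CMD_DRV_ATTACH_IN_UPDATE_OFST, 1);
	mcdi_put_dword(req, MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST,
		       MC_CMD_FW_FULL_FEATURED);
}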
0 is disabled */ #define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0 +#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4 /* wipe statistics */ #define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4 +#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4 /* MC_CMD_PCIE_CREDITS_OUT msgresponse */ #define MC_CMD_PCIE_CREDITS_OUT_LEN 16 @@ -2346,31 +2842,54 @@ /* MC_CMD_RXD_MONITOR_IN msgrequest */ #define MC_CMD_RXD_MONITOR_IN_LEN 12 #define MC_CMD_RXD_MONITOR_IN_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4 #define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4 +#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4 #define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8 +#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4 /* MC_CMD_RXD_MONITOR_OUT msgresponse */ #define MC_CMD_RXD_MONITOR_OUT_LEN 80 #define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48 +#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4 /***********************************/ @@ -2379,13 +2898,14 @@ */ #define MC_CMD_PUTS 0x23 -#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_PUTS_IN msgrequest */ #define MC_CMD_PUTS_IN_LENMIN 13 #define MC_CMD_PUTS_IN_LENMAX 252 #define MC_CMD_PUTS_IN_LEN(num) (12+1*(num)) #define MC_CMD_PUTS_IN_DEST_OFST 0 +#define MC_CMD_PUTS_IN_DEST_LEN 4 #define MC_CMD_PUTS_IN_UART_LBN 0 #define MC_CMD_PUTS_IN_UART_WIDTH 1 #define MC_CMD_PUTS_IN_PORT_LBN 1 @@ -2417,6 +2937,7 @@ #define MC_CMD_GET_PHY_CFG_OUT_LEN 72 /* flags */ #define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4 #define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0 #define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1 #define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1 @@ -2433,8 +2954,10 
@@ #define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 +#define MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4 /* Bitmask of supported capabilities */ #define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 +#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4 #define MC_CMD_PHY_CAP_10HDX_LBN 1 #define MC_CMD_PHY_CAP_10HDX_WIDTH 1 #define MC_CMD_PHY_CAP_10FDX_LBN 2 @@ -2459,17 +2982,39 @@ #define MC_CMD_PHY_CAP_40000FDX_WIDTH 1 #define MC_CMD_PHY_CAP_DDM_LBN 12 #define MC_CMD_PHY_CAP_DDM_WIDTH 1 +#define MC_CMD_PHY_CAP_100000FDX_LBN 13 +#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_25000FDX_LBN 14 +#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_50000FDX_LBN 15 +#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16 +#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17 +#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_RS_FEC_LBN 18 +#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19 +#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12 +#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16 +#define MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20 +#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24 #define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44 +#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4 /* enum: Xaui. */ #define MC_CMD_MEDIA_XAUI 0x1 /* enum: CX4. */ @@ -2485,6 +3030,7 @@ /* enum: QSFP+. */ #define MC_CMD_MEDIA_QSFP_PLUS 0x7 #define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48 +#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4 /* enum: Native clause 22 */ #define MC_CMD_MMD_CLAUSE22 0x0 #define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */ @@ -2515,6 +3061,7 @@ #define MC_CMD_START_BIST_IN_LEN 4 /* Type of test. */ #define MC_CMD_START_BIST_IN_TYPE_OFST 0 +#define MC_CMD_START_BIST_IN_TYPE_LEN 4 /* enum: Run the PHY's short cable BIST. */ #define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 /* enum: Run the PHY's long cable BIST. */ @@ -2556,6 +3103,7 @@ #define MC_CMD_POLL_BIST_OUT_LEN 8 /* result */ #define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 +#define MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 /* enum: Running. */ #define MC_CMD_POLL_BIST_RUNNING 0x1 /* enum: Passed. */ @@ -2565,19 +3113,26 @@ /* enum: Timed-out. 
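#include <stdbool.h>
#include <stdint.h>

/*
 * Editorial sketch, not part of this patch: the LBN/WIDTH pairs above
 * describe sub-fields of a containing dword (starting at bit LBN, WIDTH
 * bits wide). A hypothetical check for the new RS-FEC ability bit in
 * GET_PHY_CFG's SUPPORTED_CAP word looks like this.
 */
static bool phy_supports_rs_fec(uint32_t supported_cap)
{
	return (supported_cap >> MC_CMD_PHY_CAP_RS_FEC_LBN) &
	       ((1u << MC_CMD_PHY_CAP_RS_FEC_WIDTH) - 1);
}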
*/ #define MC_CMD_POLL_BIST_TIMEOUT 0x4 #define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 +#define MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4 /* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */ #define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36 /* result */ /* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ /* Enum values, see field(s): */ /* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4 /* Status of each channel A */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4 /* enum: Ok. */ #define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 /* enum: Open. */ @@ -2590,14 +3145,17 @@ #define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 /* Status of each channel B */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4 /* Enum values, see field(s): */ /* CABLE_STATUS_A */ /* Status of each channel C */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4 /* Enum values, see field(s): */ /* CABLE_STATUS_A */ /* Status of each channel D */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4 /* Enum values, see field(s): */ /* CABLE_STATUS_A */ @@ -2605,9 +3163,11 @@ #define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8 /* result */ /* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ /* Enum values, see field(s): */ /* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ #define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4 +#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4 /* enum: Complete. */ #define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 /* enum: Bus switch off I2C write. */ @@ -2631,9 +3191,11 @@ #define MC_CMD_POLL_BIST_OUT_MEM_LEN 36 /* result */ /* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ /* Enum values, see field(s): */ /* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ #define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4 +#define MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4 /* enum: Test has completed. */ #define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0 /* enum: RAM test - walk ones. */ @@ -2650,8 +3212,10 @@ #define MC_CMD_POLL_BIST_MEM_ECC 0x6 /* Failure address, only valid if result is POLL_BIST_FAILED */ #define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8 +#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4 /* Bus or address space to which the failure address corresponds */ #define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12 +#define MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4 /* enum: MC MIPS bus. */ #define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0 /* enum: CSR IREG bus. 
*/ @@ -2672,14 +3236,19 @@ #define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8 /* Pattern written to RAM / register */ #define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16 +#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4 /* Actual value read from RAM / register */ #define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20 +#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4 /* ECC error mask */ #define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4 /* ECC parity error mask */ #define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4 /* ECC fatal error mask */ #define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4 /***********************************/ @@ -2831,6 +3400,143 @@ /* Enum values, see field(s): */ /* 100M */ +/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for + * newer NICs with 25G/50G/100G support + */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64 +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4 +/* enum: None. */ +/* MC_CMD_LOOPBACK_NONE 0x0 */ +/* enum: Data. */ +/* MC_CMD_LOOPBACK_DATA 0x1 */ +/* enum: GMAC. */ +/* MC_CMD_LOOPBACK_GMAC 0x2 */ +/* enum: XGMII. */ +/* MC_CMD_LOOPBACK_XGMII 0x3 */ +/* enum: XGXS. */ +/* MC_CMD_LOOPBACK_XGXS 0x4 */ +/* enum: XAUI. */ +/* MC_CMD_LOOPBACK_XAUI 0x5 */ +/* enum: GMII. */ +/* MC_CMD_LOOPBACK_GMII 0x6 */ +/* enum: SGMII. */ +/* MC_CMD_LOOPBACK_SGMII 0x7 */ +/* enum: XGBR. */ +/* MC_CMD_LOOPBACK_XGBR 0x8 */ +/* enum: XFI. */ +/* MC_CMD_LOOPBACK_XFI 0x9 */ +/* enum: XAUI Far. */ +/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */ +/* enum: GMII Far. */ +/* MC_CMD_LOOPBACK_GMII_FAR 0xb */ +/* enum: SGMII Far. */ +/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */ +/* enum: XFI Far. */ +/* MC_CMD_LOOPBACK_XFI_FAR 0xd */ +/* enum: GPhy. */ +/* MC_CMD_LOOPBACK_GPHY 0xe */ +/* enum: PhyXS. */ +/* MC_CMD_LOOPBACK_PHYXS 0xf */ +/* enum: PCS. */ +/* MC_CMD_LOOPBACK_PCS 0x10 */ +/* enum: PMA-PMD. */ +/* MC_CMD_LOOPBACK_PMAPMD 0x11 */ +/* enum: Cross-Port. */ +/* MC_CMD_LOOPBACK_XPORT 0x12 */ +/* enum: XGMII-Wireside. */ +/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */ +/* enum: XAUI Wireside. */ +/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */ +/* enum: XAUI Wireside Far. */ +/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */ +/* enum: XAUI Wireside near. */ +/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */ +/* enum: GMII Wireside. */ +/* MC_CMD_LOOPBACK_GMII_WS 0x17 */ +/* enum: XFI Wireside. */ +/* MC_CMD_LOOPBACK_XFI_WS 0x18 */ +/* enum: XFI Wireside Far. */ +/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */ +/* enum: PhyXS Wireside. */ +/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */ +/* enum: PMA lanes MAC-Serdes. */ +/* MC_CMD_LOOPBACK_PMA_INT 0x1b */ +/* enum: KR Serdes Parallel (Encoder). */ +/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */ +/* enum: KR Serdes Serial. */ +/* MC_CMD_LOOPBACK_SD_FAR 0x1d */ +/* enum: PMA lanes MAC-Serdes Wireside. */ +/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */ +/* enum: KR Serdes Parallel Wireside (Full PCS). */ +/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */ +/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */ +/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */ +/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ +/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */ +/* enum: KR Serdes Serial Wireside. 
*/ +/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */ +/* enum: Near side of AOE Siena side port */ +/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */ +/* enum: Medford Wireside datapath loopback */ +/* MC_CMD_LOOPBACK_DATA_WS 0x24 */ +/* enum: Force link up without setting up any physical loopback (snapper use + * only) + */ +/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 25G loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 50 loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 100G loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60 +/* Enum values, see field(s): */ +/* 100M */ + /***********************************/ /* MC_CMD_GET_LINK @@ -2848,17 +3554,22 @@ #define MC_CMD_GET_LINK_OUT_LEN 28 /* near-side advertised capabilities */ #define MC_CMD_GET_LINK_OUT_CAP_OFST 0 +#define MC_CMD_GET_LINK_OUT_CAP_LEN 4 /* link-partner advertised capabilities */ #define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 +#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4 /* Autonegotiated speed in mbit/s. The link may still be down even if this * reads non-zero. */ #define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 +#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4 /* Current loopback setting. 
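#include <stdbool.h>
#include <stdint.h>

/*
 * Editorial sketch, not part of this patch: 64-bit MCDI fields are split
 * into _LO/_HI dwords, as in the V2 loopback masks above. This reassembles
 * the 25G mask and tests one mode; bit N corresponds to loopback enum
 * value N (e.g. MC_CMD_LOOPBACK_PMAPMD). mcdi_get_dword() is the
 * hypothetical helper sketched earlier.
 */
extern uint32_t mcdi_get_dword(const uint8_t *payload, unsigned int ofst);

static bool loopback_25g_supported(const uint8_t *resp, unsigned int mode)
{
	uint64_t mask =
		(uint64_t)mcdi_get_dword(resp,
			MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST) |
		(uint64_t)mcdi_get_dword(resp,
			MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST) << 32;

	return (mask >> mode) & 1;
}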
*/ #define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 +#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ #define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16 +#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4 #define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0 #define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1 #define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1 @@ -2873,9 +3584,11 @@ #define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 /* This returns the negotiated flow control value. */ #define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 +#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ #define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 +#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 #define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 @@ -2899,8 +3612,10 @@ #define MC_CMD_SET_LINK_IN_LEN 16 /* ??? */ #define MC_CMD_SET_LINK_IN_CAP_OFST 0 +#define MC_CMD_SET_LINK_IN_CAP_LEN 4 /* Flags */ #define MC_CMD_SET_LINK_IN_FLAGS_OFST 4 +#define MC_CMD_SET_LINK_IN_FLAGS_LEN 4 #define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0 #define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1 #define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1 @@ -2909,12 +3624,14 @@ #define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1 /* Loopback mode. */ #define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8 +#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ /* A loopback speed of "0" is supported, and means (choose any available * speed). */ #define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12 +#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4 /* MC_CMD_SET_LINK_OUT msgresponse */ #define MC_CMD_SET_LINK_OUT_LEN 0 @@ -2932,6 +3649,7 @@ #define MC_CMD_SET_ID_LED_IN_LEN 4 /* Set LED state. */ #define MC_CMD_SET_ID_LED_IN_STATE_OFST 0 +#define MC_CMD_SET_ID_LED_IN_STATE_LEN 4 #define MC_CMD_LED_OFF 0x0 /* enum */ #define MC_CMD_LED_ON 0x1 /* enum */ #define MC_CMD_LED_DEFAULT 0x2 /* enum */ @@ -2954,17 +3672,21 @@ * EtherII, VLAN, bug16011 padding). */ #define MC_CMD_SET_MAC_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_IN_MTU_LEN 4 #define MC_CMD_SET_MAC_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_IN_DRAIN_LEN 4 #define MC_CMD_SET_MAC_IN_ADDR_OFST 8 #define MC_CMD_SET_MAC_IN_ADDR_LEN 8 #define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8 #define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12 #define MC_CMD_SET_MAC_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_IN_REJECT_LEN 4 #define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0 #define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1 #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 #define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 +#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4 /* enum: Flow control is off. */ #define MC_CMD_FCNTL_OFF 0x0 /* enum: Respond to flow control. */ @@ -2978,6 +3700,7 @@ /* enum: Issue flow control. */ #define MC_CMD_FCNTL_GENERATE 0x5 #define MC_CMD_SET_MAC_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4 #define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0 #define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1 @@ -2987,17 +3710,21 @@ * EtherII, VLAN, bug16011 padding). 
*/ #define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4 #define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4 #define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8 #define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8 #define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8 #define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12 #define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4 #define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0 #define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1 #define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1 #define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1 #define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20 +#define MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4 /* enum: Flow control is off. */ /* MC_CMD_FCNTL_OFF 0x0 */ /* enum: Respond to flow control. */ @@ -3011,6 +3738,7 @@ /* enum: Issue flow control. */ /* MC_CMD_FCNTL_GENERATE 0x5 */ #define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4 #define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0 #define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1 /* Select which parameters to configure. A parameter will only be modified if @@ -3019,6 +3747,7 @@ * set). */ #define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4 #define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0 #define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1 #define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1 @@ -3040,6 +3769,7 @@ * to 0. */ #define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0 +#define MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4 /***********************************/ @@ -3144,6 +3874,7 @@ #define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0 #define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4 #define MC_CMD_MAC_STATS_IN_CMD_OFST 8 +#define MC_CMD_MAC_STATS_IN_CMD_LEN 4 #define MC_CMD_MAC_STATS_IN_DMA_LBN 0 #define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1 #define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1 @@ -3158,9 +3889,16 @@ #define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1 #define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16 #define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16 +/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as + * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not + * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to + * MC_CMD_MAC_NSTATS * sizeof(uint64_t) + */ #define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12 +#define MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4 /* port id so vadapter stats can be provided */ #define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16 +#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4 /* MC_CMD_MAC_STATS_OUT_DMA msgresponse */ #define MC_CMD_MAC_STATS_OUT_DMA_LEN 0 @@ -3305,9 +4043,126 @@ #define MC_CMD_GMAC_DMABUF_START 0x40 /* enum: End of GMAC stats buffer space, for Siena only. */ #define MC_CMD_GMAC_DMABUF_END 0x5f -#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */ +/* enum: GENERATION_END value, used together with GENERATION_START to verify + * consistency of DMAd data. For legacy firmware / drivers without extended + * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS * + * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise, + * this value is invalid/ reserved and GENERATION_END is written as the last + * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that + * this is consistent with the legacy behaviour, in the sense that entry 96 is + * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS * + * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details. 
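#include <stddef.h>
#include <stdint.h>

/*
 * Editorial sketch, not part of this patch: choosing DMA_LEN per the rule
 * above and validating a DMAd snapshot. GENERATION_END is always the last
 * 64-bit word of the buffer, which for legacy DMA_LEN == MC_CMD_MAC_NSTATS *
 * sizeof(uint64_t) is exactly entry MC_CMD_MAC_GENERATION_END. The
 * MC_CMD_MAC_GENERATION_START entry and the NSTATS_V2/V3 counts are defined
 * elsewhere in this header; a real driver would also need DMA read barriers
 * and a retry loop around the comparison.
 */
static size_t mac_stats_dma_len(unsigned int stats_version)
{
	switch (stats_version) {
	case 3:
		return MC_CMD_MAC_NSTATS_V3 * sizeof(uint64_t);
	case 2:
		return MC_CMD_MAC_NSTATS_V2 * sizeof(uint64_t);
	default:
		return MC_CMD_MAC_NSTATS * sizeof(uint64_t); /* legacy */
	}
}

static int mac_stats_snapshot_consistent(const uint64_t *buf, size_t dma_len)
{
	size_t gen_end = dma_len / sizeof(uint64_t) - 1;

	return buf[MC_CMD_MAC_GENERATION_START] == buf[gen_end];
}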
+ */ +#define MC_CMD_MAC_GENERATION_END 0x60 #define MC_CMD_MAC_NSTATS 0x61 /* enum */ +/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3) +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2 +/* enum: Start of FEC stats buffer space, Medford2 and up */ +#define MC_CMD_MAC_FEC_DMABUF_START 0x61 +/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2) + */ +#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61 +/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2) + */ +#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62 +/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63 +/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64 +/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65 +/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66 +/* enum: This includes the space at offset 103 which is the final + * GENERATION_END in a MAC_STATS_V2 response and otherwise unused. + */ +#define MC_CMD_MAC_NSTATS_V2 0x68 +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */ + +/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3) +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3 +/* enum: Start of CTPIO stats buffer space, Medford2 and up */ +#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68 +/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the + * target VI + */ +#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68 +/* enum: Number of times a CTPIO send wrote beyond frame end (informational + * only) + */ +#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69 +/* enum: Number of CTPIO failures because the TX doorbell was written before + * the end of the frame data + */ +#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a +/* enum: Number of CTPIO failures because the internal FIFO overflowed */ +#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b +/* enum: Number of CTPIO failures because the host did not deliver data fast + * enough to avoid MAC underflow + */ +#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c +/* enum: Number of CTPIO failures because the host did not deliver all the + * frame data within the timeout + */ +#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d +/* enum: Number of CTPIO failures because the frame data arrived out of order + * or with gaps + */ +#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e +/* enum: Number of CTPIO failures because the host started a new frame before + * completing the 
previous one + */ +#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f +/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits + * or not 32-bit aligned + */ +#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70 +/* enum: Number of CTPIO fallbacks because another VI on the same port was + * sending a CTPIO frame + */ +#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71 +/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled + */ +#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72 +/* enum: Number of CTPIO fallbacks because length in header was less than 29 + * bytes + */ +#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73 +/* enum: Total number of successful CTPIO sends on this port */ +#define MC_CMD_MAC_CTPIO_SUCCESS 0x74 +/* enum: Total number of CTPIO fallbacks on this port */ +#define MC_CMD_MAC_CTPIO_FALLBACK 0x75 +/* enum: Total number of CTPIO poisoned frames on this port, whether erased or + * not + */ +#define MC_CMD_MAC_CTPIO_POISON 0x76 +/* enum: Total number of CTPIO erased frames on this port */ +#define MC_CMD_MAC_CTPIO_ERASE 0x77 +/* enum: This includes the space at offset 120 which is the final + * GENERATION_END in a MAC_STATS_V3 response and otherwise unused. + */ +#define MC_CMD_MAC_NSTATS_V3 0x79 +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */ + /***********************************/ /* MC_CMD_SRIOV @@ -3318,21 +4173,28 @@ /* MC_CMD_SRIOV_IN msgrequest */ #define MC_CMD_SRIOV_IN_LEN 12 #define MC_CMD_SRIOV_IN_ENABLE_OFST 0 +#define MC_CMD_SRIOV_IN_ENABLE_LEN 4 #define MC_CMD_SRIOV_IN_VI_BASE_OFST 4 +#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4 #define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8 +#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4 /* MC_CMD_SRIOV_OUT msgresponse */ #define MC_CMD_SRIOV_OUT_LEN 8 #define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0 +#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4 #define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4 +#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4 /* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32 /* this is only used for the first record */ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8 @@ -3342,6 +4204,7 @@ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32 @@ -3352,6 +4215,7 @@ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32 @@ -3403,10 +4267,12 @@ /* MC_CMD_WOL_FILTER_SET_IN msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_LEN 192 #define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 +#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 #define 
MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */ #define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */ /* A type value of 1 is unused. */ #define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 +#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 /* enum: Magic */ #define MC_CMD_WOL_TYPE_MAGIC 0x0 /* enum: MS Windows Magic */ @@ -3428,7 +4294,9 @@ /* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16 /* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8 #define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8 @@ -3437,9 +4305,13 @@ /* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20 /* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4 #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4 #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16 #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2 #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18 @@ -3448,7 +4320,9 @@ /* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44 /* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16 #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24 @@ -3461,7 +4335,9 @@ /* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187 /* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48 #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56 @@ -3476,8 +4352,11 @@ /* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12 /* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4 #define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0 #define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1 #define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1 @@ -3486,6 +4365,7 @@ /* MC_CMD_WOL_FILTER_SET_OUT msgresponse */ #define MC_CMD_WOL_FILTER_SET_OUT_LEN 4 #define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4 /***********************************/ @@ -3499,6 +4379,7 @@ /* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */ #define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4 #define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4 /* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */ #define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 
0 @@ -3516,6 +4397,7 @@ /* MC_CMD_WOL_FILTER_RESET_IN msgrequest */ #define MC_CMD_WOL_FILTER_RESET_IN_LEN 4 #define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0 +#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4 #define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */ #define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */ @@ -3556,6 +4438,7 @@ #define MC_CMD_NVRAM_TYPES_OUT_LEN 4 /* Bit mask of supported types. */ #define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 +#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4 /* enum: Disabled callisto. */ #define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 /* enum: MC firmware. */ @@ -3612,47 +4495,65 @@ /* MC_CMD_NVRAM_INFO_IN msgrequest */ #define MC_CMD_NVRAM_INFO_IN_LEN 4 #define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ /* MC_CMD_NVRAM_INFO_OUT msgresponse */ #define MC_CMD_NVRAM_INFO_OUT_LEN 24 #define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4 #define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4 #define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4 #define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0 #define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1 #define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6 #define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7 #define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4 #define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 +#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4 /* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */ #define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28 #define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4 #define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4 #define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4 #define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0 #define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1 #define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7 #define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4 #define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4 /* Writes must be multiples of this size. Added to support the MUM on Sorrento. 
*/ #define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24 +#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4 /***********************************/ @@ -3670,6 +4571,7 @@ */ #define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4 #define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ @@ -3680,9 +4582,11 @@ */ #define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8 #define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4 #define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0 #define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1 @@ -3703,20 +4607,26 @@ /* MC_CMD_NVRAM_READ_IN msgrequest */ #define MC_CMD_NVRAM_READ_IN_LEN 12 #define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_READ_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4 /* amount to read in bytes */ #define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4 /* MC_CMD_NVRAM_READ_IN_V2 msgrequest */ #define MC_CMD_NVRAM_READ_IN_V2_LEN 16 #define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0 +#define MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4 +#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4 /* amount to read in bytes */ #define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8 +#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4 /* Optional control info. If a partition is stored with an A/B versioning * scheme (i.e. in more than one physical partition in NVRAM) the host can set * this to control which underlying physical partition is used to read data @@ -3726,6 +4636,7 @@ * verifying by reading with MODE=TARGET_BACKUP. */ #define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12 +#define MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4 /* enum: Same as omitting MODE: caller sees data in current partition unless it * holds the write lock in which case it sees data in the partition it is * updating. 
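#include <stdbool.h>
#include <stdint.h>

/*
 * Editorial sketch, not part of this patch: with REPORT_VERIFY_RESULT set
 * in NVRAM_UPDATE_START_V2/FINISH_V2, the FINISH response (defined just
 * below) carries an MC_CMD_NVRAM_VERIFY_RC_* code; anything other than
 * SUCCESS means the freshly written image must not be trusted.
 * mcdi_get_dword() is the hypothetical helper sketched earlier.
 */
extern uint32_t mcdi_get_dword(const uint8_t *payload, unsigned int ofst);

static bool nvram_update_verified(const uint8_t *finish_resp)
{
	uint32_t rc = mcdi_get_dword(finish_resp,
		MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST);

	return rc == MC_CMD_NVRAM_VERIFY_RC_SUCCESS;
}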
@@ -3765,10 +4676,13 @@ #define MC_CMD_NVRAM_WRITE_IN_LENMAX 252 #define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num)) #define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4 #define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4 #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12 #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1 #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1 @@ -3791,10 +4705,13 @@ /* MC_CMD_NVRAM_ERASE_IN msgrequest */ #define MC_CMD_NVRAM_ERASE_IN_LEN 12 #define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4 #define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4 /* MC_CMD_NVRAM_ERASE_OUT msgresponse */ #define MC_CMD_NVRAM_ERASE_OUT_LEN 0 @@ -3815,9 +4732,11 @@ */ #define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8 #define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4 /* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH * request with additional flags indicating version of NVRAM_UPDATE commands in @@ -3826,10 +4745,13 @@ */ #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1 @@ -3848,16 +4770,19 @@ * This process takes a few seconds to complete. So is likely to take more than * the MCDI timeout. Hence signature verification is initiated when * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware, however, the - * MCDI command returns immediately with error code EAGAIN. Subsequent - * NVRAM_UPDATE_FINISH_V2_IN requests also return EAGAIN if the verification is - * in progress. Once the verification has completed, this response payload - * includes the results of the signature verification. Note that the nvram lock - * in firmware is only released after the verification has completed and the - * host has read back the result code from firmware. + * MCDI command is run in a background MCDI processing thread. This response + * payload includes the results of the signature verification. Note that the + * per-partition nvram lock in firmware is only released after the verification + * has completed. */ #define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4 /* Result of nvram update completion processing */ #define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4 +/* enum: Invalid return code; only non-zero values are defined. 
Defined as + * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT. + */ +#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0 /* enum: Verify succeeded without any errors. */ #define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1 /* enum: CMS format verification failed due to an internal error. */ @@ -3884,6 +4809,12 @@ * Trusted approver's list. */ #define MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb +/* enum: The image contains a test-signed certificate, but the adapter accepts + * only production signed images. + */ +#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc +/* enum: The image has a lower security level than the current firmware. */ +#define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd /***********************************/ @@ -3911,6 +4842,7 @@ /* MC_CMD_REBOOT_IN msgrequest */ #define MC_CMD_REBOOT_IN_LEN 4 #define MC_CMD_REBOOT_IN_FLAGS_OFST 0 +#define MC_CMD_REBOOT_IN_FLAGS_LEN 4 #define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */ /* MC_CMD_REBOOT_OUT msgresponse */ @@ -3947,11 +4879,12 @@ */ #define MC_CMD_REBOOT_MODE 0x3f -#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_REBOOT_MODE_IN msgrequest */ #define MC_CMD_REBOOT_MODE_IN_LEN 4 #define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 +#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4 /* enum: Normal. */ #define MC_CMD_REBOOT_MODE_NORMAL 0x0 /* enum: Power-on Reset. */ @@ -3966,6 +4899,7 @@ /* MC_CMD_REBOOT_MODE_OUT msgresponse */ #define MC_CMD_REBOOT_MODE_OUT_LEN 4 #define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0 +#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4 /***********************************/ @@ -4001,7 +4935,7 @@ */ #define MC_CMD_SENSOR_INFO 0x41 -#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_SENSOR_INFO_IN msgrequest */ #define MC_CMD_SENSOR_INFO_IN_LEN 0 @@ -4015,12 +4949,14 @@ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc. 
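#include <stdbool.h>
#include <stdint.h>

/*
 * Editorial sketch, not part of this patch: the paging scheme described
 * above reserves the top bit of each 31-sensor mask as a "next page" flag,
 * so sensor discovery loops over SENSOR_INFO_EXT pages until the bit is
 * clear. The MASK_OFST and NEXT_PAGE_LBN constants are defined just below;
 * mcdi_get_dword() is the hypothetical helper sketched earlier.
 */
extern uint32_t mcdi_get_dword(const uint8_t *payload, unsigned int ofst);

static bool sensor_page_has_next(const uint8_t *resp)
{
	uint32_t mask = mcdi_get_dword(resp,
			MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST);

	return (mask >> MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN) & 1;
}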
*/ #define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4 /* MC_CMD_SENSOR_INFO_OUT msgresponse */ #define MC_CMD_SENSOR_INFO_OUT_LENMIN 4 #define MC_CMD_SENSOR_INFO_OUT_LENMAX 252 #define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num)) #define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0 +#define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4 /* enum: Controller temperature: degC */ #define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 /* enum: Phy common temperature: degC */ @@ -4183,6 +5119,20 @@ #define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f /* enum: Board temperature (back): degC */ #define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50 +/* enum: 1.8v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V8 0x51 +/* enum: 2.5v power current: mA */ +#define MC_CMD_SENSOR_IN_I2V5 0x52 +/* enum: 3.3v power current: mA */ +#define MC_CMD_SENSOR_IN_I3V3 0x53 +/* enum: 12v power current: mA */ +#define MC_CMD_SENSOR_IN_I12V0 0x54 +/* enum: 1.3v power: mV */ +#define MC_CMD_SENSOR_IN_1V3 0x55 +/* enum: 1.3v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V3 0x56 +/* enum: Not a sensor: reserved for the next page flag */ +#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ #define MC_CMD_SENSOR_ENTRY_OFST 4 #define MC_CMD_SENSOR_ENTRY_LEN 8 @@ -4196,6 +5146,7 @@ #define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252 #define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num)) #define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_SENSOR_INFO_OUT */ #define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31 @@ -4247,7 +5198,7 @@ */ #define MC_CMD_READ_SENSORS 0x42 -#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_READ_SENSORS_IN msgrequest */ #define MC_CMD_READ_SENSORS_IN_LEN 8 @@ -4266,6 +5217,7 @@ #define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4 /* Size in bytes of host buffer. */ #define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8 +#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4 /* MC_CMD_READ_SENSORS_OUT msgresponse */ #define MC_CMD_READ_SENSORS_OUT_LEN 0 @@ -4319,6 +5271,7 @@ /* MC_CMD_GET_PHY_STATE_OUT msgresponse */ #define MC_CMD_GET_PHY_STATE_OUT_LEN 4 #define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0 +#define MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4 /* enum: Ok. */ #define MC_CMD_PHY_STATE_OK 0x1 /* enum: Faulty. 
*/ @@ -4355,6 +5308,7 @@ /* MC_CMD_WOL_FILTER_GET_OUT msgresponse */ #define MC_CMD_WOL_FILTER_GET_OUT_LEN 4 #define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4 /***********************************/ @@ -4371,6 +5325,7 @@ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num)) #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 #define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */ #define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4 @@ -4381,13 +5336,16 @@ /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10 @@ -4398,6 +5356,7 @@ /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4 /***********************************/ @@ -4412,7 +5371,9 @@ /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */ #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8 #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4 /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */ #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0 @@ -4451,6 +5412,7 @@ #define MC_CMD_TESTASSERT_V2_IN_LEN 4 /* How to provoke the assertion */ #define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0 +#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4 /* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless * you're testing firmware, this is what you want. */ @@ -4486,6 +5448,7 @@ #define MC_CMD_WORKAROUND_IN_LEN 8 /* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */ #define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 +#define MC_CMD_WORKAROUND_IN_TYPE_LEN 4 /* enum: Bug 17230 work around. */ #define MC_CMD_WORKAROUND_BUG17230 0x1 /* enum: Bug 35388 work around (unsafe EVQ writes). 
*/ @@ -4514,6 +5477,7 @@ * the workaround */ #define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 +#define MC_CMD_WORKAROUND_IN_ENABLED_LEN 4 /* MC_CMD_WORKAROUND_OUT msgresponse */ #define MC_CMD_WORKAROUND_OUT_LEN 0 @@ -4523,6 +5487,7 @@ */ #define MC_CMD_WORKAROUND_EXT_OUT_LEN 4 #define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4 #define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0 #define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1 @@ -4543,6 +5508,7 @@ /* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */ #define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4 #define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4 /* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5 @@ -4550,6 +5516,7 @@ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num)) /* in bytes */ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1 @@ -4568,12 +5535,14 @@ /* MC_CMD_NVRAM_TEST_IN msgrequest */ #define MC_CMD_NVRAM_TEST_IN_LEN 4 #define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ /* MC_CMD_NVRAM_TEST_OUT msgresponse */ #define MC_CMD_NVRAM_TEST_OUT_LEN 4 #define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0 +#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4 /* enum: Passed. */ #define MC_CMD_NVRAM_TEST_PASS 0x0 /* enum: Failed. */ @@ -4594,12 +5563,16 @@ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16 /* 0-6 low->high de-emph. */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4 /* 0-8 low->high ref.V */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4 /* 0-8 low->high boost */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4 /* 0-8 low->high ref.V */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4 /* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */ #define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0 @@ -4608,10 +5581,13 @@ #define MC_CMD_MRSFP_TWEAK_OUT_LEN 12 /* input bits */ #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4 /* output bits */ #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4 /* direction */ #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4 /* enum: Out. */ #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 /* enum: In. */ @@ -4626,21 +5602,26 @@ */ #define MC_CMD_SENSOR_SET_LIMS 0x4e -#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */ #define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20 #define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0 +#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ /* interpretation is sensor-specific. */ #define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4 +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4 /* interpretation is sensor-specific.
*/ #define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8 +#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4 /* interpretation is sensor-specific. */ #define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12 +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4 /* interpretation is sensor-specific. */ #define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16 +#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4 /* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */ #define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0 @@ -4657,9 +5638,13 @@ /* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */ #define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16 #define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4 #define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4 #define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4 #define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4 /***********************************/ @@ -4680,6 +5665,7 @@ #define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num)) /* total number of partitions */ #define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0 +#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4 /* type ID code for each of NUM_PARTITIONS partitions */ #define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4 #define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4 @@ -4700,6 +5686,7 @@ #define MC_CMD_NVRAM_METADATA_IN_LEN 4 /* Partition type ID code */ #define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4 /* MC_CMD_NVRAM_METADATA_OUT msgresponse */ #define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20 @@ -4707,7 +5694,9 @@ #define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num)) /* Partition type ID code */ #define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4 #define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4 +#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4 #define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0 #define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1 #define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1 @@ -4716,6 +5705,7 @@ #define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1 /* Subtype ID code for content of this partition */ #define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4 /* 1st component of W.X.Y.Z version number for content of this partition */ #define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12 #define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2 @@ -4756,8 +5746,10 @@ #define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2 /* Number of allocated MAC addresses */ #define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4 /* Spacing of allocated MAC addresses */ #define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4 /***********************************/ @@ -4772,6 +5764,7 @@ #define MC_CMD_CLP_IN_LEN 4 /* Sub operation */ #define MC_CMD_CLP_IN_OP_OFST 0 +#define MC_CMD_CLP_IN_OP_LEN 4 /* enum: Return to factory default settings */ #define MC_CMD_CLP_OP_DEFAULT 0x1 /* enum: Set MAC address */ @@ -4789,6 +5782,7 @@ /* MC_CMD_CLP_IN_DEFAULT msgrequest */ #define MC_CMD_CLP_IN_DEFAULT_LEN 4 /* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ /* MC_CMD_CLP_OUT_DEFAULT msgresponse */ #define MC_CMD_CLP_OUT_DEFAULT_LEN 0 @@ -4796,6 +5790,7 @@ /* MC_CMD_CLP_IN_SET_MAC msgrequest */ #define MC_CMD_CLP_IN_SET_MAC_LEN 12 /* MC_CMD_CLP_IN_OP_OFST 0 */
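The NVRAM_PARTITIONS response above is simply a 32-bit partition count followed by an array of 32-bit type ID codes. A minimal decode sketch in C, assuming a hypothetical mcdi_rpc() transport (a stand-in for the sfc driver's real MCDI machinery, e.g. efx_mcdi_rpc()) and MCDI's little-endian wire format:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical transport: issues MCDI command 'cmd' and returns the actual
 * response length via *outlen_actual, or a negative error. */
int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
             uint8_t *out, size_t outlen, size_t *outlen_actual);

static uint32_t get_le32(const uint8_t *b, unsigned int ofst)
{
	return (uint32_t)b[ofst] | ((uint32_t)b[ofst + 1] << 8) |
	       ((uint32_t)b[ofst + 2] << 16) | ((uint32_t)b[ofst + 3] << 24);
}

static int list_nvram_partitions(void)
{
	uint8_t out[252];	/* 252 is the LENMAX used throughout this header */
	size_t outlen;
	uint32_t n, i;
	int rc;

	rc = mcdi_rpc(MC_CMD_NVRAM_PARTITIONS, NULL, 0, out, sizeof(out), &outlen);
	if (rc < 0)
		return rc;
	n = get_le32(out, MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST);
	/* Guard against a truncated response before indexing TYPE_ID[]. */
	if (outlen < (size_t)MC_CMD_NVRAM_PARTITIONS_OUT_LEN(n))
		return -1;
	for (i = 0; i < n; i++)
		printf("partition type 0x%x\n",
		       (unsigned int)get_le32(out,
				MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST +
				i * MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN));
	return 0;
}

The returned type IDs are the NVRAM_PARTITION_TYPE_* values listed later in this patch.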
+/* MC_CMD_CLP_IN_OP_LEN 4 */ /* MAC address assigned to port */ #define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4 #define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6 @@ -4809,6 +5804,7 @@ /* MC_CMD_CLP_IN_GET_MAC msgrequest */ #define MC_CMD_CLP_IN_GET_MAC_LEN 4 /* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ /* MC_CMD_CLP_OUT_GET_MAC msgresponse */ #define MC_CMD_CLP_OUT_GET_MAC_LEN 8 @@ -4822,6 +5818,7 @@ /* MC_CMD_CLP_IN_SET_BOOT msgrequest */ #define MC_CMD_CLP_IN_SET_BOOT_LEN 5 /* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ /* Boot flag */ #define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4 #define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1 @@ -4832,6 +5829,7 @@ /* MC_CMD_CLP_IN_GET_BOOT msgrequest */ #define MC_CMD_CLP_IN_GET_BOOT_LEN 4 /* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ /* MC_CMD_CLP_OUT_GET_BOOT msgresponse */ #define MC_CMD_CLP_OUT_GET_BOOT_LEN 4 @@ -4849,11 +5847,12 @@ */ #define MC_CMD_MUM 0x57 -#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_MUM_IN msgrequest */ #define MC_CMD_MUM_IN_LEN 4 #define MC_CMD_MUM_IN_OP_HDR_OFST 0 +#define MC_CMD_MUM_IN_OP_HDR_LEN 4 #define MC_CMD_MUM_IN_OP_LBN 0 #define MC_CMD_MUM_IN_OP_WIDTH 8 /* enum: NULL MCDI command to MUM */ @@ -4893,26 +5892,32 @@ #define MC_CMD_MUM_IN_NULL_LEN 4 /* MUM cmd header */ #define MC_CMD_MUM_IN_CMD_OFST 0 +#define MC_CMD_MUM_IN_CMD_LEN 4 /* MC_CMD_MUM_IN_GET_VERSION msgrequest */ #define MC_CMD_MUM_IN_GET_VERSION_LEN 4 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* MC_CMD_MUM_IN_READ msgrequest */ #define MC_CMD_MUM_IN_READ_LEN 16 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* ID of (device connected to MUM) to read from registers of */ #define MC_CMD_MUM_IN_READ_DEVICE_OFST 4 +#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4 /* enum: Hittite HMC1035 clock generator on Sorrento board */ #define MC_CMD_MUM_DEV_HITTITE 0x1 /* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */ #define MC_CMD_MUM_DEV_HITTITE_NIC 0x2 /* 32-bit address to read from */ #define MC_CMD_MUM_IN_READ_ADDR_OFST 8 +#define MC_CMD_MUM_IN_READ_ADDR_LEN 4 /* Number of words to read. 
*/ #define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12 +#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4 /* MC_CMD_MUM_IN_WRITE msgrequest */ #define MC_CMD_MUM_IN_WRITE_LENMIN 16 @@ -4920,12 +5925,15 @@ #define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num)) /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* ID of (device connected to MUM) to write to registers of */ #define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4 +#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4 /* enum: Hittite HMC1035 clock generator on Sorrento board */ /* MC_CMD_MUM_DEV_HITTITE 0x1 */ /* 32-bit address to write to */ #define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8 +#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4 /* Words to write */ #define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12 #define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4 @@ -4938,12 +5946,16 @@ #define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num)) /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* MUM I2C cmd code */ #define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4 +#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4 /* Number of bytes to write */ #define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8 +#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4 /* Number of bytes to read */ #define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12 +#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4 /* Bytes to write */ #define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16 #define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1 @@ -4954,21 +5966,28 @@ #define MC_CMD_MUM_IN_LOG_LEN 8 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_LOG_OP_OFST 4 +#define MC_CMD_MUM_IN_LOG_OP_LEN 4 #define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ /* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */ #define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* MC_CMD_MUM_IN_LOG_OP_OFST 4 */ +/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */ /* Enable/disable debug output to UART */ #define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8 +#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4 /* MC_CMD_MUM_IN_GPIO msgrequest */ #define MC_CMD_MUM_IN_GPIO_LEN 8 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4 #define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0 #define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8 #define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */ @@ -4981,40 +6000,56 @@ /* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */ #define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4 /* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */ #define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4 /* The first 32-bit word to be written to the GPIO OUT register. */ #define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4 /* The second 32-bit word to be written to the GPIO OUT register. 
*/ #define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4 /* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */ #define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4 /* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4 /* The first 32-bit word to be written to the GPIO OUT ENABLE register. */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4 /* The second 32-bit word to be written to the GPIO OUT ENABLE register. */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4 /* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4 /* MC_CMD_MUM_IN_GPIO_OP msgrequest */ #define MC_CMD_MUM_IN_GPIO_OP_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4 #define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8 #define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8 #define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */ @@ -5027,26 +6062,34 @@ /* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4 /* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4 #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24 #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8 /* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4 #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24 #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8 /* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4 #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24 #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8 @@ -5054,7 +6097,9 @@ #define MC_CMD_MUM_IN_READ_SENSORS_LEN 8 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4 #define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0 #define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8 #define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8 @@ -5064,13 +6109,16 @@ #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12 /* MUM cmd header 
*/ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* Bit-mask of clocks to be programmed */ #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4 #define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */ #define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */ #define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */ /* Control flags for clock programming */ #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1 @@ -5082,19 +6130,24 @@ #define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* Enable/Disable FPGA config from flash */ #define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4 +#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4 /* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */ #define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* MC_CMD_MUM_IN_QSFP msgrequest */ #define MC_CMD_MUM_IN_QSFP_LEN 12 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0 #define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4 #define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */ @@ -5104,52 +6157,77 @@ #define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */ #define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */ #define MC_CMD_MUM_IN_QSFP_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4 /* MC_CMD_MUM_IN_QSFP_INIT msgrequest */ #define MC_CMD_MUM_IN_QSFP_INIT_LEN 16 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4 #define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4 /* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */ #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4 #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4 #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4 #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4 /* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */ #define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4 /* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */ #define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8 +#define 
MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4 #define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4 /* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */ #define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4 /* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */ #define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4 /* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */ #define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* MC_CMD_MUM_OUT msgresponse */ #define MC_CMD_MUM_OUT_LEN 0 @@ -5160,6 +6238,7 @@ /* MC_CMD_MUM_OUT_GET_VERSION msgresponse */ #define MC_CMD_MUM_OUT_GET_VERSION_LEN 12 #define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0 +#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4 #define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4 #define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8 #define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4 @@ -5197,8 +6276,10 @@ #define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8 /* The first 32-bit word read from the GPIO IN register. */ #define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4 /* The second 32-bit word read from the GPIO IN register. */ #define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4 /* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */ #define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0 @@ -5207,8 +6288,10 @@ #define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8 /* The first 32-bit word read from the GPIO OUT register. */ #define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4 /* The second 32-bit word read from the GPIO OUT register. 
*/ #define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4 /* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */ #define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0 @@ -5216,11 +6299,14 @@ /* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */ #define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8 #define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4 #define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4 /* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */ #define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4 #define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4 /* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */ #define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0 @@ -5249,6 +6335,7 @@ /* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */ #define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4 #define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0 +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4 /* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */ #define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0 @@ -5256,6 +6343,7 @@ /* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */ #define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4 #define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0 +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4 /* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */ #define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0 @@ -5263,7 +6351,9 @@ /* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */ #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1 @@ -5272,6 +6362,7 @@ /* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */ #define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4 #define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4 /* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */ #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5 @@ -5279,6 +6370,7 @@ #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num)) /* in bytes */ #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4 #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4 #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1 #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1 @@ -5287,11 +6379,14 @@ /* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */ #define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8 #define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4 #define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4 /* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */ #define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4 #define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4 /* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24 @@ -5299,12 +6394,14 @@ #define 
MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num)) /* Discrete (soldered) DDR resistor strap info */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4 #define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0 #define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16 #define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16 #define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16 /* Number of SODIMM info records */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4 /* Array of SODIMM info records */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8 #define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8 @@ -5365,6 +6462,7 @@ /* EVB_PORT_ID structuredef */ #define EVB_PORT_ID_LEN 4 #define EVB_PORT_ID_PORT_ID_OFST 0 +#define EVB_PORT_ID_PORT_ID_LEN 4 /* enum: An invalid port handle. */ #define EVB_PORT_ID_NULL 0x0 /* enum: The port assigned to this function.. */ @@ -5460,6 +6558,10 @@ #define NVRAM_PARTITION_TYPE_FC_LOG 0xb04 /* enum: MUM firmware partition */ #define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00 +/* enum: SUC firmware partition (this is intentionally an alias of + * MUM_FIRMWARE) + */ +#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00 /* enum: MUM Non-volatile log output partition. */ #define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01 /* enum: MUM Application table partition. */ @@ -5474,8 +6576,8 @@ #define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 /* enum: UEFI expansion ROM if separate from PXE */ #define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00 -/* enum: Spare partition 0 */ -#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000 +/* enum: Used by the expansion ROM for logging */ +#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000 /* enum: Used for XIP code of shmbooted images */ #define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100 /* enum: Spare partition 2 */ @@ -5488,6 +6590,27 @@ #define NVRAM_PARTITION_TYPE_SPARE_4 0x1400 /* enum: Spare partition 5 */ #define NVRAM_PARTITION_TYPE_SPARE_5 0x1500 +/* enum: Partition for reporting MC status. See mc_flash_layout.h + * medford_mc_status_hdr_t for layout on Medford. + */ +#define NVRAM_PARTITION_TYPE_STATUS 0x1600 +/* enum: Spare partition 13 */ +#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700 +/* enum: Spare partition 14 */ +#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800 +/* enum: Spare partition 15 */ +#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900 +/* enum: Spare partition 16 */ +#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00 +/* enum: Factory defaults for dynamic configuration */ +#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00 +/* enum: Factory defaults for expansion ROM configuration */ +#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00 +/* enum: Field Replaceable Unit inventory information for use on IPMI + * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a + * subset of the information stored in this partition. 
+ */ +#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00 /* enum: Start of reserved value range (firmware may use for any purpose) */ #define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 /* enum: End of reserved value range (firmware may use for any purpose) */ @@ -5502,6 +6625,7 @@ /* LICENSED_APP_ID structuredef */ #define LICENSED_APP_ID_LEN 4 #define LICENSED_APP_ID_ID_OFST 0 +#define LICENSED_APP_ID_ID_LEN 4 /* enum: OpenOnload */ #define LICENSED_APP_ID_ONLOAD 0x1 /* enum: PTP timestamping */ @@ -5526,6 +6650,14 @@ #define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400 /* enum: Capture SolarSystem 40G */ #define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800 +/* enum: Capture SolarSystem 1G */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000 +/* enum: ScaleOut Onload */ +#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000 +/* enum: SCS Network Analytics Dashboard */ +#define LICENSED_APP_ID_DSHBRD 0x4000 +/* enum: SolarCapture Trading Analytics */ +#define LICENSED_APP_ID_SCATRD 0x8000 #define LICENSED_APP_ID_ID_LBN 0 #define LICENSED_APP_ID_ID_WIDTH 32 @@ -5590,6 +6722,14 @@ #define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1 #define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11 #define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1 +#define LICENSED_V3_APPS_DSHBRD_LBN 14 +#define LICENSED_V3_APPS_DSHBRD_WIDTH 1 +#define LICENSED_V3_APPS_SCATRD_LBN 15 +#define LICENSED_V3_APPS_SCATRD_WIDTH 1 #define LICENSED_V3_APPS_MASK_LBN 0 #define LICENSED_V3_APPS_MASK_WIDTH 64 @@ -5636,6 +6776,18 @@ #define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1 /* enum: This is a TX completion event, not a timestamp */ #define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0 +/* enum: This is a TX completion event for a CTPIO transmit. The event format + * is the same as for TX_EV_COMPLETION. + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11 +/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The + * event format is the same as for TX_EV_TSTAMP_LO + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12 +/* enum: This is the high part of a TX timestamp for a CTPIO transmission. 
The + * event format is the same as for TX_EV_TSTAMP_HI + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13 /* enum: This is the low part of a TX timestamp event */ #define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51 /* enum: This is the high part of a TX timestamp event */ @@ -5669,6 +6821,19 @@ #define RSS_MODE_HASH_SELECTOR_LBN 0 #define RSS_MODE_HASH_SELECTOR_WIDTH 8 +/* CTPIO_STATS_MAP structuredef */ +#define CTPIO_STATS_MAP_LEN 4 +/* The (function relative) VI number */ +#define CTPIO_STATS_MAP_VI_OFST 0 +#define CTPIO_STATS_MAP_VI_LEN 2 +#define CTPIO_STATS_MAP_VI_LBN 0 +#define CTPIO_STATS_MAP_VI_WIDTH 16 +/* The target bucket for the VI */ +#define CTPIO_STATS_MAP_BUCKET_OFST 2 +#define CTPIO_STATS_MAP_BUCKET_LEN 2 +#define CTPIO_STATS_MAP_BUCKET_LBN 16 +#define CTPIO_STATS_MAP_BUCKET_WIDTH 16 + /***********************************/ /* MC_CMD_READ_REGS @@ -5676,7 +6841,7 @@ */ #define MC_CMD_READ_REGS 0x50 -#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_READ_REGS_IN msgrequest */ #define MC_CMD_READ_REGS_IN_LEN 0 @@ -5709,17 +6874,22 @@ #define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num)) /* Size, in entries */ #define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4 +#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4 /* The initial timer value. The load value is ignored if the timer mode is DIS. */ #define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8 +#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4 /* The reload value is ignored in one-shot modes */ #define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12 +#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4 /* tbd */ #define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4 #define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0 #define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1 #define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1 @@ -5735,6 +6905,7 @@ #define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6 #define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1 #define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20 +#define MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4 /* enum: Disabled */ #define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0 /* enum: Immediate */ @@ -5745,13 +6916,16 @@ #define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3 /* Target EVQ for wakeups if in wakeup mode. */ #define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24 +#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4 /* Target interrupt if in interrupting mode (note union with target EVQ). Use * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test * purposes. */ #define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24 +#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4 /* Event Counter Mode. */ #define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28 +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4 /* enum: Disabled */ #define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0 /* enum: Disabled */ @@ -5762,6 +6936,7 @@ #define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3 /* Event queue packet count threshold. 
*/ #define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32 +#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36 #define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8 @@ -5774,6 +6949,7 @@ #define MC_CMD_INIT_EVQ_OUT_LEN 4 /* Only valid if INTRFLAG was true */ #define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0 +#define MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4 /* MC_CMD_INIT_EVQ_V2_IN msgrequest */ #define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44 @@ -5781,17 +6957,22 @@ #define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num)) /* Size, in entries */ #define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0 +#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4 +#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4 /* The initial timer value. The load value is ignored if the timer mode is DIS. */ #define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4 /* The reload value is ignored in one-shot modes */ #define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4 /* tbd */ #define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1 @@ -5828,6 +7009,7 @@ */ #define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3 #define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4 /* enum: Disabled */ #define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0 /* enum: Immediate */ @@ -5838,13 +7020,16 @@ #define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3 /* Target EVQ for wakeups if in wakeup mode. */ #define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24 +#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4 /* Target interrupt if in interrupting mode (note union with target EVQ). Use * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test * purposes. */ #define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24 +#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4 /* Event Counter Mode. */ #define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28 +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4 /* enum: Disabled */ #define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0 /* enum: Disabled */ @@ -5855,6 +7040,7 @@ #define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3 /* Event queue packet count threshold. */ #define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32 +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36 #define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8 @@ -5867,8 +7053,10 @@ #define MC_CMD_INIT_EVQ_V2_OUT_LEN 8 /* Only valid if INTRFLAG was true */ #define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0 +#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4 /* Actual configuration applied on the card */ #define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1 @@ -5916,17 +7104,22 @@ #define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num)) /* Size, in entries */ #define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4 /* The EVQ to send events to. 
This is an index originally specified to INIT_EVQ */ #define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4 /* The value to put in the event data. Check hardware spec. for valid range. */ #define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4 /* There will be more flags here. */ #define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4 #define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1 #define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1 @@ -5945,8 +7138,10 @@ #define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ #define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28 #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8 @@ -5961,17 +7156,22 @@ #define MC_CMD_INIT_RXQ_EXT_IN_LEN 544 /* Size, in entries */ #define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4 /* The EVQ to send events to. This is an index originally specified to INIT_EVQ */ #define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4 /* The value to put in the event data. Check hardware spec. for valid range. */ #define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4 /* There will be more flags here. */ #define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1 @@ -6007,8 +7207,10 @@ #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ #define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28 #define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8 @@ -6017,6 +7219,7 @@ #define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64 /* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ #define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4 /* MC_CMD_INIT_RXQ_OUT msgresponse */ #define MC_CMD_INIT_RXQ_OUT_LEN 0 @@ -6040,18 +7243,23 @@ #define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num)) /* Size, in entries */ #define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_TXQ_IN_SIZE_LEN 4 /* The EVQ to send events to. This is an index originally specified to * INIT_EVQ. */ #define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4 /* The value to put in the event data. 
Check hardware spec. for valid range. */ #define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8 +#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4 /* There will be more flags here. */ #define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4 #define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1 #define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1 @@ -6072,8 +7280,10 @@ #define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ #define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28 #define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8 @@ -6088,18 +7298,23 @@ #define MC_CMD_INIT_TXQ_EXT_IN_LEN 544 /* Size, in entries */ #define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0 +#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4 /* The EVQ to send events to. This is an index originally specified to * INIT_EVQ. */ #define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4 /* The value to put in the event data. Check hardware spec. for valid range. */ #define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8 +#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4 /* There will be more flags here. */ #define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1 @@ -6122,10 +7337,14 @@ #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ #define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28 #define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8 @@ -6135,6 +7354,7 @@ #define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64 /* Flags related to Qbb flow control mode. 
*/ #define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4 #define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0 #define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1 #define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1 @@ -6161,6 +7381,7 @@ * passed to INIT_EVQ */ #define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0 +#define MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4 /* MC_CMD_FINI_EVQ_OUT msgresponse */ #define MC_CMD_FINI_EVQ_OUT_LEN 0 @@ -6178,6 +7399,7 @@ #define MC_CMD_FINI_RXQ_IN_LEN 4 /* Instance of RXQ to destroy */ #define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0 +#define MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4 /* MC_CMD_FINI_RXQ_OUT msgresponse */ #define MC_CMD_FINI_RXQ_OUT_LEN 0 @@ -6195,6 +7417,7 @@ #define MC_CMD_FINI_TXQ_IN_LEN 4 /* Instance of TXQ to destroy */ #define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0 +#define MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4 /* MC_CMD_FINI_TXQ_OUT msgresponse */ #define MC_CMD_FINI_TXQ_OUT_LEN 0 @@ -6212,6 +7435,7 @@ #define MC_CMD_DRIVER_EVENT_IN_LEN 12 /* Handle of target EVQ */ #define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0 +#define MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4 /* Bits 0 - 63 of event */ #define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4 #define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8 @@ -6237,6 +7461,7 @@ #define MC_CMD_PROXY_CMD_IN_LEN 4 /* The handle of the target function. */ #define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0 +#define MC_CMD_PROXY_CMD_IN_TARGET_LEN 4 #define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0 #define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16 #define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16 @@ -6252,6 +7477,7 @@ #define MC_PROXY_STATUS_BUFFER_LEN 16 /* Handle allocated by the firmware for this proxy transaction */ #define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4 /* enum: An invalid handle. */ #define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0 #define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0 @@ -6282,6 +7508,7 @@ * elevated privilege mask granted to the requesting function. */ #define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12 +#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4 #define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96 #define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32 @@ -6298,6 +7525,7 @@ /* MC_CMD_PROXY_CONFIGURE_IN msgrequest */ #define MC_CMD_PROXY_CONFIGURE_IN_LEN 108 #define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0 +#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4 #define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0 #define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS @@ -6309,6 +7537,7 @@ #define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8 /* Must be a power of 2 */ #define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12 +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS * of blocks, each of the size REPLY_BLOCK_SIZE. */ @@ -6318,6 +7547,7 @@ #define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20 /* Must be a power of 2 */ #define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24 +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if * host intends to complete proxied operations by using MC_CMD_PROXY_CMD. 
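The PROXY_CONFIGURE request described in this hunk hands the MC three host DMA buffers (status, request and reply), each laid out as NUM_BLOCKS blocks of a power-of-2 block size, plus a 64-byte mask of which MCDI commands may be proxied. A sketch of building the request, assuming MCDI's little-endian wire format; the LO/HI address split (LO dword 4 bytes below each *_ADDR_HI_OFST) and the mask's one-bit-per-command-number ordering are assumptions based on the header's conventions, and the helpers below are illustrative, not driver API:

#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *b, unsigned int ofst, uint32_t v)
{
	b[ofst + 0] = (uint8_t)v;
	b[ofst + 1] = (uint8_t)(v >> 8);
	b[ofst + 2] = (uint8_t)(v >> 16);
	b[ofst + 3] = (uint8_t)(v >> 24);
}

static void put_le64(uint8_t *b, unsigned int ofst, uint64_t v)
{
	put_le32(b, ofst, (uint32_t)v);
	put_le32(b, ofst + 4, (uint32_t)(v >> 32));
}

/* Fill in a PROXY_CONFIGURE request. Block sizes must be powers of 2
 * (or 0 for an absent reply buffer); allowed_cmd is one MCDI command
 * number to permit for proxying. */
static void proxy_configure_prepare(uint8_t in[MC_CMD_PROXY_CONFIGURE_IN_LEN],
				    uint64_t status_dma, uint64_t request_dma,
				    uint64_t reply_dma, uint32_t req_block_size,
				    uint32_t rep_block_size, uint32_t num_blocks,
				    unsigned int allowed_cmd)
{
	memset(in, 0, MC_CMD_PROXY_CONFIGURE_IN_LEN);
	put_le32(in, MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST,
		 1u << MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN);
	/* 64-bit DMA addresses: LO dword assumed at *_ADDR_HI_OFST - 4. */
	put_le64(in, MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST - 4,
		 status_dma);
	put_le32(in, MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST,
		 MC_PROXY_STATUS_BUFFER_LEN);	/* 16 bytes, a power of 2 */
	put_le64(in, MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST - 4,
		 request_dma);
	put_le32(in, MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST,
		 req_block_size);
	put_le64(in, MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST - 4,
		 reply_dma);
	put_le32(in, MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST,
		 rep_block_size);
	put_le32(in, MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST, num_blocks);
	/* ALLOWED_MCDI_MASK: assumed one bit per MCDI command number,
	 * LSB first within each of the 64 mask bytes. */
	in[MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST + allowed_cmd / 8] |=
		(uint8_t)(1u << (allowed_cmd % 8));
}

Each 16-byte status block then follows the MC_PROXY_STATUS_BUFFER layout defined earlier in this hunk (HANDLE at offset 0, GRANTED_PRIVILEGES at offset 12).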
@@ -6328,8 +7558,10 @@ #define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32 /* Must be a power of 2, or zero if this buffer is not provided */ #define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36 +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4 /* Applies to all three buffers */ #define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40 +#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4 /* A bit mask defining which MCDI operations may be proxied */ #define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44 #define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64 @@ -6337,6 +7569,7 @@ /* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS @@ -6348,6 +7581,7 @@ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8 /* Must be a power of 2 */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS * of blocks, each of the size REPLY_BLOCK_SIZE. */ @@ -6357,6 +7591,7 @@ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20 /* Must be a power of 2 */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if * host intends to complete proxied operations by using MC_CMD_PROXY_CMD. @@ -6367,12 +7602,15 @@ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32 /* Must be a power of 2, or zero if this buffer is not provided */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4 /* Applies to all three buffers */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4 /* A bit mask defining which MCDI operations may be proxied */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4 /* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */ #define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0 @@ -6392,7 +7630,9 @@ /* MC_CMD_PROXY_COMPLETE_IN msgrequest */ #define MC_CMD_PROXY_COMPLETE_IN_LEN 12 #define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0 +#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4 #define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4 +#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4 /* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply * is stored in the REPLY_BUFF. 
*/ @@ -6408,6 +7648,7 @@ */ #define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3 #define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8 +#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4 /* MC_CMD_PROXY_COMPLETE_OUT msgresponse */ #define MC_CMD_PROXY_COMPLETE_OUT_LEN 0 @@ -6427,17 +7668,22 @@ #define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8 /* Owner ID to use */ #define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4 /* Size of buffer table pages to use, in bytes (note that only a few values are * legal on any specific hardware). */ #define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4 /* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */ #define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12 #define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4 #define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4 /* Buffer table IDs for use in DMA descriptors. */ #define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4 /***********************************/ @@ -6453,10 +7699,13 @@ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num)) #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4 /* ID */ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4 /* Num entries */ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4 /* Buffer table entry address */ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8 @@ -6479,48 +7728,11 @@ /* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */ #define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4 #define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0 +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4 /* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */ #define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0 -/* PORT_CONFIG_ENTRY structuredef */ -#define PORT_CONFIG_ENTRY_LEN 16 -/* External port number (label) */ -#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0 -#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1 -#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0 -#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8 -/* Port core location */ -#define PORT_CONFIG_ENTRY_CORE_OFST 1 -#define PORT_CONFIG_ENTRY_CORE_LEN 1 -#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */ -#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */ -#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */ -#define PORT_CONFIG_ENTRY_CORE_LBN 8 -#define PORT_CONFIG_ENTRY_CORE_WIDTH 8 -/* Internal number (HW resource) relative to the core */ -#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2 -#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1 -#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16 -#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8 -/* Reserved */ -#define PORT_CONFIG_ENTRY_RSVD_OFST 3 -#define PORT_CONFIG_ENTRY_RSVD_LEN 1 -#define PORT_CONFIG_ENTRY_RSVD_LBN 24 -#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8 -/* Bitmask of KR lanes used by the port */ -#define PORT_CONFIG_ENTRY_LANES_OFST 4 -#define PORT_CONFIG_ENTRY_LANES_LBN 32 -#define PORT_CONFIG_ENTRY_LANES_WIDTH 32 -/* Port capabilities (MC_CMD_PHY_CAP_*) */ -#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8 -#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64 -#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32 -/* Reserved (align to 16 bytes) */ -#define 
PORT_CONFIG_ENTRY_RSVD2_OFST 12 -#define PORT_CONFIG_ENTRY_RSVD2_LBN 96 -#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32 - /***********************************/ /* MC_CMD_FILTER_OP @@ -6534,6 +7746,7 @@ #define MC_CMD_FILTER_OP_IN_LEN 108 /* identifies the type of operation requested */ #define MC_CMD_FILTER_OP_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_IN_OP_LEN 4 /* enum: single-recipient filter insert */ #define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0 /* enum: single-recipient filter remove */ @@ -6554,8 +7767,10 @@ /* The port ID associated with the v-adaptor which should contain this filter. */ #define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4 /* fields to include in match criteria */ #define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4 #define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0 #define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1 #define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1 @@ -6586,6 +7801,7 @@ #define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 /* receive destination */ #define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4 /* enum: drop packets */ #define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0 /* enum: receive to host */ @@ -6598,8 +7814,10 @@ #define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 /* receive queue handle (for multiple queue modes, this is the base queue) */ #define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4 /* receive mode */ #define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4 /* enum: receive to just the specified queue */ #define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0 /* enum: receive to multiple queues using RSS context */ @@ -6614,13 +7832,16 @@ * MC_CMD_DOT1P_MAPPING_ALLOC. */ #define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4 /* transmit domain (reserved; set to 0) */ #define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4 /* transmit destination (either set the MAC and/or PM bits for explicit * control, or set this field to TX_DEST_DEFAULT for sensible default * behaviour) */ #define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4 /* enum: request default behaviour (based on filter type) */ #define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff #define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0 @@ -6653,8 +7874,10 @@ #define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2 /* Firmware defined register 0 to match (reserved; set to 0) */ #define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4 /* Firmware defined register 1 to match (reserved; set to 0) */ #define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72 +#define MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4 /* source IP address to match (as bytes in network order; set last 12 bytes to * 0 for IPv4 address) */ @@ -6673,6 +7896,7 @@ #define MC_CMD_FILTER_OP_EXT_IN_LEN 172 /* identifies the type of operation requested */ #define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_FILTER_OP_IN/OP */ /* filter handle (for remove / unsubscribe operations) */ @@ -6683,8 +7907,10 @@ /* The port ID associated with the v-adaptor which should contain this filter. 
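 *
 * Illustrative sketch only (not part of this patch): inserting a filter
 * that matches on source and destination IP and delivers to one host RX
 * queue, using the MC_CMD_FILTER_OP_EXT_IN_* offsets and bit numbers
 * defined below; port_id and rxq are caller-supplied:
 *
 *	u8 inbuf[MC_CMD_FILTER_OP_EXT_IN_LEN] = {};
 *	u32 match = (1u << MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN) |
 *		    (1u << MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN);
 *
 *	*(__le32 *)(inbuf + MC_CMD_FILTER_OP_EXT_IN_OP_OFST) =
 *		cpu_to_le32(MC_CMD_FILTER_OP_IN_OP_INSERT);
 *	*(__le32 *)(inbuf + MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST) =
 *		cpu_to_le32(port_id);
 *	*(__le32 *)(inbuf + MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST) =
 *		cpu_to_le32(match);
 *	*(__le32 *)(inbuf + MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST) =
 *		cpu_to_le32(MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST);
 *	*(__le32 *)(inbuf + MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST) =
 *		cpu_to_le32(rxq);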
*/ #define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4 /* fields to include in match criteria */ #define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1 @@ -6743,6 +7969,7 @@ #define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 /* receive destination */ #define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4 /* enum: drop packets */ #define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0 /* enum: receive to host */ @@ -6755,8 +7982,10 @@ #define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4 /* receive queue handle (for multiple queue modes, this is the base queue) */ #define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4 /* receive mode */ #define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4 /* enum: receive to just the specified queue */ #define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0 /* enum: receive to multiple queues using RSS context */ @@ -6771,13 +8000,16 @@ * MC_CMD_DOT1P_MAPPING_ALLOC. */ #define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4 /* transmit domain (reserved; set to 0) */ #define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4 /* transmit destination (either set the MAC and/or PM bits for explicit * control, or set this field to TX_DEST_DEFAULT for sensible default * behaviour) */ #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4 /* enum: request default behaviour (based on filter type) */ #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0 @@ -6810,11 +8042,13 @@ #define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2 /* Firmware defined register 0 to match (reserved; set to 0) */ #define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4 /* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP * protocol is GRE) to match (as bytes in network order; set last byte to 0 for * VXLAN/NVGRE, or 1 for Geneve) */ #define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4 #define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24 #define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24 @@ -6880,10 +8114,12 @@ * to 0) */ #define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4 /* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set * to 0) */ #define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4 /* VXLAN/NVGRE inner frame source IP address to match (as bytes in network * order; set last 12 bytes to 0 for IPv4 address) */ @@ -6899,6 +8135,7 @@ #define MC_CMD_FILTER_OP_OUT_LEN 12 /* identifies the type of operation requested */ #define MC_CMD_FILTER_OP_OUT_OP_OFST 0 +#define MC_CMD_FILTER_OP_OUT_OP_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_FILTER_OP_IN/OP */ /* Returned filter handle (for insert / subscribe operations). 
Note that these @@ -6918,6 +8155,7 @@ #define MC_CMD_FILTER_OP_EXT_OUT_LEN 12 /* identifies the type of operation requested */ #define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0 +#define MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_FILTER_OP_EXT_IN/OP */ /* Returned filter handle (for insert / subscribe operations). Note that these @@ -6944,6 +8182,7 @@ #define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4 /* identifies the type of operation requested */ #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4 /* enum: read the list of supported RX filter matches */ #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 /* enum: read flags indicating restrictions on filter insertion for the calling @@ -6966,10 +8205,12 @@ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num)) /* identifies the type of operation requested */ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ /* number of supported match types */ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4 /* array of supported match types (valid MATCH_FIELDS values for * MC_CMD_FILTER_OP) sorted in decreasing priority order */ @@ -6982,10 +8223,12 @@ #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8 /* identifies the type of operation requested */ #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ /* bitfield of filter insertion restrictions */ #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4 #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0 #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1 @@ -7005,11 +8248,16 @@ #define MC_CMD_PARSER_DISP_RW_IN_LEN 32 /* identifies the target of the operation */ #define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0 +#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4 /* enum: RX dispatcher CPU */ #define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0 /* enum: TX dispatcher CPU */ #define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1 -/* enum: Lookup engine (with original metadata format) */ +/* enum: Lookup engine (with original metadata format). Deprecated; used only + * by cmdclient as a fallback for very old Huntington firmware, and not + * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA + * instead. + */ #define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 /* enum: Lookup engine (with requested metadata format) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 @@ -7021,26 +8269,33 @@ #define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5 /* identifies the type of operation requested */ #define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4 -/* enum: read a word of DICPU DMEM or a LUE entry */ +#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4 +/* enum: Read a word of DICPU DMEM or a LUE entry */ #define MC_CMD_PARSER_DISP_RW_IN_READ 0x0 -/* enum: write a word of DICPU DMEM or a LUE entry */ +/* enum: Write a word of DICPU DMEM or a LUE entry. */ #define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1 -/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */ +/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). 
*/ #define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2 /* data memory address (DICPU targets) or LUE index (LUE targets) */ #define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8 +#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4 /* selector (for MISC_STATE target) */ #define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8 +#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4 /* enum: Port to datapath mapping */ #define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1 /* value to write (for DMEM writes) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12 +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4 /* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12 +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4 /* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16 +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4 /* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12 +#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4 /* value to write (for LUE writes) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12 #define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20 @@ -7049,6 +8304,7 @@ #define MC_CMD_PARSER_DISP_RW_OUT_LEN 52 /* value read (for DMEM reads) */ #define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0 +#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4 /* value read (for LUE reads) */ #define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0 #define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20 @@ -7093,6 +8349,7 @@ #define MC_CMD_SET_PF_COUNT_IN_LEN 4 /* New number of PFs on the device. */ #define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0 +#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4 /* MC_CMD_SET_PF_COUNT_OUT msgresponse */ #define MC_CMD_SET_PF_COUNT_OUT_LEN 0 @@ -7113,6 +8370,7 @@ #define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4 /* Identifies the port assignment for this function. */ #define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0 +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4 /***********************************/ @@ -7127,6 +8385,7 @@ #define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4 /* Identifies the port assignment for this function. */ #define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0 +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4 /* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */ #define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0 @@ -7144,8 +8403,10 @@ #define MC_CMD_ALLOC_VIS_IN_LEN 8 /* The minimum number of VIs that is acceptable */ #define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4 /* The maximum number of VIs that would be useful */ #define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4 +#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4 /* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request. * Use extended version in new code. @@ -7153,21 +8414,26 @@ #define MC_CMD_ALLOC_VIS_OUT_LEN 8 /* The number of VIs allocated on this function */ #define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4 /* The base absolute VI number allocated to this function. Required to * correctly interpret wakeup events. 
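 *
 * Illustrative sketch only (not part of this patch): wakeup events carry an
 * absolute VI number, so a driver maps one back into its own queue range by
 * subtracting the base returned below (handle_wakeup() is a stand-in for
 * the driver's own dispatch):
 *
 *	unsigned int local_vi = abs_vi - vi_base;
 *
 *	if (local_vi < vi_count)
 *		handle_wakeup(local_vi);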
*/ #define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4 +#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4 /* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */ #define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12 /* The number of VIs allocated on this function */ #define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4 /* The base absolute VI number allocated to this function. Required to * correctly interpret wakeup events. */ #define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4 /* Function's port vi_shift value (always 0 on Huntington) */ #define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4 /***********************************/ @@ -7201,15 +8467,20 @@ #define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20 /* Number of VFs currently enabled. */ #define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4 /* Max number of VFs before sriov stride and offset may need to be changed. */ #define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4 #define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8 +#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4 #define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0 #define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1 /* RID offset of first VF from PF. */ #define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4 /* RID offset of each subsequent VF from the previous. */ #define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4 /***********************************/ @@ -7224,19 +8495,24 @@ #define MC_CMD_SET_SRIOV_CFG_IN_LEN 20 /* Number of VFs currently enabled. */ #define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4 /* Max number of VFs before sriov stride and offset may need to be changed. */ #define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4 #define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8 +#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4 #define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0 #define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1 /* RID offset of first VF from PF, or 0 for no change, or * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset. */ #define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4 /* RID offset of each subsequent VF from the previous, 0 for no change, or * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride. */ #define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4 /* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */ #define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0 @@ -7258,12 +8534,15 @@ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12 /* The number of VIs allocated on this function */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4 /* The base absolute VI number allocated to this function. Required to * correctly interpret wakeup events. */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4 /* Function's port vi_shift value (always 0 on Huntington) */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4 /***********************************/ @@ -7278,6 +8557,7 @@ #define MC_CMD_DUMP_VI_STATE_IN_LEN 4 /* The VI number to query. 
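 *
 * Illustrative sketch only (not part of this patch), assuming a
 * hypothetical transport helper mcdi_rpc(cmd, inbuf, inlen, outbuf, outlen)
 * that issues one MCDI command and fills in the response:
 *
 *	u8 inbuf[MC_CMD_DUMP_VI_STATE_IN_LEN];
 *	u8 outbuf[MC_CMD_DUMP_VI_STATE_OUT_LEN];
 *	int rc;
 *
 *	*(__le32 *)(inbuf + MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST) =
 *		cpu_to_le32(vi_number);
 *	rc = mcdi_rpc(MC_CMD_DUMP_VI_STATE, inbuf, sizeof(inbuf),
 *		      outbuf, sizeof(outbuf));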
*/ #define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0 +#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4 /* MC_CMD_DUMP_VI_STATE_OUT msgresponse */ #define MC_CMD_DUMP_VI_STATE_OUT_LEN 96 @@ -7311,6 +8591,7 @@ #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24 /* Combined metadata field. */ #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16 @@ -7392,6 +8673,7 @@ #define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4 /* Handle for allocated push I/O buffer. */ #define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4 /***********************************/ @@ -7406,6 +8688,7 @@ #define MC_CMD_FREE_PIOBUF_IN_LEN 4 /* Handle for allocated push I/O buffer. */ #define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4 /* MC_CMD_FREE_PIOBUF_OUT msgresponse */ #define MC_CMD_FREE_PIOBUF_OUT_LEN 0 @@ -7423,6 +8706,7 @@ #define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4 /* VI number to get information for. */ #define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4 /* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */ #define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4 @@ -7445,6 +8729,7 @@ #define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19 #define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1 #define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4 /***********************************/ @@ -7459,6 +8744,7 @@ #define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8 /* VI number to set information for. */ #define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4 /* Transaction processing steering hint 1 for use with the Rx Queue. */ #define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4 #define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1 @@ -7478,6 +8764,7 @@ #define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51 #define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1 #define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4 /* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */ #define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0 @@ -7494,6 +8781,7 @@ /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4 /* enum: MISC. */ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0 /* enum: IDO. */ @@ -7506,10 +8794,12 @@ /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */ /* Amalgamated TLP info word. 
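 *
 * Fields packed into this word follow the file-wide _LBN/_WIDTH convention:
 * _LBN is the bit number of the field's lowest bit within the 32-bit word
 * at the field's offset, and _WIDTH is its width in bits. Illustrative
 * extraction (not part of this patch) of the WTAG_EN field defined below:
 *
 *	u32 word = le32_to_cpu(*(const __le32 *)(outbuf +
 *		MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST));
 *	u32 wtag_en = (word >>
 *		MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN)
 *		& ((1u << MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH) - 1);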
*/ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1 @@ -7557,10 +8847,12 @@ /* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */ #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */ /* Amalgamated TLP info word. */ #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0 @@ -7627,6 +8919,7 @@ * in a command from the host.) */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4 #define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */ @@ -7636,6 +8929,7 @@ * mc_flash_layout.h.) */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ @@ -7672,12 +8966,14 @@ #define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff /* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4 /* enum: Last chunk, containing checksum rather than data */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff /* enum: Abort download of this item */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe /* Length of this chunk in bytes */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4 /* Data for this chunk */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16 #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4 @@ -7688,8 +8984,10 @@ #define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8 /* Same as MC_CMD_ERR field, but included as 0 in success cases */ #define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4 /* Extra status information */ #define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4 /* enum: Code download OK, completed. */ #define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0 /* enum: Code download aborted as requested. */ @@ -7726,6 +9024,7 @@ #define MC_CMD_GET_CAPABILITIES_OUT_LEN 20 /* First word of flags. 
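 *
 * Single-bit capability flags use the same _LBN/_WIDTH convention, so a
 * driver can test one with a plain shift and mask; an illustrative check
 * (not part of this patch) of the TX_STRIPING bit defined below:
 *
 *	u32 flags1 = le32_to_cpu(*(const __le32 *)(outbuf +
 *		MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST));
 *	bool tx_striping = flags1 &
 *		(1u << MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN);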
*/ #define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4 #define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3 #define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4 @@ -7793,6 +9092,8 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5 /* enum: BIST RXDP firmware */ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ @@ -7813,6 +9114,8 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6 #define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2 @@ -7822,6 +9125,8 @@ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5 /* enum: BIST TXDP firmware */ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ @@ -7848,7 +9153,9 @@ * (Huntington development only) */ #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 -/* enum: Virtual switching (full feature) RX PD production firmware */ +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) @@ -7864,6 +9171,8 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: reserved value - do not use (bug69716) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED_9 0x9 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel @@ -7888,7 +9197,9 @@ * (Huntington development only) */ #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 -/* enum: Virtual switching (full feature) TX PD production firmware */ +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) @@ -7901,12 +9212,16 @@ #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: reserved value - do not use (bug69716) */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED_9 0x9 /* enum: RX PD firmware for GUE parsing prototype (Medford 
development only) */ #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ #define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ #define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4 /* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */ #define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0 @@ -7915,6 +9230,7 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72 /* First word of flags. */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4 @@ -7982,6 +9298,8 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5 /* enum: BIST RXDP firmware */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ @@ -8002,6 +9320,8 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2 @@ -8011,6 +9331,8 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5 /* enum: BIST TXDP firmware */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ @@ -8037,7 +9359,9 @@ * (Huntington development only) */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 -/* enum: Virtual switching (full feature) RX PD production firmware */ +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) @@ -8053,6 +9377,8 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: reserved value - do not use (bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED_9 0x9 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel @@ -8077,7 +9403,9 @@ * (Huntington development only) */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 -/* enum: Virtual switching (full feature) TX PD production firmware */ +/* enum: Full featured TX PD production firmware */ +#define 
MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) @@ -8090,14 +9418,19 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: reserved value - do not use (bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED_9 0x9 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4 /* Second word of flags. Not present on older firmware (check the length). */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1 @@ -8124,6 +9457,18 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 #define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present * on older firmware (check the length). */ @@ -8181,9 +9526,10 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2 /* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 73 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76 /* First word of flags. 
*/ #define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4 @@ -8251,6 +9597,8 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5 /* enum: BIST RXDP firmware */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ @@ -8271,6 +9619,8 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2 @@ -8280,6 +9630,8 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5 /* enum: BIST TXDP firmware */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ @@ -8306,7 +9658,9 @@ * (Huntington development only) */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 -/* enum: Virtual switching (full feature) RX PD production firmware */ +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) @@ -8322,6 +9676,8 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: reserved value - do not use (bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED_9 0x9 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel @@ -8346,7 +9702,9 @@ * (Huntington development only) */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 -/* enum: Virtual switching (full feature) TX PD production firmware */ +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) @@ -8359,14 +9717,19 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: reserved value - do not use (bug69716) */ +#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED_9 0x9 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4 /* Second word of flags. Not present on older firmware (check the length). */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1 @@ -8393,6 +9756,18 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 #define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present * on older firmware (check the length). */ @@ -8463,6 +9838,348 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 + +/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78 +/* First word of flags. 
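 *
 * The fixed fields keep the layout of the earlier GET_CAPABILITIES
 * variants; for instance the 16-bit RXPD_FW_VERSION word further down packs
 * a 12-bit revision and a 4-bit type, split here in an illustrative decode
 * (not part of this patch; the masks match the 12/4 widths defined below):
 *
 *	u16 ver = le16_to_cpu(*(const __le16 *)(outbuf +
 *		MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST));
 *	unsigned int rev = (ver >>
 *		MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN) & 0xfff;
 *	unsigned int type = (ver >>
 *		MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN) & 0xf;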
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28 +#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
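 *
 * Illustrative dispatch on the TX datapath firmware variant (not part of
 * this patch), using the enum values defined below; fast_path is a
 * caller-defined flag:
 *
 *	u16 txdp = le16_to_cpu(*(const __le16 *)(outbuf +
 *		MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST));
 *
 *	switch (txdp) {
 *	case MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY:
 *	case MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE:
 *		fast_path = true;
 *		break;
 *	default:
 *		fast_path = false;
 *	}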
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: reserved value - do not use (bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED_9 0x9 +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: reserved value - do not use (bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED_9 0x9 +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
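 *
 * "Check the length" means the driver must compare the actual response
 * length against this field's end before reading it; an illustrative guard
 * (not part of this patch), with outlen as reported by the MCDI transport:
 *
 *	u32 flags2 = 0;
 *
 *	if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST +
 *		      MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN)
 *		flags2 = le32_to_cpu(*(const __le32 *)(outbuf +
 *			MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST));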
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present + * on older firmware (check the length). + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. 
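 *
 * Illustrative walk (not part of this patch) over the per-PF assignment
 * bytes, skipping all four special values; note_port_for_pf() is a
 * stand-in for the driver's own bookkeeping:
 *
 *	const u8 *map = outbuf +
 *		MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST;
 *	unsigned int pf;
 *
 *	for (pf = 0;
 *	     pf < MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM;
 *	     pf++) {
 *		switch (map[pf]) {
 *		case MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED:
 *		case MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT:
 *		case MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED:
 *		case MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT:
 *			continue;
 *		default:
 *			note_port_for_pf(pf, map[pf]);
 *		}
 *	}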
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm. The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm. The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode.
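 *
 * Several sizes in this response are indirect; two illustrative derivations
 * (not part of this patch) for the power-of-two descriptor cache sizes
 * above and the MAC stats DMA buffer described below:
 *
 *	unsigned int rx_dc_size = 1u << outbuf[
 *		MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST];
 *	u16 num_stats = le16_to_cpu(*(const __le16 *)(outbuf +
 *		MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST));
 *	size_t stats_buf_len = num_stats * sizeof(u64);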
/***********************************/ @@ -8502,6 +10219,7 @@ #define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4 /* the bucket id */ #define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4 /***********************************/ @@ -8516,6 +10234,7 @@ #define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4 /* the bucket id */ #define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4 /* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */ #define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0 @@ -8533,17 +10252,22 @@ #define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8 /* the bucket id */ #define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4 /* the rate in mbps */ #define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4 +#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4 /* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */ #define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12 /* the bucket id */ #define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4 /* the rate in mbps */ #define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4 +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4 /* the desired maximum fill level */ #define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8 +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4 /* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */ #define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0 @@ -8561,10 +10285,13 @@ #define MC_CMD_TCM_TXQ_INIT_IN_LEN 28 /* the txq id */ #define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0 +#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4 /* the static priority associated with the txq */ #define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4 +#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4 /* bitmask of the priority queues this txq is inserted into when inserted. 
*/ #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1 @@ -8573,25 +10300,32 @@ #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1 /* the reaction point (RP) bucket */ #define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12 +#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4 /* an already reserved bucket (typically set to bucket associated with outer * vswitch) */ #define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16 +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4 /* an already reserved bucket (typically set to bucket associated with inner * vswitch) */ #define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20 +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4 /* the min bucket (typically for ETS/minimum bandwidth) */ #define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24 +#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4 /* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32 /* the txq id */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4 /* the static priority associated with the txq */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4 /* bitmask of the priority queues this txq is inserted into when inserted. */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1 @@ -8600,18 +10334,23 @@ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1 /* the reaction point (RP) bucket */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4 /* an already reserved bucket (typically set to bucket associated with outer * vswitch) */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4 /* an already reserved bucket (typically set to bucket associated with inner * vswitch) */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4 /* the min bucket (typically for ETS/minimum bandwidth) */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4 /* the static priority associated with the txq */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4 /* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */ #define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0 @@ -8629,8 +10368,10 @@ #define MC_CMD_LINK_PIOBUF_IN_LEN 8 /* Handle for allocated push I/O buffer. */ #define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4 /* Function Local Instance (VI) number. */ #define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4 +#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4 /* MC_CMD_LINK_PIOBUF_OUT msgresponse */ #define MC_CMD_LINK_PIOBUF_OUT_LEN 0 @@ -8648,6 +10389,7 @@ #define MC_CMD_UNLINK_PIOBUF_IN_LEN 4 /* Function Local Instance (VI) number. */ #define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0 +#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4 /* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */ #define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0 @@ -8665,8 +10407,10 @@ #define MC_CMD_VSWITCH_ALLOC_IN_LEN 16 /* The port to connect to the v-switch's upstream port. 
*/ #define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* The type of v-switch to create. */ #define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4 +#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4 /* enum: VLAN */ #define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1 /* enum: VEB */ @@ -8679,6 +10423,7 @@ #define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5 /* Flags controlling v-port creation */ #define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4 #define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 #define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 /* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators, @@ -8689,6 +10434,7 @@ * v-ports with this number of tags. */ #define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 +#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4 /* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */ #define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0 @@ -8706,6 +10452,7 @@ #define MC_CMD_VSWITCH_FREE_IN_LEN 4 /* The port to which the v-switch is connected. */ #define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_VSWITCH_FREE_OUT msgresponse */ #define MC_CMD_VSWITCH_FREE_OUT_LEN 0 @@ -8725,6 +10472,7 @@ #define MC_CMD_VSWITCH_QUERY_IN_LEN 4 /* The port to which the v-switch is connected. */ #define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_VSWITCH_QUERY_OUT msgresponse */ #define MC_CMD_VSWITCH_QUERY_OUT_LEN 0 @@ -8742,8 +10490,10 @@ #define MC_CMD_VPORT_ALLOC_IN_LEN 20 /* The port to which the v-switch is connected. */ #define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* The type of the new v-port. */ #define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4 +#define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4 /* enum: VLAN (obsolete) */ #define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1 /* enum: VEB (obsolete) */ @@ -8764,6 +10514,7 @@ #define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6 /* Flags controlling v-port creation */ #define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 #define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1 @@ -8773,8 +10524,10 @@ * v-switch. */ #define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 +#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4 /* The actual VLAN tags to insert/remove */ #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4 #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0 #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16 #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16 @@ -8784,6 +10537,7 @@ #define MC_CMD_VPORT_ALLOC_OUT_LEN 4 /* The handle of the new v-port */ #define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4 /***********************************/ @@ -8798,6 +10552,7 @@ #define MC_CMD_VPORT_FREE_IN_LEN 4 /* The handle of the v-port */ #define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4 /* MC_CMD_VPORT_FREE_OUT msgresponse */ #define MC_CMD_VPORT_FREE_OUT_LEN 0 @@ -8815,18 +10570,23 @@ #define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30 /* The port to connect to the v-adaptor's port. 
*/ #define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* Flags controlling v-adaptor creation */ #define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 /* The number of VLAN tags to strip on receive */ #define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12 +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4 /* The number of VLAN tags to transparently insert/remove. */ #define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4 /* The actual VLAN tags to insert/remove */ #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4 #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0 #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16 #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16 @@ -8853,6 +10613,7 @@ #define MC_CMD_VADAPTOR_FREE_IN_LEN 4 /* The port to which the v-adaptor is connected. */ #define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_VADAPTOR_FREE_OUT msgresponse */ #define MC_CMD_VADAPTOR_FREE_OUT_LEN 0 @@ -8870,6 +10631,7 @@ #define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10 /* The port to which the v-adaptor is connected. */ #define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4 /* The new MAC address to assign to this v-adaptor */ #define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4 #define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6 @@ -8890,6 +10652,7 @@ #define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4 /* The port to which the v-adaptor is connected. */ #define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */ #define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6 @@ -8910,15 +10673,19 @@ #define MC_CMD_VADAPTOR_QUERY_IN_LEN 4 /* The port to which the v-adaptor is connected. */ #define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */ #define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12 /* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */ #define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0 +#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4 /* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */ #define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4 +#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4 /* The number of VLAN tags that may still be added */ #define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8 +#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4 /***********************************/ @@ -8933,8 +10700,10 @@ #define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8 /* The port to assign. */ #define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4 /* The target function to modify. 
*/ #define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4 +#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4 #define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0 #define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16 #define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16 @@ -8955,9 +10724,13 @@ /* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */ #define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17 #define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4 /* Write enable bits 0-3, set to write, clear to read. */ #define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128 #define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4 @@ -8969,9 +10742,13 @@ */ #define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16 #define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4 /***********************************/ @@ -8986,11 +10763,13 @@ #define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4 /* The handle of the owning upstream port */ #define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */ #define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4 /* The handle of the new Onload stack */ #define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0 +#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4 /***********************************/ @@ -9005,6 +10784,7 @@ #define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4 /* The handle of the Onload stack */ #define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0 +#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4 /* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */ #define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0 @@ -9022,8 +10802,10 @@ #define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12 /* The handle of the owning upstream port */ #define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* The type of context to allocate */ #define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4 /* enum: Allocate a context for exclusive use. The key and indirection table * must be explicitly configured. */ @@ -9037,6 +10819,7 @@ * in the indirection table will be in the range 0 to NUM_QUEUES-1. */ #define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4 /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4 @@ -9045,6 +10828,7 @@ * handle. 
*/ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4 /* enum: guaranteed invalid RSS context handle value */ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff @@ -9061,6 +10845,7 @@ #define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4 /* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0 @@ -9078,6 +10863,7 @@ #define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4 /* The 40-byte Toeplitz hash key (TBD endianness issues?) */ #define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40 @@ -9098,6 +10884,7 @@ #define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4 /* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44 @@ -9118,6 +10905,7 @@ #define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4 /* The 128-byte indirection table (1 byte per entry) */ #define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128 @@ -9138,6 +10926,7 @@ #define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4 /* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132 @@ -9158,6 +10947,7 @@ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4 /* Hash control flags. The _EN bits are always supported, but new modes are * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES: * in this case, the MODE fields may be set to non-zero values, and will take @@ -9171,6 +10961,7 @@ * particular packet type.) */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1 @@ -9210,6 +11001,7 @@ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4 /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8 @@ -9227,6 +11019,7 @@ * always be used for a SET regardless of old/new driver vs. old/new firmware. 
*/ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1 @@ -9263,11 +11056,13 @@ #define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8 /* The handle of the owning upstream port */ #define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* Number of queues spanned by this mapping, in the range 1-64; valid fixed * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and * referenced RSS contexts must span no more than this number. */ #define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4 +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4 /* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4 @@ -9276,6 +11071,7 @@ * handle. */ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4 /* enum: guaranteed invalid .1p mapping handle value */ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff @@ -9292,6 +11088,7 @@ #define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4 /* The handle of the .1p mapping */ #define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4 /* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */ #define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0 @@ -9309,6 +11106,7 @@ #define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36 /* The handle of the .1p mapping */ #define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4 /* Per-priority mappings (1 32-bit word per entry - an offset or RSS context * handle) */ @@ -9331,6 +11129,7 @@ #define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4 /* The handle of the .1p mapping */ #define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4 /* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */ #define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36 @@ -9356,10 +11155,13 @@ #define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12 /* Base absolute interrupt vector number. */ #define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0 +#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4 /* Number of interrupt vectors allocated to this PF. */ #define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4 +#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4 /* Number of interrupt vectors to allocate per VF. */ #define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8 +#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4 /***********************************/ @@ -9376,10 +11178,13 @@ * let the system find a suitable base. */ #define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0 +#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4 /* Number of interrupt vectors allocated to this PF. */ #define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4 +#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4 /* Number of interrupt vectors to allocate per VF. */ #define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8 +#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4 /* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */ #define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
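On the request side, the VEC_BASE comment above defines the convention: writing 0 asks the MC to pick a suitable base vector itself. A sketch of one SET_VECTOR_CFG call, assuming the usual sfc-style MCDI helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD, efx_mcdi_rpc); the per-PF/per-VF counts here are illustrative values only:

	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_VECTOR_CFG_IN_LEN);
	int rc;

	/* 0 == "let the system find a suitable base", per the field comment */
	MCDI_SET_DWORD(inbuf, SET_VECTOR_CFG_IN_VEC_BASE, 0);
	MCDI_SET_DWORD(inbuf, SET_VECTOR_CFG_IN_VECS_PER_PF, 32);
	MCDI_SET_DWORD(inbuf, SET_VECTOR_CFG_IN_VECS_PER_VF, 2);

	/* The response carries no payload (OUT_LEN 0), so no outbuf needed. */
	rc = efx_mcdi_rpc(efx, MC_CMD_SET_VECTOR_CFG, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);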
@@ -9397,6 +11202,7 @@ #define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10 /* The handle of the v-port */ #define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4 /* MAC address to add */ #define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4 #define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6 @@ -9417,6 +11223,7 @@ #define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10 /* The handle of the v-port */ #define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4 /* MAC address to delete */ #define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4 #define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6 @@ -9437,6 +11244,7 @@ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4 /* The handle of the v-port */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4 /* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4 @@ -9444,6 +11252,7 @@ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num)) /* The number of MAC addresses returned */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4 /* Array of MAC addresses */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4 #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6 @@ -9465,8 +11274,10 @@ #define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44 /* The handle of the v-port */ #define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4 /* Flags requesting what should be changed. */ #define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4 +#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4 #define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0 #define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1 #define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1 @@ -9476,14 +11287,17 @@ * v-switch. 
*/ #define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8 +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4 /* The actual VLAN tags to insert/remove */ #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16 /* The number of MAC addresses to add */ #define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16 +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4 /* MAC addresses to add */ #define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20 #define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6 @@ -9492,6 +11306,7 @@ /* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */ #define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4 #define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0 +#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4 #define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0 #define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1 @@ -9508,15 +11323,18 @@ #define MC_CMD_EVB_PORT_QUERY_IN_LEN 4 /* The handle of the v-port */ #define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0 +#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4 /* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */ #define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8 /* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */ #define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0 +#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4 /* The number of VLAN tags that may be used on a v-adaptor connected to this * EVB port. */ #define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4 +#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4 /***********************************/ @@ -9528,14 +11346,16 @@ */ #define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab -#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */ #define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8 /* Index of the first buffer table entry. */ #define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0 +#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4 /* Number of buffer table entries to dump. 
*/ #define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4 +#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4 /* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12 @@ -9559,6 +11379,7 @@ /* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */ #define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4 #define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0 +#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1 @@ -9588,6 +11409,7 @@ /* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */ #define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4 #define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0 +#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1 @@ -9611,8 +11433,10 @@ #define MC_CMD_GET_CLOCK_OUT_LEN 8 /* System frequency, MHz */ #define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0 +#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4 /* DPCPU frequency, MHz */ #define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4 +#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4 /***********************************/ @@ -9621,36 +11445,43 @@ */ #define MC_CMD_SET_CLOCK 0xad -#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_SET_CLOCK_IN msgrequest */ #define MC_CMD_SET_CLOCK_IN_LEN 28 /* Requested frequency in MHz for system clock domain */ #define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0 +#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4 /* enum: Leave the system clock domain frequency unchanged */ #define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for inter-core clock domain */ #define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4 +#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4 /* enum: Leave the inter-core clock domain frequency unchanged */ #define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for DPCPU clock domain */ #define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8 +#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4 /* enum: Leave the DPCPU clock domain frequency unchanged */ #define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for PCS clock domain */ #define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12 +#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4 /* enum: Leave the PCS clock domain frequency unchanged */ #define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for MC clock domain */ #define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16 +#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4 /* enum: Leave the MC clock domain frequency unchanged */ #define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for rmon clock domain */ #define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20 +#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4 /* enum: Leave the rmon clock domain frequency unchanged */ #define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for vswitch clock domain */ #define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24 +#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4 /* enum: Leave the vswitch clock domain frequency unchanged */ #define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0 @@ -9658,30 +11489,37 @@ #define MC_CMD_SET_CLOCK_OUT_LEN 28 /* Resulting system frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0 +#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4 
/* enum: The system clock domain doesn't exist */ #define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0 /* Resulting inter-core frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4 +#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4 /* enum: The inter-core clock domain doesn't exist / isn't used */ #define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0 /* Resulting DPCPU frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8 +#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4 /* enum: The dpcpu clock domain doesn't exist */ #define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0 /* Resulting PCS frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12 +#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4 /* enum: The PCS clock domain doesn't exist / isn't controlled */ #define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0 /* Resulting MC frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16 +#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4 /* enum: The MC clock domain doesn't exist / isn't controlled */ #define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0 /* Resulting rmon frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20 +#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4 /* enum: The rmon clock domain doesn't exist / isn't controlled */ #define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0 /* Resulting vswitch frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24 +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4 /* enum: The vswitch clock domain doesn't exist / isn't controlled */ #define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0 @@ -9692,11 +11530,12 @@ */ #define MC_CMD_DPCPU_RPC 0xae -#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DPCPU_RPC_IN msgrequest */ #define MC_CMD_DPCPU_RPC_IN_LEN 36 #define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0 +#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4 /* enum: RxDPCPU0 */ #define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0 /* enum: TxDPCPU0 */ @@ -9761,12 +11600,15 @@ #define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24 /* Register data to write. Only valid in write/write-read. */ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4 /* Register address. */ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4 /* MC_CMD_DPCPU_RPC_OUT msgresponse */ #define MC_CMD_DPCPU_RPC_OUT_LEN 36 #define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0 +#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4 /* DATA */ #define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4 #define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32 @@ -9777,9 +11619,13 @@ #define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12 #define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24 #define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4 #define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4 #define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4 #define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4 /***********************************/ @@ -9794,6 +11640,7 @@ #define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4 /* Interrupt level relative to base for function. 
*/ #define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0 +#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4 /* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */ #define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0 @@ -9811,6 +11658,7 @@ #define MC_CMD_SHMBOOT_OP_IN_LEN 4 /* Identifies the operation to perform */ #define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0 +#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4 /* enum: Copy slave_data section to the slave core. (Greenport only) */ #define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0 @@ -9824,13 +11672,16 @@ */ #define MC_CMD_CAP_BLK_READ 0xe7 -#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_CAP_BLK_READ_IN msgrequest */ #define MC_CMD_CAP_BLK_READ_IN_LEN 12 #define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0 +#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4 #define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4 +#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4 #define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8 +#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4 /* MC_CMD_CAP_BLK_READ_OUT msgresponse */ #define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8 @@ -9850,53 +11701,77 @@ */ #define MC_CMD_DUMP_DO 0xe8 -#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DUMP_DO_IN msgrequest */ #define MC_CMD_DUMP_DO_IN_LEN 52 #define MC_CMD_DUMP_DO_IN_PADDING_OFST 0 +#define MC_CMD_DUMP_DO_IN_PADDING_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 #define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 #define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4 /* enum: The uart port this command was received over (if using a uart * transport) */ #define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 +#define 
MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4 /* MC_CMD_DUMP_DO_OUT msgresponse */ #define MC_CMD_DUMP_DO_OUT_LEN 4 #define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0 +#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4 /***********************************/ @@ -9905,41 +11780,64 @@ */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9 -#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16 +#define 
MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4 /***********************************/ @@ -9950,17 +11848,20 @@ */ #define MC_CMD_SET_PSU 0xea -#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_SET_PSU_IN msgrequest */ #define MC_CMD_SET_PSU_IN_LEN 12 #define MC_CMD_SET_PSU_IN_PARAM_OFST 0 +#define MC_CMD_SET_PSU_IN_PARAM_LEN 4 #define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */ #define 
MC_CMD_SET_PSU_IN_RAIL_OFST 4 +#define MC_CMD_SET_PSU_IN_RAIL_LEN 4 #define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */ #define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */ /* desired value, eg voltage in mV */ #define MC_CMD_SET_PSU_IN_VALUE_OFST 8 +#define MC_CMD_SET_PSU_IN_VALUE_LEN 4 /* MC_CMD_SET_PSU_OUT msgresponse */ #define MC_CMD_SET_PSU_OUT_LEN 0 @@ -9980,7 +11881,9 @@ /* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */ #define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8 #define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0 +#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4 #define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4 +#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4 /***********************************/ @@ -10016,12 +11919,16 @@ #define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num)) /* CRC32 over OFFSET, LENGTH, RESERVED, DATA */ #define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0 +#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4 /* Offset at which to write the data */ #define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4 +#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4 /* Length of data */ #define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8 +#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4 /* Reserved for future use */ #define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12 +#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4 #define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16 #define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1 #define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0 @@ -10044,12 +11951,16 @@ #define MC_CMD_UART_RECV_DATA_OUT_LEN 16 /* CRC32 over OFFSET, LENGTH, RESERVED */ #define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0 +#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4 /* Offset from which to read the data */ #define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4 +#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4 /* Length of data */ #define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8 +#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4 /* Reserved for future use */ #define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12 +#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4 /* MC_CMD_UART_RECV_DATA_IN msgresponse */ #define MC_CMD_UART_RECV_DATA_IN_LENMIN 16 @@ -10057,12 +11968,16 @@ #define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num)) /* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */ #define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0 +#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4 /* Offset at which to write the data */ #define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4 +#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4 /* Length of data */ #define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8 +#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4 /* Reserved for future use */ #define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12 +#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4 #define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16 #define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1 #define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0 @@ -10075,14 +11990,16 @@ */ #define MC_CMD_READ_FUSES 0xf0 -#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_READ_FUSES_IN msgrequest */ #define MC_CMD_READ_FUSES_IN_LEN 8 /* Offset in OTP to read */ #define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0 +#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4 /* Length of data to read in bytes */ #define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4 +#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4 /* MC_CMD_READ_FUSES_OUT msgresponse */ #define MC_CMD_READ_FUSES_OUT_LENMIN 4 @@ -10090,6 +12007,7 @@ #define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num)) /* Length of returned OTP data in 
bytes */ #define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0 +#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4 /* Returned data */ #define MC_CMD_READ_FUSES_OUT_DATA_OFST 4 #define MC_CMD_READ_FUSES_OUT_DATA_LEN 1 @@ -10197,6 +12115,60 @@ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 /* enum: CTLE EQ Resistor (0-7, Medford) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa +/* enum: CTLE gain (0-31, Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN 0xb +/* enum: CTLE pole (0-31, Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE 0xc +/* enum: CTLE peaking (0-31, Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK 0xd +/* enum: DFE Tap1 - even path (Medford2 - 6 bit signed (-29 - +29)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN 0xe +/* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD 0xf +/* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x10 +/* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x11 +/* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x12 +/* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x13 +/* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6 0x14 +/* enum: DFE Tap7 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7 0x15 +/* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8 0x16 +/* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9 0x17 +/* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10 0x18 +/* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11 0x19 +/* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12 0x1a +/* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF 0x1b +/* enum: Negative h1 polarity data sampler offset calibration code, even path + * (Medford2 - 6 bit signed (-29 - +29)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN 0x1c +/* enum: Negative h1 polarity data sampler offset calibration code, odd path + * (Medford2 - 6 bit signed (-29 - +29)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD 0x1d +/* enum: Positive h1 polarity data sampler offset calibration code, even path + * (Medford2 - 6 bit signed (-29 - +29)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN 0x1e +/* enum: Positive h1 polarity data sampler offset calibration code, odd path + * (Medford2 - 6 bit signed (-29 - +29)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD 0x1f +/* enum: CDR calibration loop code (Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20 +/* enum: CDR integral loop code (Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -10268,7 +12240,7 @@ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 -/* enum: TX Amplitude (Huntington, Medford) */ 
+/* enum: TX Amplitude (Huntington, Medford, Medford2) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0 /* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1 @@ -10290,9 +12262,9 @@ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 /* enum: TX Amplitude Fine control (Medford) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa -/* enum: Pre-shoot Tap (Medford) */ +/* enum: Pre-shoot Tap (Medford, Medford2) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb -/* enum: De-emphasis Tap (Medford) */ +/* enum: De-emphasis Tap (Medford, Medford2) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 @@ -10361,7 +12333,24 @@ /* Align the arguments to 32 bits */ #define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1 #define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3 +/* Port-relative lane to scan eye on */ #define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4 + +/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12 +/* Requested operation */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3 +/* Port-relative lane to scan eye on */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4 +/* Scan duration / cycle count */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4 /* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */ #define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0 @@ -10393,10 +12382,12 @@ #define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1 #define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3 #define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4 /* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */ #define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4 #define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0 +#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4 /***********************************/ @@ -10594,6 +12585,7 @@ #define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1 #define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3 #define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4 +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4 /* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */ #define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0 @@ -10636,6 +12628,7 @@ #define MC_CMD_LICENSING_IN_LEN 4 /* identifies the type of operation requested */ #define MC_CMD_LICENSING_IN_OP_OFST 0 +#define MC_CMD_LICENSING_IN_OP_LEN 4 /* enum: re-read and apply licenses after a license key partition update; note * that this operation returns a zero-length response */ @@ -10647,23 +12640,30 @@ #define MC_CMD_LICENSING_OUT_LEN 28 /* count of application keys which are valid */ #define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0 +#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4 /* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with * MC_CMD_FC_OP_LICENSE) */ #define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4 +#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4 /* count of application keys which are invalid due to being blacklisted */ 
#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8 +#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4 /* count of application keys which are invalid due to being unverifiable */ #define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12 +#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4 /* count of application keys which are invalid due to being for the wrong node */ #define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16 +#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4 /* licensing state (for diagnostics; the exact meaning of the bits in this * field is private to the firmware) */ #define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20 +#define MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4 /* licensing subsystem self-test report (for manftest) */ #define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24 +#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4 /* enum: licensing subsystem self-test failed */ #define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0 /* enum: licensing subsystem self-test passed */ @@ -10683,6 +12683,7 @@ #define MC_CMD_LICENSING_V3_IN_LEN 4 /* identifies the type of operation requested */ #define MC_CMD_LICENSING_V3_IN_OP_OFST 0 +#define MC_CMD_LICENSING_V3_IN_OP_LEN 4 /* enum: re-read and apply licenses after a license key partition update; note * that this operation returns a zero-length response */ @@ -10696,20 +12697,26 @@ #define MC_CMD_LICENSING_V3_OUT_LEN 88 /* count of keys which are valid */ #define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0 +#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4 /* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with * MC_CMD_FC_OP_LICENSE) */ #define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4 +#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4 /* count of keys which are invalid due to being unverifiable */ #define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8 +#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4 /* count of keys which are invalid due to being for the wrong node */ #define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12 +#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4 /* licensing state (for diagnostics; the exact meaning of the bits in this * field is private to the firmware) */ #define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16 +#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4 /* licensing subsystem self-test report (for manftest) */ #define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20 +#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4 /* enum: licensing subsystem self-test failed */ #define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0 /* enum: licensing subsystem self-test passed */ @@ -10750,8 +12757,10 @@ #define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num)) /* type of license (eg 3) */ #define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4 /* length of the license ID (in bytes) */ #define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4 /* the unique license ID of the adapter */ #define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8 #define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1 @@ -10789,11 +12798,13 @@ #define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4 /* application ID to query (LICENSED_APP_ID_xxx) */ #define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0 +#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4 /* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */ #define 
MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4 /* state of this application */ #define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0 +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4 /* enum: no (or invalid) license is present for the application */ #define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0 /* enum: a valid license is present for the application */ @@ -10824,6 +12835,7 @@ #define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4 /* state of this application */ #define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4 /* enum: no (or invalid) license is present for the application */ #define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0 /* enum: a valid license is present for the application */ @@ -10874,8 +12886,10 @@ #define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num)) /* application ID */ #define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0 +#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4 /* the type of operation requested */ #define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4 +#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4 /* enum: validate application */ #define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0 /* enum: mask application */ @@ -10900,8 +12914,10 @@ #define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72 /* application ID */ #define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4 /* the type of operation requested */ #define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4 /* validation challenge */ #define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8 #define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64 @@ -10910,6 +12926,7 @@ #define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68 /* feature expiry (time_t) */ #define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4 /* validation response */ #define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4 #define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64 @@ -10918,10 +12935,13 @@ #define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12 /* application ID */ #define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0 +#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4 /* the type of operation requested */ #define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4 +#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4 /* flag */ #define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8 +#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4 /* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */ #define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0 @@ -10959,8 +12979,10 @@ #define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96 /* application expiry time */ #define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4 /* application expiry units */ #define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4 /* enum: expiry units are accounting units */ #define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0 /* enum: expiry units are calendar days */ @@ -10984,7 +13006,7 @@ */ #define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5 -#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL +#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN /* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */ #define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12 @@ -10995,6 +13017,7 @@ #define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4 /* 
whether to turn on or turn off the masked features */ #define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4 /* enum: turn the features off */ #define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0 /* enum: turn the features back on */ @@ -11014,12 +13037,13 @@ */ #define MC_CMD_LICENSING_V3_TEMPORARY 0xd6 -#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL +#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN /* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */ #define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4 /* operation code */ #define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4 /* enum: install a new license, overwriting any existing temporary license. * This is an asynchronous operation owing to the time taken to validate an * ECDSA license @@ -11037,6 +13061,7 @@ /* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */ #define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164 #define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4 /* ECDSA license and signature */ #define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4 #define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160 @@ -11044,15 +13069,18 @@ /* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */ #define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4 #define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */ #define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4 #define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4 /* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */ #define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12 /* status code */ #define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4 /* enum: finished validating and installing license */ #define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0 /* enum: license validation and installation in progress */ @@ -11084,14 +13112,17 @@ #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16 /* configuration flags */ #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4 #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1 #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1 /* receive queue handle (for RSS mode, this is the base queue) */ #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4 /* receive mode */ #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4 /* enum: receive to just the specified queue */ #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 /* enum: receive to multiple queues using RSS context */ @@ -11101,6 +13132,7 @@ * of 0xFFFFFFFF is guaranteed never to be a valid handle. 
*/ #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4 /* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */ #define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0 @@ -11123,20 +13155,24 @@ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16 /* configuration flags */ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4 #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1 #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1 /* receiving queue handle (for RSS mode, this is the base queue) */ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4 /* receive mode */ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4 /* enum: receiving to just the specified queue */ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 /* enum: receiving to multiple queues using RSS context */ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 /* RSS context (for RX_MODE_RSS) */ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4 /***********************************/ @@ -11153,6 +13189,7 @@ #define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num)) /* the type of configuration setting to change */ #define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4 /* enum: Per-TXQ enable for multicast UDP destination lookup for possible * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.) */ @@ -11166,6 +13203,7 @@ * on the type of configuration setting being changed */ #define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4 /* new value: the details depend on the type of configuration setting being * changed */ @@ -11190,12 +13228,14 @@ #define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8 /* the type of configuration setting to read */ #define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */ /* handle for the entity to query: queue handle, EVB port ID, etc. 
depending on * the type of configuration setting being read */ #define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4 /* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */ #define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4 @@ -11228,12 +13268,15 @@ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16 /* configuration flags */ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4 #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 /* receive queue handle (for RSS mode, this is the base queue) */ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4 /* receive mode */ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4 /* enum: receive to just the specified queue */ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 /* enum: receive to multiple queues using RSS context */ @@ -11243,6 +13286,7 @@ * of 0xFFFFFFFF is guaranteed never to be a valid handle. */ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4 /* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0 @@ -11265,18 +13309,22 @@ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16 /* configuration flags */ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4 #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 /* receiving queue handle (for RSS mode, this is the base queue) */ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4 /* receive mode */ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4 /* enum: receiving to just the specified queue */ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 /* enum: receiving to multiple queues using RSS context */ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 /* RSS context (for RX_MODE_RSS) */ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4 /***********************************/ @@ -11291,16 +13339,22 @@ #define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8 /* The rx queue to get stats for. 
*/ #define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4 #define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4 #define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0 #define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1 /* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */ #define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16 #define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4 #define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4 #define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4 #define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4 /***********************************/ @@ -11309,6 +13363,8 @@ */ #define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd +#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0 @@ -11316,20 +13372,27 @@ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28 /* The maximum number of PFs the device can expose */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4 /* The maximum number of VFs the device can expose in total */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4 /* The maximum number of MSI-X vectors the device can provide in total */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4 /* the number of MSI-X vectors the device will allocate by default to each PF */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4 /* the number of MSI-X vectors the device will allocate by default to each VF */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4 /* the maximum number of MSI-X vectors the device can allocate to any one PF */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4 /* the maximum number of MSI-X vectors the device can allocate to any one VF */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4 /***********************************/ @@ -11347,10 +13410,13 @@ #define MC_CMD_GET_PORT_MODES_OUT_LEN 12 /* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */ #define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0 +#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4 /* Default (canonical) board mode */ #define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4 +#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4 /* Current board mode */ #define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8 +#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4 /***********************************/ @@ -11359,21 +13425,26 @@ */ #define MC_CMD_READ_ATB 0x100 -#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_READ_ATB_IN msgrequest */ #define MC_CMD_READ_ATB_IN_LEN 16 #define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0 +#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4 #define 
MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */ #define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */ #define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */ #define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4 +#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4 #define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8 +#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4 #define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12 +#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4 /* MC_CMD_READ_ATB_OUT msgresponse */ #define MC_CMD_READ_ATB_OUT_LEN 4 #define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0 +#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4 /***********************************/ @@ -11390,7 +13461,9 @@ /* Each workaround is represented by a single bit according to the enums below. */ #define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0 +#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4 #define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4 +#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4 /* enum: Bug 17230 work around. */ #define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2 /* enum: Bug 35388 work around (unsafe EVQ writes). */ @@ -11425,6 +13498,7 @@ * 1,3 = 0x00030001 */ #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4 #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0 #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16 #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16 @@ -11434,6 +13508,7 @@ * set to 1. */ #define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4 +#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4 #define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */ #define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */ #define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */ @@ -11460,6 +13535,10 @@ * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT. */ #define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000 +/* enum: Privilege for insecure commands. Commands that belong to this group + * are not permitted on secure adapters regardless of the privilege mask. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000 /* enum: Set this bit to indicate that a new privilege mask is to be set, * otherwise the command will only read the existing mask. */ @@ -11469,6 +13548,7 @@ #define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4 /* For an admin function, always all the privileges are reported. */ #define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0 +#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4 /***********************************/ @@ -11485,12 +13565,14 @@ * e.g. 
VF 1,3 = 0x00030001 */ #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4 #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0 #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16 #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16 #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16 /* New link state mode to be set */ #define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4 +#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4 #define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */ #define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */ #define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */ @@ -11501,11 +13583,12 @@ /* MC_CMD_LINK_STATE_MODE_OUT msgresponse */ #define MC_CMD_LINK_STATE_MODE_OUT_LEN 4 #define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0 +#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4 /***********************************/ /* MC_CMD_GET_SNAPSHOT_LENGTH - * Obtain the curent range of allowable values for the SNAPSHOT_LENGTH + * Obtain the current range of allowable values for the SNAPSHOT_LENGTH * parameter to MC_CMD_INIT_RXQ. */ #define MC_CMD_GET_SNAPSHOT_LENGTH 0x101 @@ -11519,8 +13602,10 @@ #define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8 /* Minimum acceptable snapshot length. */ #define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0 +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4 /* Maximum acceptable snapshot length. */ #define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4 +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4 /***********************************/ @@ -11529,7 +13614,7 @@ */ #define MC_CMD_FUSE_DIAGS 0x102 -#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_FUSE_DIAGS_IN msgrequest */ #define MC_CMD_FUSE_DIAGS_IN_LEN 0 @@ -11538,28 +13623,40 @@ #define MC_CMD_FUSE_DIAGS_OUT_LEN 48 /* Total number of mismatched bits between pairs in area 0 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4 /* Total number of unexpectedly clear (set in B but not A) bits in area 0 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4 /* Total number of unexpectedly clear (set in A but not B) bits in area 0 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4 /* Checksum of data after logical OR of pairs in area 0 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4 /* Total number of mismatched bits between pairs in area 1 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4 /* Total number of unexpectedly clear (set in B but not A) bits in area 1 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4 /* Total number of unexpectedly clear (set in A but not B) bits in area 1 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4 /* Checksum of data after logical OR of pairs in area 1 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4 /* Total number of mismatched bits between pairs in area 2 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4 /* 
Total number of unexpectedly clear (set in B but not A) bits in area 2 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4 /* Total number of unexpectedly clear (set in A but not B) bits in area 2 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4 /* Checksum of data after logical OR of pairs in area 2 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4 /***********************************/ @@ -11576,6 +13673,7 @@ #define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16 /* The groups of functions to have their privilege masks modified. */ #define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4 #define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */ #define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */ #define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */ @@ -11584,6 +13682,7 @@ #define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */ /* For VFS_OF_PF specify the PF, for ONE specify the target function */ #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4 #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0 #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16 #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16 @@ -11592,10 +13691,12 @@ * refer to the command MC_CMD_PRIVILEGE_MASK */ #define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8 +#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4 /* Privileges to be removed from the target functions. For privilege * definitions refer to the command MC_CMD_PRIVILEGE_MASK */ #define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12 +#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4 /* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */ #define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0 @@ -11613,8 +13714,10 @@ #define MC_CMD_XPM_READ_BYTES_IN_LEN 8 /* Start address (byte) */ #define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0 +#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4 /* Count (bytes) */ #define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4 +#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4 /* MC_CMD_XPM_READ_BYTES_OUT msgresponse */ #define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0 @@ -11633,7 +13736,7 @@ */ #define MC_CMD_XPM_WRITE_BYTES 0x104 -#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */ #define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8 @@ -11641,8 +13744,10 @@ #define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num)) /* Start address (byte) */ #define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0 +#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4 /* Count (bytes) */ #define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4 +#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4 /* Data */ #define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8 #define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1 @@ -11659,14 +13764,16 @@ */ #define MC_CMD_XPM_READ_SECTOR 0x105 -#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_READ_SECTOR_IN msgrequest */ #define MC_CMD_XPM_READ_SECTOR_IN_LEN 8 /* Sector index */ #define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0 +#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4 /* Sector size */ #define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4 +#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4 /* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */ #define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4 @@ -11674,9 
+13781,11 @@ #define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num)) /* Sector type */ #define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0 +#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4 #define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */ #define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */ #define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */ #define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */ /* Sector data */ #define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4 @@ -11691,7 +13800,7 @@ */ #define MC_CMD_XPM_WRITE_SECTOR 0x106 -#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */ #define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12 @@ -11708,10 +13817,12 @@ #define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3 /* Sector type */ #define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4 +#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */ /* Sector size */ #define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8 +#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4 /* Sector data */ #define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12 #define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1 @@ -11722,6 +13833,7 @@ #define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4 /* New sector index */ #define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0 +#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4 /***********************************/ @@ -11730,12 +13842,13 @@ */ #define MC_CMD_XPM_INVALIDATE_SECTOR 0x107 -#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */ #define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4 /* Sector index */ #define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0 +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4 /* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */ #define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0 @@ -11747,14 +13860,16 @@ */ #define MC_CMD_XPM_BLANK_CHECK 0x108 -#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */ #define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8 /* Start address (byte) */ #define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0 +#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4 /* Count (bytes) */ #define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4 +#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4 /* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */ #define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4 @@ -11762,6 +13877,7 @@ #define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num)) /* Total number of bad (non-blank) locations */ #define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4 /* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit * into MCDI response) */ @@ -11777,14 +13893,16 @@ */ #define MC_CMD_XPM_REPAIR 0x109 -#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_REPAIR_IN msgrequest */ #define MC_CMD_XPM_REPAIR_IN_LEN 8 /* Start address (byte) */ #define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0 +#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4 /* Count (bytes) */ #define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4 +#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4 /* MC_CMD_XPM_REPAIR_OUT msgresponse */ #define MC_CMD_XPM_REPAIR_OUT_LEN 0 @@ -11797,7 +13915,7 @@ */ #define MC_CMD_XPM_DECODER_TEST 0x10a 
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_DECODER_TEST_IN msgrequest */ #define MC_CMD_XPM_DECODER_TEST_IN_LEN 0 @@ -11816,7 +13934,7 @@ */ #define MC_CMD_XPM_WRITE_TEST 0x10b -#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_WRITE_TEST_IN msgrequest */ #define MC_CMD_XPM_WRITE_TEST_IN_LEN 0 @@ -11842,10 +13960,13 @@ #define MC_CMD_EXEC_SIGNED_IN_LEN 28 /* the length of code to include in the CMAC */ #define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0 +#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4 /* the length of date to include in the CMAC */ #define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4 +#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4 /* the XPM sector containing the key to use */ #define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8 +#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4 /* the expected CMAC value */ #define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12 #define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16 @@ -11868,11 +13989,34 @@ #define MC_CMD_PREPARE_SIGNED_IN_LEN 4 /* the length of data area to clear */ #define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0 +#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4 /* MC_CMD_PREPARE_SIGNED_OUT msgresponse */ #define MC_CMD_PREPARE_SIGNED_OUT_LEN 0 +/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4 +/* UDP port (the standard ports are named below but any port may be used) */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2 +/* enum: the IANA allocated UDP port for VXLAN */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5 +/* enum: the IANA allocated UDP port for Geneve */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16 +/* tunnel encapsulation protocol (only those named below are supported) */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2 +/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0 +/* enum: This port will be used for Geneve on both IPv4 and IPv6 */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16 + + /***********************************/ /* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS * Configure UDP ports for tunnel encapsulation hardware acceleration. 
The @@ -11913,27 +14057,6 @@ #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0 #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1 -/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */ -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4 -/* UDP port (the standard ports are named below but any port may be used) */ -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0 -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2 -/* enum: the IANA allocated UDP port for VXLAN */ -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5 -/* enum: the IANA allocated UDP port for Geneve */ -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1 -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0 -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16 -/* tunnel encapsulation protocol (only those named below are supported) */ -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2 -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2 -/* enum: VXLAN */ -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0 -/* enum: Geneve */ -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1 -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16 -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16 - /***********************************/ /* MC_CMD_RX_BALANCING @@ -11950,12 +14073,16 @@ #define MC_CMD_RX_BALANCING_IN_LEN 16 /* The RX port whose upconverter table will be modified */ #define MC_CMD_RX_BALANCING_IN_PORT_OFST 0 +#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4 /* The VLAN priority associated to the table index and vFIFO */ #define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4 +#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4 /* The resulting bit of SRC^DST for indexing the table */ #define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8 +#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4 /* The RX engine to which the vFIFO in the table entry will point to */ #define MC_CMD_RX_BALANCING_IN_ENG_OFST 12 +#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4 /* MC_CMD_RX_BALANCING_OUT msgresponse */ #define MC_CMD_RX_BALANCING_OUT_LEN 0 @@ -11976,8 +14103,10 @@ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num)) /* The tag to be appended */ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4 /* The length of the data */ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4 /* The data to be contained in the TLV structure */ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8 #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1 @@ -12002,6 +14131,7 @@ #define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4 /* Data type to be checked */ #define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0 +#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4 /* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12 @@ -12009,10 +14139,13 @@ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num)) /* Number of sectors found (test builds only) */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4 /* Number of bytes found (test builds only) */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4 /* Length of signature */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4 /* Signature */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12 #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1 @@ -12037,12 +14170,16 @@ #define 
MC_CMD_SET_EVQ_TMR_IN_LEN 16 /* Function-relative queue instance */ #define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0 +#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4 /* Requested value for timer load (in nanoseconds) */ #define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4 +#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4 /* Requested value for timer reload (in nanoseconds) */ #define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8 +#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4 /* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */ #define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12 +#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4 #define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */ #define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */ #define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */ @@ -12052,8 +14189,10 @@ #define MC_CMD_SET_EVQ_TMR_OUT_LEN 8 /* Actual value for timer load (in nanoseconds) */ #define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0 +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4 /* Actual value for timer reload (in nanoseconds) */ #define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4 +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4 /***********************************/ @@ -12071,29 +14210,35 @@ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36 /* Reserved for future use. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4 /* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in * nanoseconds) for each increment of the timer load/reload count. The * requested duration of a timer is this value multiplied by the timer * load/reload count. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4 /* For timers updated via writes to EVQ_TMR_REG, this is the maximum value * allowed for timer load/reload counts. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4 /* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a * multiple of this step size will be rounded in an implementation defined * manner. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4 /* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only * meaningful if MC_CMD_SET_EVQ_TMR is implemented. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4 /* Timer durations requested via MCDI that are not a multiple of this step size * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4 /* For timers updated using the bug35388 workaround, this is the time interval * (in nanoseconds) for each increment of the timer load/reload count. The * requested duration of a timer is this value multiplied by the timer @@ -12101,17 +14246,20 @@ * is enabled. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4 /* For timers updated using the bug35388 workaround, this is the maximum value * allowed for timer load/reload counts. 
This field is only meaningful if the * bug35388 workaround is enabled. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4 /* For timers updated using the bug35388 workaround, timer load/reload counts * not a multiple of this step size will be rounded in an implementation * defined manner. This field is only meaningful if the bug35388 workaround is * enabled. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4 /***********************************/ @@ -12129,19 +14277,24 @@ * local queue index. */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4 /* Will the common pool be used as TX_vFIFO_ULL (1) */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4 #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */ /* enum: Using this interface without TX_vFIFO_ULL is not supported for now */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0 /* Number of buffers to reserve for the common pool */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4 /* TX datapath to which the Common Pool is connected to. */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4 /* enum: Extracts information from function */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 /* Network port or RX Engine to which the common pool connects. */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4 /* enum: Extracts information from function */ /* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */ @@ -12157,6 +14310,7 @@ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4 /* ID of the common pool allocated */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4 /***********************************/ @@ -12173,8 +14327,10 @@ /* Common pool previously allocated to which the new vFIFO will be associated */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4 /* Port or RX engine to associate the vFIFO egress */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4 /* enum: Extracts information from common pool */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1 #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */ @@ -12187,12 +14343,15 @@ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5 /* Minimum number of buffers that the pool must have */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4 /* enum: Do not check the space available */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0 /* Will the vFIFO be used as TX_vFIFO_ULL */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4 /* Network priority of the vFIFO,if applicable */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4 /* enum: Search for the lowest unused priority */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1 @@ -12200,8 +14359,10 @@ #define 
MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8 /* Short vFIFO ID */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4 /* Network priority of the vFIFO */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4 /***********************************/ @@ -12217,6 +14378,7 @@ #define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4 /* Short vFIFO ID */ #define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0 +#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4 /* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */ #define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0 @@ -12235,6 +14397,7 @@ #define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4 /* Common pool ID given when pool allocated */ #define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0 +#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4 /* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */ #define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0 @@ -12256,8 +14419,10 @@ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8 /* Available buffers for the ENG to NET vFIFOs. */ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0 +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4 /* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4 +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4 #endif /* MCDI_PCOL_H */ diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 6e1f282b2976..ce8aabf9091e 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -171,89 +171,108 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev, return 0; } -static u32 mcdi_to_ethtool_cap(u32 media, u32 cap) +static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset) { - u32 result = 0; + #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \ + linkset) + bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS); switch (media) { case MC_CMD_MEDIA_KX4: - result |= SUPPORTED_Backplane; + SET_BIT(Backplane); if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) - result |= SUPPORTED_1000baseKX_Full; + SET_BIT(1000baseKX_Full); if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) - result |= SUPPORTED_10000baseKX4_Full; + SET_BIT(10000baseKX4_Full); if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) - result |= SUPPORTED_40000baseKR4_Full; + SET_BIT(40000baseKR4_Full); break; case MC_CMD_MEDIA_XFP: case MC_CMD_MEDIA_SFP_PLUS: case MC_CMD_MEDIA_QSFP_PLUS: - result |= SUPPORTED_FIBRE; + SET_BIT(FIBRE); if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) - result |= SUPPORTED_1000baseT_Full; + SET_BIT(1000baseT_Full); if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) - result |= SUPPORTED_10000baseT_Full; + SET_BIT(10000baseT_Full); if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) - result |= SUPPORTED_40000baseCR4_Full; + SET_BIT(40000baseCR4_Full); + if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) + SET_BIT(100000baseCR4_Full); + if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) + SET_BIT(25000baseCR_Full); + if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN)) + SET_BIT(50000baseCR2_Full); break; case MC_CMD_MEDIA_BASE_T: - result |= SUPPORTED_TP; + SET_BIT(TP); if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) - result |= SUPPORTED_10baseT_Half; + SET_BIT(10baseT_Half); if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) - result |= SUPPORTED_10baseT_Full; + SET_BIT(10baseT_Full); if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) - result |= 
SUPPORTED_100baseT_Half; + SET_BIT(100baseT_Half); if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) - result |= SUPPORTED_100baseT_Full; + SET_BIT(100baseT_Full); if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) - result |= SUPPORTED_1000baseT_Half; + SET_BIT(1000baseT_Half); if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) - result |= SUPPORTED_1000baseT_Full; + SET_BIT(1000baseT_Full); if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) - result |= SUPPORTED_10000baseT_Full; + SET_BIT(10000baseT_Full); break; } if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) - result |= SUPPORTED_Pause; + SET_BIT(Pause); if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) - result |= SUPPORTED_Asym_Pause; + SET_BIT(Asym_Pause); if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) - result |= SUPPORTED_Autoneg; + SET_BIT(Autoneg); - return result; + #undef SET_BIT } -static u32 ethtool_to_mcdi_cap(u32 cap) +static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset) { u32 result = 0; - if (cap & SUPPORTED_10baseT_Half) + #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \ + linkset) + + if (TEST_BIT(10baseT_Half)) result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN); - if (cap & SUPPORTED_10baseT_Full) + if (TEST_BIT(10baseT_Full)) result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN); - if (cap & SUPPORTED_100baseT_Half) + if (TEST_BIT(100baseT_Half)) result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN); - if (cap & SUPPORTED_100baseT_Full) + if (TEST_BIT(100baseT_Full)) result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN); - if (cap & SUPPORTED_1000baseT_Half) + if (TEST_BIT(1000baseT_Half)) result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN); - if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full)) + if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full)) result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN); - if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full)) + if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full)) result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN); - if (cap & (SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseKR4_Full)) + if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full)) result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN); - if (cap & SUPPORTED_Pause) + if (TEST_BIT(100000baseCR4_Full)) + result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN); + if (TEST_BIT(25000baseCR_Full)) + result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN); + if (TEST_BIT(50000baseCR2_Full)) + result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN); + if (TEST_BIT(Pause)) result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN); - if (cap & SUPPORTED_Asym_Pause) + if (TEST_BIT(Asym_Pause)) result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN); - if (cap & SUPPORTED_Autoneg) + if (TEST_BIT(Autoneg)) result |= (1 << MC_CMD_PHY_CAP_AN_LBN); + #undef TEST_BIT + return result; } @@ -285,7 +304,7 @@ static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) return flags; } -static u32 mcdi_to_ethtool_media(u32 media) +static u8 mcdi_to_ethtool_media(u32 media) { switch (media) { case MC_CMD_MEDIA_XAUI: @@ -371,8 +390,8 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx) caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) - efx->link_advertising = - mcdi_to_ethtool_cap(phy_data->media, caps); + mcdi_to_ethtool_linkset(phy_data->media, caps, + efx->link_advertising); else phy_data->forced_cap = caps; @@ -435,8 +454,8 @@ fail: int efx_mcdi_port_reconfigure(struct efx_nic *efx) { struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; - u32 caps = (efx->link_advertising ? - ethtool_to_mcdi_cap(efx->link_advertising) : + u32 caps = (efx->link_advertising[0] ? 
+ ethtool_linkset_to_mcdi_cap(efx->link_advertising) : phy_cfg->forced_cap); return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), @@ -509,34 +528,28 @@ static void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); int rc; - u32 supported, advertising, lp_advertising; - supported = mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap); - advertising = efx->link_advertising; cmd->base.speed = efx->link_state.speed; cmd->base.duplex = efx->link_state.fd; cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media); cmd->base.phy_address = phy_cfg->port; - cmd->base.autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg); + cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg); cmd->base.mdio_support = (efx->mdio.mode_support & (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22)); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); + mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap, + cmd->link_modes.supported); + memcpy(cmd->link_modes.advertising, efx->link_advertising, + sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK())); BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, outbuf, sizeof(outbuf), NULL); if (rc) return; - lp_advertising = - mcdi_to_ethtool_cap(phy_cfg->media, - MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP)); - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, - lp_advertising); + mcdi_to_ethtool_linkset(phy_cfg->media, + MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP), + cmd->link_modes.lp_advertising); } static int @@ -546,29 +559,28 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; u32 caps; int rc; - u32 advertising; - - ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); if (cmd->base.autoneg) { - caps = (ethtool_to_mcdi_cap(advertising) | - 1 << MC_CMD_PHY_CAP_AN_LBN); + caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) | + 1 << MC_CMD_PHY_CAP_AN_LBN); } else if (cmd->base.duplex) { switch (cmd->base.speed) { - case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break; - case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; - case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; - case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break; - case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break; - default: return -EINVAL; + case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break; + case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; + case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; + case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break; + case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break; + case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break; + case 25000: caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN; break; + case 50000: caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN; break; + default: return -EINVAL; } } else { switch (cmd->base.speed) { - case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break; - case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break; - case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break; - default: return -EINVAL; + case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break; + case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break; + case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break; + default: return -EINVAL; } } @@ 
-578,11 +590,10 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, return rc; if (cmd->base.autoneg) { - efx_link_set_advertising( - efx, advertising | ADVERTISED_Autoneg); + efx_link_set_advertising(efx, cmd->link_modes.advertising); phy_cfg->forced_cap = 0; } else { - efx_link_set_advertising(efx, 0); + efx_link_clear_advertising(efx); phy_cfg->forced_cap = caps; } return 0; @@ -985,6 +996,9 @@ static unsigned int efx_mcdi_event_link_speed[] = { [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000, + [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000, + [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000, + [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000, }; void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) @@ -1087,7 +1101,7 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, int period = action == EFX_STATS_ENABLE ? 1000 : 0; dma_addr_t dma_addr = efx->stats_buffer.dma_addr; u32 dma_len = action != EFX_STATS_DISABLE ? - MC_CMD_MAC_NSTATS * sizeof(u64) : 0; + efx->num_mac_stats * sizeof(u64) : 0; BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0); @@ -1121,7 +1135,7 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx) { __le64 *dma_stats = efx->stats_buffer.addr; - dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; + dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0); } @@ -1139,10 +1153,10 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx) __le64 *dma_stats = efx->stats_buffer.addr; int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS; - dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; + dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0); - while (dma_stats[MC_CMD_MAC_GENERATION_END] == + while (dma_stats[efx->num_mac_stats - 1] == EFX_MC_STATS_GENERATION_INVALID && attempts-- != 0) udelay(EFX_MAC_STATS_WAIT_US); @@ -1167,7 +1181,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx) /* Allocate buffer for stats */ rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, - MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL); + efx->num_mac_stats * sizeof(u64), GFP_KERNEL); if (rc) return rc; netif_dbg(efx, probe, efx->net_dev, diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index c0537ea06c9a..d20a8660ee48 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -191,6 +191,7 @@ struct efx_tx_buffer { * Size of the region is efx_piobuf_size. * @piobuf_offset: Buffer offset to be specified in PIO descriptors * @initialised: Has hardware queue been initialised? + * @timestamping: Is timestamping enabled for this channel? * @handle_tso: TSO xmit preparation handler. Sets up the TSO metadata and * may also map tx data, depending on the nature of the TSO implementation. * @read_count: Current read pointer. @@ -202,6 +203,10 @@ struct efx_tx_buffer { * avoid cache-line ping-pong between the xmit path and the * completion path. * @merge_events: Number of TX merged completion events + * @completed_desc_ptr: Most recent completed pointer - only used with + * timestamping. + * @completed_timestamp_major: Top part of the most recent tx timestamp. + * @completed_timestamp_minor: Low part of the most recent tx timestamp. * @insert_count: Current insert pointer * This is the number of buffers that have been added to the * software ring. 
@@ -247,6 +252,7 @@ struct efx_tx_queue { void __iomem *piobuf; unsigned int piobuf_offset; bool initialised; + bool timestamping; /* Function pointers used in the fast path. */ int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *); @@ -257,6 +263,9 @@ struct efx_tx_queue { unsigned int merge_events; unsigned int bytes_compl; unsigned int pkts_compl; + unsigned int completed_desc_ptr; + u32 completed_timestamp_major; + u32 completed_timestamp_minor; /* Members used only on the xmit path */ unsigned int insert_count ____cacheline_aligned_in_smp; @@ -522,8 +531,12 @@ struct efx_msi_context { * @copy: Copy the channel state prior to reallocation. May be %NULL if * reallocation is not supported. * @receive_skb: Handle an skb ready to be passed to netif_receive_skb() + * @want_txqs: Determine whether this channel should have TX queues + * created. If %NULL, TX queues are not created. * @keep_eventq: Flag for whether event queue should be kept initialised * while the device is stopped + * @want_pio: Flag for whether PIO buffers should be linked to this + * channel's TX queues. */ struct efx_channel_type { void (*handle_no_channel)(struct efx_nic *); @@ -532,7 +545,9 @@ struct efx_channel_type { void (*get_name)(struct efx_channel *, char *buf, size_t len); struct efx_channel *(*copy)(const struct efx_channel *); bool (*receive_skb)(struct efx_channel *, struct sk_buff *); + bool (*want_txqs)(struct efx_channel *); bool keep_eventq; + bool want_pio; }; enum efx_led_mode { @@ -708,6 +723,7 @@ struct vfdi_status; * @reset_work: Scheduled reset workitem * @membase_phys: Memory BAR value as physical address * @membase: Memory BAR value + * @vi_stride: step between per-VI registers / memory regions * @interrupt_mode: Interrupt mode * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds * @timer_max_ns: Interrupt timer maximum value, in nanoseconds @@ -734,6 +750,7 @@ struct vfdi_status; * @n_channels: Number of channels in use * @n_rx_channels: Number of channels used for RX (= number of RX queues) * @n_tx_channels: Number of channels used for TX + * @n_extra_tx_channels: Number of extra channels with TX queues * @rx_ip_align: RX DMA address offset to have IP header aligned in * in accordance with NET_IP_ALIGN * @rx_dma_len: Current maximum RX DMA length @@ -773,6 +790,8 @@ struct vfdi_status; * @port_initialized: Port initialized? * @net_dev: Operating system network device. Consider holding the rtnl lock * @fixed_features: Features which cannot be turned off + * @num_mac_stats: Number of MAC stats reported by firmware (MAC_STATS_NUM_STATS + * field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS) * @stats_buffer: DMA buffer for statistics * @phy_type: PHY type * @phy_op: PHY interface @@ -812,6 +831,7 @@ struct vfdi_status; * @vf_init_count: Number of VFs that have been fully initialised. * @vi_scale: log2 number of vnics per VF. * @ptp_data: PTP state data + * @ptp_warned: has this NIC seen and warned about unexpected PTP events? 
* @vpd_sn: Serial number read from VPD * @monitor_work: Hardware monitor workitem * @biu_lock: BIU (bus interface unit) lock @@ -842,6 +862,8 @@ struct efx_nic { resource_size_t membase_phys; void __iomem *membase; + unsigned int vi_stride; + enum efx_int_mode interrupt_mode; unsigned int timer_quantum_ns; unsigned int timer_max_ns; @@ -875,6 +897,7 @@ struct efx_nic { unsigned rss_spread; unsigned tx_channel_offset; unsigned n_tx_channels; + unsigned n_extra_tx_channels; unsigned int rx_ip_align; unsigned int rx_dma_len; unsigned int rx_buffer_order; @@ -918,6 +941,7 @@ struct efx_nic { netdev_features_t fixed_features; + u16 num_mac_stats; struct efx_buffer stats_buffer; u64 rx_nodesc_drops_total; u64 rx_nodesc_drops_while_down; @@ -930,7 +954,7 @@ struct efx_nic { unsigned int mdio_bus; enum efx_phy_mode phy_mode; - u32 link_advertising; + __ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising); struct efx_link_state link_state; unsigned int n_link_state_changes; @@ -965,6 +989,7 @@ struct efx_nic { #endif struct efx_ptp_data *ptp_data; + bool ptp_warned; char *vpd_sn; @@ -1154,7 +1179,7 @@ struct efx_udp_tunnel { */ struct efx_nic_type { bool is_vf; - unsigned int mem_bar; + unsigned int (*mem_bar)(struct efx_nic *efx); unsigned int (*mem_map_size)(struct efx_nic *efx); int (*probe)(struct efx_nic *efx); void (*remove)(struct efx_nic *efx); @@ -1355,8 +1380,8 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) static inline bool efx_channel_has_tx_queues(struct efx_channel *channel) { - return channel->channel - channel->efx->tx_channel_offset < - channel->efx->n_tx_channels; + return channel->type && channel->type->want_txqs && + channel->type->want_txqs(channel); } static inline struct efx_tx_queue * diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 7b51b6371724..6549fc685a48 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -325,6 +325,30 @@ enum { EF10_STAT_tx_bad, EF10_STAT_tx_bad_bytes, EF10_STAT_tx_overflow, + EF10_STAT_V1_COUNT, + EF10_STAT_fec_uncorrected_errors = EF10_STAT_V1_COUNT, + EF10_STAT_fec_corrected_errors, + EF10_STAT_fec_corrected_symbols_lane0, + EF10_STAT_fec_corrected_symbols_lane1, + EF10_STAT_fec_corrected_symbols_lane2, + EF10_STAT_fec_corrected_symbols_lane3, + EF10_STAT_ctpio_dmabuf_start, + EF10_STAT_ctpio_vi_busy_fallback, + EF10_STAT_ctpio_long_write_success, + EF10_STAT_ctpio_missing_dbell_fail, + EF10_STAT_ctpio_overflow_fail, + EF10_STAT_ctpio_underflow_fail, + EF10_STAT_ctpio_timeout_fail, + EF10_STAT_ctpio_noncontig_wr_fail, + EF10_STAT_ctpio_frm_clobber_fail, + EF10_STAT_ctpio_invalid_wr_fail, + EF10_STAT_ctpio_vi_clobber_fallback, + EF10_STAT_ctpio_unqualified_fallback, + EF10_STAT_ctpio_runt_fallback, + EF10_STAT_ctpio_success, + EF10_STAT_ctpio_fallback, + EF10_STAT_ctpio_poison, + EF10_STAT_ctpio_erase, EF10_STAT_COUNT }; @@ -416,6 +440,7 @@ struct efx_ef10_nic_data { struct efx_udp_tunnel udp_tunnels[16]; bool udp_tunnels_dirty; struct mutex udp_tunnels_lock; + u64 licensed_features; }; int efx_init_sriov(void); @@ -424,6 +449,7 @@ void efx_fini_sriov(void); struct ethtool_ts_info; int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); void efx_ptp_defer_probe_with_channel(struct efx_nic *efx); +struct efx_channel *efx_ptp_channel(struct efx_nic *efx); void efx_ptp_remove(struct efx_nic *efx); int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr); int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr); @@ -447,6 +473,8 @@ 
static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel, } void efx_ptp_start_datapath(struct efx_nic *efx); void efx_ptp_stop_datapath(struct efx_nic *efx); +bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx); +ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue); extern const struct efx_nic_type falcon_a1_nic_type; extern const struct efx_nic_type falcon_b0_nic_type; diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index caa89bf7603e..f21661532ed3 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -149,18 +149,14 @@ enum ptp_packet_state { /* Maximum parts-per-billion adjustment that is acceptable */ #define MAX_PPB 1000000 -/* Number of bits required to hold the above */ -#define MAX_PPB_BITS 20 - -/* Number of extra bits allowed when calculating fractional ns. - * EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS should - * be less than 63. - */ -#define PPB_EXTRA_BITS 2 - /* Precalculate scale word to avoid long long division at runtime */ -#define PPB_SCALE_WORD ((1LL << (PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS +\ - MAX_PPB_BITS)) / 1000000000LL) +/* This is equivalent to 2^66 / 10^9. */ +#define PPB_SCALE_WORD ((1LL << (57)) / 1953125LL) + +/* How much to shift down after scaling to convert to FP40 */ +#define PPB_SHIFT_FP40 26 +/* ... and FP44. */ +#define PPB_SHIFT_FP44 22 #define PTP_SYNC_ATTEMPTS 4 @@ -218,8 +214,8 @@ struct efx_ptp_timeset { * @channel: The PTP channel (Siena only) * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are * separate events) - * @rxq: Receive queue (awaiting timestamps) - * @txq: Transmit queue + * @rxq: Receive SKB queue (awaiting timestamps) + * @txq: Transmit SKB queue * @evt_list: List of MC receive events awaiting packets * @evt_free_list: List of free events * @evt_lock: Lock for manipulating evt_list and evt_free_list @@ -233,19 +229,36 @@ struct efx_ptp_timeset { * @config: Current timestamp configuration * @enabled: PTP operation enabled * @mode: Mode in which PTP operating (PTP version) - * @time_format: Time format supported by this NIC * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time * @nic_to_kernel_time: Function to convert from NIC to kernel time + * @nic_time.minor_max: Wrap point for NIC minor times + * @nic_time.sync_event_diff_min: Minimum acceptable difference between time + * in packet prefix and last MCDI time sync event i.e. how much earlier than + * the last sync event time a packet timestamp can be. + * @nic_time.sync_event_diff_max: Maximum acceptable difference between time + * in packet prefix and last MCDI time sync event i.e. how much later than + * the last sync event time a packet timestamp can be. + * @nic_time.sync_event_minor_shift: Shift required to make minor time from + * field in MCDI time sync event. 
* @min_synchronisation_ns: Minimum acceptable corrected sync window - * @ts_corrections.tx: Required driver correction of transmit timestamps - * @ts_corrections.rx: Required driver correction of receive timestamps + * @capabilities: Capabilities flags from the NIC + * @ts_corrections.ptp_tx: Required driver correction of PTP packet transmit + * timestamps + * @ts_corrections.ptp_rx: Required driver correction of PTP packet receive + * timestamps * @ts_corrections.pps_out: PPS output error (information only) * @ts_corrections.pps_in: Required driver correction of PPS input timestamps + * @ts_corrections.general_tx: Required driver correction of general packet + * transmit timestamps + * @ts_corrections.general_rx: Required driver correction of general packet + * receive timestamps * @evt_frags: Partly assembled PTP events * @evt_frag_idx: Current fragment number * @evt_code: Last event code * @start: Address at which MC indicates ready for synchronisation * @host_time_pps: Host time at last PPS + * @adjfreq_ppb_shift: Shift required to convert scaled parts-per-billion + * frequency adjustment into a fixed point fractional nanosecond format. * @current_adjfreq: Current ppb adjustment. * @phc_clock: Pointer to registered phc device (if primary function) * @phc_clock_info: Registration structure for phc device @@ -264,6 +277,7 @@ struct efx_ptp_timeset { * @oversize_sync_windows: Number of corrected sync windows that are too large * @rx_no_timestamp: Number of packets received without a timestamp. * @timeset: Last set of synchronisation statistics. + * @xmit_skb: Transmit SKB function. */ struct efx_ptp_data { struct efx_nic *efx; @@ -284,22 +298,31 @@ struct efx_ptp_data { struct hwtstamp_config config; bool enabled; unsigned int mode; - unsigned int time_format; void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor); ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor, s32 correction); + struct { + u32 minor_max; + u32 sync_event_diff_min; + u32 sync_event_diff_max; + unsigned int sync_event_minor_shift; + } nic_time; unsigned int min_synchronisation_ns; + unsigned int capabilities; struct { - s32 tx; - s32 rx; + s32 ptp_tx; + s32 ptp_rx; s32 pps_out; s32 pps_in; + s32 general_tx; + s32 general_rx; } ts_corrections; efx_qword_t evt_frags[MAX_EVENT_FRAGS]; int evt_frag_idx; int evt_code; struct efx_buffer start; struct pps_event_time host_time_pps; + unsigned int adjfreq_ppb_shift; s64 current_adjfreq; struct ptp_clock *phc_clock; struct ptp_clock_info phc_clock_info; @@ -319,6 +342,7 @@ struct efx_ptp_data { unsigned int rx_no_timestamp; struct efx_ptp_timeset timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM]; + void (*xmit_skb)(struct efx_nic *efx, struct sk_buff *skb); }; static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta); @@ -329,6 +353,24 @@ static int efx_phc_settime(struct ptp_clock_info *ptp, static int efx_phc_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on); +bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + return ((efx_nic_rev(efx) >= EFX_REV_HUNT_A0) && + (nic_data->datapath_caps2 & + (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN) + )); +} + +/* PTP 'extra' channel is still a traffic channel, but we only create TX queues + * if PTP uses MAC TX timestamps, not if PTP uses the MC directly to transmit. 
+ */ +static bool efx_ptp_want_txqs(struct efx_channel *channel) +{ + return efx_ptp_use_mac_tx_timestamps(channel->efx); +} + #define PTP_SW_STAT(ext_name, field_name) \ { #ext_name, 0, offsetof(struct efx_ptp_data, field_name) } #define PTP_MC_STAT(ext_name, mcdi_name) \ @@ -471,6 +513,89 @@ static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor, return efx_ptp_s27_to_ktime(nic_major, nic_minor); } +/* For Medford2 platforms the time is in seconds and quarter nanoseconds. */ +static void efx_ptp_ns_to_s_qns(s64 ns, u32 *nic_major, u32 *nic_minor) +{ + struct timespec64 ts = ns_to_timespec64(ns); + + *nic_major = (u32)ts.tv_sec; + *nic_minor = ts.tv_nsec * 4; +} + +static ktime_t efx_ptp_s_qns_to_ktime_correction(u32 nic_major, u32 nic_minor, + s32 correction) +{ + ktime_t kt; + + nic_minor = DIV_ROUND_CLOSEST(nic_minor, 4); + correction = DIV_ROUND_CLOSEST(correction, 4); + + kt = ktime_set(nic_major, nic_minor); + + if (correction >= 0) + kt = ktime_add_ns(kt, (u64)correction); + else + kt = ktime_sub_ns(kt, (u64)-correction); + return kt; +} + +struct efx_channel *efx_ptp_channel(struct efx_nic *efx) +{ + return efx->ptp_data ? efx->ptp_data->channel : NULL; +} + +static u32 last_sync_timestamp_major(struct efx_nic *efx) +{ + struct efx_channel *channel = efx_ptp_channel(efx); + u32 major = 0; + + if (channel) + major = channel->sync_timestamp_major; + return major; +} + +/* The 8000 series and later can provide the time from the MAC, which is only + * 48 bits long and provides meta-information in the top 2 bits. + */ +static ktime_t +efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx, + struct efx_ptp_data *ptp, + u32 nic_major, u32 nic_minor, + s32 correction) +{ + ktime_t kt = { 0 }; + + if (!(nic_major & 0x80000000)) { + WARN_ON_ONCE(nic_major >> 16); + /* Use the top bits from the latest sync event. 
 */
+		nic_major &= 0xffff;
+		nic_major |= (last_sync_timestamp_major(efx) & 0xffff0000);
+
+		kt = ptp->nic_to_kernel_time(nic_major, nic_minor,
+					     correction);
+	}
+	return kt;
+}
+
+ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	ktime_t kt;
+
+	if (efx_ptp_use_mac_tx_timestamps(efx))
+		kt = efx_ptp_mac_nic_to_ktime_correction(efx, ptp,
+				tx_queue->completed_timestamp_major,
+				tx_queue->completed_timestamp_minor,
+				ptp->ts_corrections.general_tx);
+	else
+		kt = ptp->nic_to_kernel_time(
+				tx_queue->completed_timestamp_major,
+				tx_queue->completed_timestamp_minor,
+				ptp->ts_corrections.general_tx);
+	return kt;
+}
+
 /* Get PTP attributes and set up time conversions */
 static int efx_ptp_get_attributes(struct efx_nic *efx)
 {
@@ -502,31 +627,71 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
 		return rc;
 	}
 
-	if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
+	switch (fmt) {
+	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION:
 		ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
 		ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction;
-	} else if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS) {
+		ptp->nic_time.minor_max = 1 << 27;
+		ptp->nic_time.sync_event_minor_shift = 19;
+		break;
+	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS:
 		ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns;
 		ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction;
-	} else {
+		ptp->nic_time.minor_max = 1000000000;
+		ptp->nic_time.sync_event_minor_shift = 22;
+		break;
+	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS:
+		ptp->ns_to_nic_time = efx_ptp_ns_to_s_qns;
+		ptp->nic_to_kernel_time = efx_ptp_s_qns_to_ktime_correction;
+		ptp->nic_time.minor_max = 4000000000UL;
+		ptp->nic_time.sync_event_minor_shift = 24;
+		break;
+	default:
 		return -ERANGE;
 	}
 
-	ptp->time_format = fmt;
-
-	/* MC_CMD_PTP_OP_GET_ATTRIBUTES is an extended version of an older
-	 * operation MC_CMD_PTP_OP_GET_TIME_FORMAT that also returns a value
-	 * to use for the minimum acceptable corrected synchronization window.
+	/* Precalculate acceptable difference between the minor time in the
+	 * packet prefix and the last MCDI time sync event. We expect the
+	 * packet prefix timestamp to be after the sync event by up to one
+	 * sync event interval (0.25s), but we allow it to exceed this by a
+	 * fuzz factor of 0.1s.
+	 */
+	ptp->nic_time.sync_event_diff_min = ptp->nic_time.minor_max
+					    - (ptp->nic_time.minor_max / 10);
+	ptp->nic_time.sync_event_diff_max = (ptp->nic_time.minor_max / 4)
+					    + (ptp->nic_time.minor_max / 10);
+
+	/* MC_CMD_PTP_OP_GET_ATTRIBUTES has been extended twice from an older
+	 * operation, MC_CMD_PTP_OP_GET_TIME_FORMAT. The operation may now
+	 * return a value to use for the minimum acceptable corrected
+	 * synchronization window and may return further capabilities.
	 * If we have the extra information store it. For older firmware that
	 * does not implement the extended command use the default value.
*/ - if (rc == 0 && out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN) + if (rc == 0 && + out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST) ptp->min_synchronisation_ns = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN); else ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS; + if (rc == 0 && + out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN) + ptp->capabilities = MCDI_DWORD(outbuf, + PTP_OUT_GET_ATTRIBUTES_CAPABILITIES); + else + ptp->capabilities = 0; + + /* Set up the shift for conversion between frequency + * adjustments in parts-per-billion and the fixed-point + * fractional ns format that the adapter uses. + */ + if (ptp->capabilities & (1 << MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN)) + ptp->adjfreq_ppb_shift = PPB_SHIFT_FP44; + else + ptp->adjfreq_ppb_shift = PPB_SHIFT_FP40; + return 0; } @@ -534,8 +699,9 @@ static int efx_ptp_get_attributes(struct efx_nic *efx) static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN); int rc; + size_t out_len; /* Get the timestamp corrections from the NIC. If this operation is * not supported (older NICs) then no correction is required. @@ -545,21 +711,37 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx) MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), NULL); + outbuf, sizeof(outbuf), &out_len); if (rc == 0) { - efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf, + efx->ptp_data->ts_corrections.ptp_tx = MCDI_DWORD(outbuf, PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT); - efx->ptp_data->ts_corrections.rx = MCDI_DWORD(outbuf, + efx->ptp_data->ts_corrections.ptp_rx = MCDI_DWORD(outbuf, PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE); efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf, PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT); efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf, PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN); + + if (out_len >= MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN) { + efx->ptp_data->ts_corrections.general_tx = MCDI_DWORD( + outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX); + efx->ptp_data->ts_corrections.general_rx = MCDI_DWORD( + outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX); + } else { + efx->ptp_data->ts_corrections.general_tx = + efx->ptp_data->ts_corrections.ptp_tx; + efx->ptp_data->ts_corrections.general_rx = + efx->ptp_data->ts_corrections.ptp_rx; + } } else if (rc == -EINVAL) { - efx->ptp_data->ts_corrections.tx = 0; - efx->ptp_data->ts_corrections.rx = 0; + efx->ptp_data->ts_corrections.ptp_tx = 0; + efx->ptp_data->ts_corrections.ptp_rx = 0; efx->ptp_data->ts_corrections.pps_out = 0; efx->ptp_data->ts_corrections.pps_in = 0; + efx->ptp_data->ts_corrections.general_tx = 0; + efx->ptp_data->ts_corrections.general_rx = 0; } else { efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf, sizeof(outbuf), rc); @@ -873,8 +1055,24 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) return rc; } +/* Transmit a PTP packet via the dedicated hardware timestamped queue. */ +static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb) +{ + struct efx_ptp_data *ptp_data = efx->ptp_data; + struct efx_tx_queue *tx_queue; + u8 type = skb->ip_summed == CHECKSUM_PARTIAL ? 
EFX_TXQ_TYPE_OFFLOAD : 0;
+
+	tx_queue = &ptp_data->channel->tx_queue[type];
+	if (tx_queue && tx_queue->timestamping) {
+		efx_enqueue_skb(tx_queue, skb);
+	} else {
+		WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
+		dev_kfree_skb_any(skb);
+	}
+}
+
 /* Transmit a PTP packet, via the MCDI interface, to the wire. */
-static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
+static void efx_ptp_xmit_skb_mc(struct efx_nic *efx, struct sk_buff *skb)
 {
 	struct efx_ptp_data *ptp_data = efx->ptp_data;
 	struct skb_shared_hwtstamps timestamps;
@@ -910,16 +1108,16 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
 	timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
 		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
 		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
-		ptp_data->ts_corrections.tx);
+		ptp_data->ts_corrections.ptp_tx);
 
 	skb_tstamp_tx(skb, &timestamps);
 
 	rc = 0;
 
 fail:
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 
-	return rc;
+	return;
 }
 
 static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
@@ -1189,7 +1387,7 @@ static void efx_ptp_worker(struct work_struct *work)
 	efx_ptp_process_events(efx, &tempq);
 
 	while ((skb = skb_dequeue(&ptp_data->txq)))
-		efx_ptp_xmit_skb(efx, skb);
+		ptp_data->xmit_skb(efx, skb);
 
 	while ((skb = __skb_dequeue(&tempq)))
 		efx_ptp_process_rx(efx, skb);
@@ -1239,6 +1437,14 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
 		goto fail2;
 	}
 
+	if (efx_ptp_use_mac_tx_timestamps(efx)) {
+		ptp->xmit_skb = efx_ptp_xmit_skb_queue;
+		/* Request sync events on this channel. */
+		channel->sync_events_state = SYNC_EVENTS_QUIESCENT;
+	} else {
+		ptp->xmit_skb = efx_ptp_xmit_skb_mc;
+	}
+
 	INIT_WORK(&ptp->work, efx_ptp_worker);
 	ptp->config.flags = 0;
 	ptp->config.tx_type = HWTSTAMP_TX_OFF;
@@ -1303,11 +1509,21 @@ fail1:
 static int efx_ptp_probe_channel(struct efx_channel *channel)
 {
 	struct efx_nic *efx = channel->efx;
+	int rc;
 
 	channel->irq_moderation_us = 0;
 	channel->rx_queue.core_index = 0;
 
-	return efx_ptp_probe(efx, channel);
+	rc = efx_ptp_probe(efx, channel);
+	/* Failure to probe PTP is not fatal; this channel will just not be
+	 * used for anything.
+	 * In the case of EPERM, efx_ptp_probe will print its own message (in
+	 * efx_ptp_get_attributes()), so we don't need to.
+	 */
+	if (rc && rc != -EPERM)
+		netif_warn(efx, drv, efx->net_dev,
+			   "Failed to probe PTP, rc=%d\n", rc);
+	return 0;
 }
 
 void efx_ptp_remove(struct efx_nic *efx)
@@ -1332,6 +1548,7 @@ void efx_ptp_remove(struct efx_nic *efx)
 	efx_nic_free_buffer(efx, &efx->ptp_data->start);
 
 	kfree(efx->ptp_data);
+	efx->ptp_data = NULL;
 }
 
 static void efx_ptp_remove_channel(struct efx_channel *channel)
@@ -1548,6 +1765,17 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
 	ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
 				     SOF_TIMESTAMPING_RX_HARDWARE |
 				     SOF_TIMESTAMPING_RAW_HARDWARE);
+	/* Check licensed features. If we don't have the license for TX
+	 * timestamps, the NIC will not support them.
+ */ + if (efx_ptp_use_mac_tx_timestamps(efx)) { + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (!(nic_data->licensed_features & + (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) + ts_info->so_timestamping &= + ~SOF_TIMESTAMPING_TX_HARDWARE; + } if (primary && primary->ptp_data && primary->ptp_data->phc_clock) ts_info->phc_index = ptp_clock_index(primary->ptp_data->phc_clock); @@ -1627,7 +1855,7 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp) evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time( EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA), EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA), - ptp->ts_corrections.rx); + ptp->ts_corrections.ptp_rx); evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); list_add_tail(&evt->link, &ptp->evt_list); @@ -1662,9 +1890,11 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); if (!ptp) { - if (net_ratelimit()) + if (!efx->ptp_warned) { netif_warn(efx, drv, efx->net_dev, "Received PTP event but PTP not set up\n"); + efx->ptp_warned = true; + } return; } @@ -1707,9 +1937,20 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev) { + struct efx_nic *efx = channel->efx; + struct efx_ptp_data *ptp = efx->ptp_data; + + /* When extracting the sync timestamp minor value, we should discard + * the least significant two bits. These are not required in order + * to reconstruct full-range timestamps and they are optionally used + * to report status depending on the options supplied when subscribing + * for sync events. + */ channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR); channel->sync_timestamp_minor = - MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_26_19) << 19; + (MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_MS_8BITS) & 0xFC) + << ptp->nic_time.sync_event_minor_shift; + /* if sync events have been disabled then we want to silently ignore * this event, so throw away result. */ @@ -1717,15 +1958,6 @@ void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev) SYNC_EVENTS_VALID); } -/* make some assumptions about the time representation rather than abstract it, - * since we currently only support one type of inline timestamping and only on - * EF10. 
- */ -#define MINOR_TICKS_PER_SECOND 0x8000000 -/* Fuzz factor for sync events to be out of order with RX events */ -#define FUZZ (MINOR_TICKS_PER_SECOND / 10) -#define EXPECTED_SYNC_EVENTS_PER_SECOND 4 - static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) @@ -1743,31 +1975,33 @@ void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, struct sk_buff *skb) { struct efx_nic *efx = channel->efx; + struct efx_ptp_data *ptp = efx->ptp_data; u32 pkt_timestamp_major, pkt_timestamp_minor; u32 diff, carry; struct skb_shared_hwtstamps *timestamps; - pkt_timestamp_minor = (efx_rx_buf_timestamp_minor(efx, - skb_mac_header(skb)) + - (u32) efx->ptp_data->ts_corrections.rx) & - (MINOR_TICKS_PER_SECOND - 1); + if (channel->sync_events_state != SYNC_EVENTS_VALID) + return; + + pkt_timestamp_minor = efx_rx_buf_timestamp_minor(efx, skb_mac_header(skb)); /* get the difference between the packet and sync timestamps, * modulo one second */ - diff = (pkt_timestamp_minor - channel->sync_timestamp_minor) & - (MINOR_TICKS_PER_SECOND - 1); + diff = pkt_timestamp_minor - channel->sync_timestamp_minor; + if (pkt_timestamp_minor < channel->sync_timestamp_minor) + diff += ptp->nic_time.minor_max; + /* do we roll over a second boundary and need to carry the one? */ - carry = channel->sync_timestamp_minor + diff > MINOR_TICKS_PER_SECOND ? + carry = (channel->sync_timestamp_minor >= ptp->nic_time.minor_max - diff) ? 1 : 0; - if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND + - FUZZ) { + if (diff <= ptp->nic_time.sync_event_diff_max) { /* packet is ahead of the sync event by a quarter of a second or * less (allowing for fuzz) */ pkt_timestamp_major = channel->sync_timestamp_major + carry; - } else if (diff >= MINOR_TICKS_PER_SECOND - FUZZ) { + } else if (diff >= ptp->nic_time.sync_event_diff_min) { /* packet is behind the sync event but within the fuzz factor. * This means the RX packet and sync event crossed as they were * placed on the event queue, which can sometimes happen. @@ -1789,7 +2023,9 @@ void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, /* attach the timestamps to the skb */ timestamps = skb_hwtstamps(skb); timestamps->hwtstamp = - efx_ptp_s27_to_ktime(pkt_timestamp_major, pkt_timestamp_minor); + ptp->nic_to_kernel_time(pkt_timestamp_major, + pkt_timestamp_minor, + ptp->ts_corrections.general_rx); } static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) @@ -1807,9 +2043,10 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) else if (delta < -MAX_PPB) delta = -MAX_PPB; - /* Convert ppb to fixed point ns. */ - adjustment_ns = (((s64)delta * PPB_SCALE_WORD) >> - (PPB_EXTRA_BITS + MAX_PPB_BITS)); + /* Convert ppb to fixed point ns taking care to round correctly. 
*/ + adjustment_ns = ((s64)delta * PPB_SCALE_WORD + + (1 << (ptp_data->adjfreq_ppb_shift - 1))) >> + ptp_data->adjfreq_ppb_shift; MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0); @@ -1916,6 +2153,7 @@ static const struct efx_channel_type efx_ptp_channel_type = { .get_name = efx_ptp_get_channel_name, /* no copy operation; there is no need to reallocate this channel */ .receive_skb = efx_ptp_rx, + .want_txqs = efx_ptp_want_txqs, .keep_eventq = false, }; diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index a617f657eae3..ae8645ae4492 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -242,6 +242,14 @@ static int siena_dimension_resources(struct efx_nic *efx) return 0; } +/* On all Falcon-architecture NICs, PFs use BAR 0 for I/O space and BAR 2(&3) + * for memory. + */ +static unsigned int siena_mem_bar(struct efx_nic *efx) +{ + return 2; +} + static unsigned int siena_mem_map_size(struct efx_nic *efx) { return FR_CZ_MC_TREG_SMEM + @@ -547,7 +555,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx) dma_stats = efx->stats_buffer.addr; - generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; + generation_end = dma_stats[efx->num_mac_stats - 1]; if (generation_end == EFX_MC_STATS_GENERATION_INVALID) return 0; rmb(); @@ -950,7 +958,7 @@ fail: const struct efx_nic_type siena_a0_nic_type = { .is_vf = false, - .mem_bar = EFX_MEM_BAR, + .mem_bar = siena_mem_bar, .mem_map_size = siena_mem_map_size, .probe = siena_probe_nic, .remove = siena_remove_nic, diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 9937a2450e57..cece961f2e82 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -77,9 +77,23 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, } if (buffer->flags & EFX_TX_BUF_SKB) { + struct sk_buff *skb = (struct sk_buff *)buffer->skb; + EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl); (*pkts_compl)++; - (*bytes_compl) += buffer->skb->len; + (*bytes_compl) += skb->len; + if (tx_queue->timestamping && + (tx_queue->completed_timestamp_major || + tx_queue->completed_timestamp_minor)) { + struct skb_shared_hwtstamps hwtstamp; + + hwtstamp.hwtstamp = + efx_ptp_nic_to_kernel_time(tx_queue); + skb_tstamp_tx(skb, &hwtstamp); + + tx_queue->completed_timestamp_major = 0; + tx_queue->completed_timestamp_minor = 0; + } dev_consume_skb_any((struct sk_buff *)buffer->skb); netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, "TX queue %d transmission id %x complete\n", @@ -828,6 +842,11 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) tx_queue->old_read_count = 0; tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; tx_queue->xmit_more_available = false; + tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) && + tx_queue->channel == efx_ptp_channel(efx)); + tx_queue->completed_desc_ptr = tx_queue->ptr_mask; + tx_queue->completed_timestamp_major = 0; + tx_queue->completed_timestamp_minor = 0; /* Set up default function pointers. These may get replaced by * efx_nic_init_tx() based off NIC/queue capabilities. diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig new file mode 100644 index 000000000000..6bcfe27fc560 --- /dev/null +++ b/drivers/net/ethernet/socionext/Kconfig @@ -0,0 +1,34 @@ +config NET_VENDOR_SOCIONEXT + bool "Socionext ethernet drivers" + default y + ---help--- + Option to select ethernet drivers for Socionext platforms. 
+ + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Socionext devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_SOCIONEXT + +config SNI_AVE + tristate "Socionext AVE ethernet support" + depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF + select PHYLIB + ---help--- + Driver for gigabit ethernet MACs, called AVE, in the + Socionext UniPhier family. + +config SNI_NETSEC + tristate "Socionext NETSEC ethernet support" + depends on (ARCH_SYNQUACER || COMPILE_TEST) && OF + select PHYLIB + select MII + ---help--- + Enable to add support for the SocioNext NetSec Gigabit Ethernet + controller + PHY, as found on the Synquacer SC2A11 SoC + + To compile this driver as a module, choose M here: the module will be + called netsec. If unsure, say N. + +endif #NET_VENDOR_SOCIONEXT diff --git a/drivers/net/ethernet/socionext/Makefile b/drivers/net/ethernet/socionext/Makefile new file mode 100644 index 000000000000..7fd837a999fd --- /dev/null +++ b/drivers/net/ethernet/socionext/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for all ethernet ip drivers on Socionext platforms +# +obj-$(CONFIG_SNI_AVE) += sni_ave.o +obj-$(CONFIG_SNI_NETSEC) += netsec.o diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c new file mode 100644 index 000000000000..f4c0b02ddad8 --- /dev/null +++ b/drivers/net/ethernet/socionext/netsec.c @@ -0,0 +1,1777 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/types.h> +#include <linux/clk.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/acpi.h> +#include <linux/of_mdio.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/io.h> + +#include <net/tcp.h> +#include <net/ip6_checksum.h> + +#define NETSEC_REG_SOFT_RST 0x104 +#define NETSEC_REG_COM_INIT 0x120 + +#define NETSEC_REG_TOP_STATUS 0x200 +#define NETSEC_IRQ_RX BIT(1) +#define NETSEC_IRQ_TX BIT(0) + +#define NETSEC_REG_TOP_INTEN 0x204 +#define NETSEC_REG_INTEN_SET 0x234 +#define NETSEC_REG_INTEN_CLR 0x238 + +#define NETSEC_REG_NRM_TX_STATUS 0x400 +#define NETSEC_REG_NRM_TX_INTEN 0x404 +#define NETSEC_REG_NRM_TX_INTEN_SET 0x428 +#define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c +#define NRM_TX_ST_NTOWNR BIT(17) +#define NRM_TX_ST_TR_ERR BIT(16) +#define NRM_TX_ST_TXDONE BIT(15) +#define NRM_TX_ST_TMREXP BIT(14) + +#define NETSEC_REG_NRM_RX_STATUS 0x440 +#define NETSEC_REG_NRM_RX_INTEN 0x444 +#define NETSEC_REG_NRM_RX_INTEN_SET 0x468 +#define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c +#define NRM_RX_ST_RC_ERR BIT(16) +#define NRM_RX_ST_PKTCNT BIT(15) +#define NRM_RX_ST_TMREXP BIT(14) + +#define NETSEC_REG_PKT_CMD_BUF 0xd0 + +#define NETSEC_REG_CLK_EN 0x100 + +#define NETSEC_REG_PKT_CTRL 0x140 + +#define NETSEC_REG_DMA_TMR_CTRL 0x20c +#define NETSEC_REG_F_TAIKI_MC_VER 0x22c +#define NETSEC_REG_F_TAIKI_VER 0x230 +#define NETSEC_REG_DMA_HM_CTRL 0x214 +#define NETSEC_REG_DMA_MH_CTRL 0x220 +#define NETSEC_REG_ADDR_DIS_CORE 0x218 +#define NETSEC_REG_DMAC_HM_CMD_BUF 0x210 +#define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c + +#define NETSEC_REG_NRM_TX_PKTCNT 0x410 + +#define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414 +#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418 + +#define NETSEC_REG_NRM_TX_TMR 0x41c + +#define NETSEC_REG_NRM_RX_PKTCNT 0x454 +#define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458 +#define NETSEC_REG_NRM_TX_TXINT_TMR 0x420 +#define NETSEC_REG_NRM_RX_RXINT_TMR 
0x460 + +#define NETSEC_REG_NRM_RX_TMR 0x45c + +#define NETSEC_REG_NRM_TX_DESC_START_UP 0x434 +#define NETSEC_REG_NRM_TX_DESC_START_LW 0x408 +#define NETSEC_REG_NRM_RX_DESC_START_UP 0x474 +#define NETSEC_REG_NRM_RX_DESC_START_LW 0x448 + +#define NETSEC_REG_NRM_TX_CONFIG 0x430 +#define NETSEC_REG_NRM_RX_CONFIG 0x470 + +#define MAC_REG_STATUS 0x1024 +#define MAC_REG_DATA 0x11c0 +#define MAC_REG_CMD 0x11c4 +#define MAC_REG_FLOW_TH 0x11cc +#define MAC_REG_INTF_SEL 0x11d4 +#define MAC_REG_DESC_INIT 0x11fc +#define MAC_REG_DESC_SOFT_RST 0x1204 +#define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500 + +#define GMAC_REG_MCR 0x0000 +#define GMAC_REG_MFFR 0x0004 +#define GMAC_REG_GAR 0x0010 +#define GMAC_REG_GDR 0x0014 +#define GMAC_REG_FCR 0x0018 +#define GMAC_REG_BMR 0x1000 +#define GMAC_REG_RDLAR 0x100c +#define GMAC_REG_TDLAR 0x1010 +#define GMAC_REG_OMR 0x1018 + +#define MHZ(n) ((n) * 1000 * 1000) + +#define NETSEC_TX_SHIFT_OWN_FIELD 31 +#define NETSEC_TX_SHIFT_LD_FIELD 30 +#define NETSEC_TX_SHIFT_DRID_FIELD 24 +#define NETSEC_TX_SHIFT_PT_FIELD 21 +#define NETSEC_TX_SHIFT_TDRID_FIELD 16 +#define NETSEC_TX_SHIFT_CC_FIELD 15 +#define NETSEC_TX_SHIFT_FS_FIELD 9 +#define NETSEC_TX_LAST 8 +#define NETSEC_TX_SHIFT_CO 7 +#define NETSEC_TX_SHIFT_SO 6 +#define NETSEC_TX_SHIFT_TRS_FIELD 4 + +#define NETSEC_RX_PKT_OWN_FIELD 31 +#define NETSEC_RX_PKT_LD_FIELD 30 +#define NETSEC_RX_PKT_SDRID_FIELD 24 +#define NETSEC_RX_PKT_FR_FIELD 23 +#define NETSEC_RX_PKT_ER_FIELD 21 +#define NETSEC_RX_PKT_ERR_FIELD 16 +#define NETSEC_RX_PKT_TDRID_FIELD 12 +#define NETSEC_RX_PKT_FS_FIELD 9 +#define NETSEC_RX_PKT_LS_FIELD 8 +#define NETSEC_RX_PKT_CO_FIELD 6 + +#define NETSEC_RX_PKT_ERR_MASK 3 + +#define NETSEC_MAX_TX_PKT_LEN 1518 +#define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018 + +#define NETSEC_RING_GMAC 15 +#define NETSEC_RING_MAX 2 + +#define NETSEC_TCP_SEG_LEN_MAX 1460 +#define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960 + +#define NETSEC_RX_CKSUM_NOTAVAIL 0 +#define NETSEC_RX_CKSUM_OK 1 +#define NETSEC_RX_CKSUM_NG 2 + +#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20) +#define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4) + +#define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20) +#define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19) + +#define NETSEC_INT_PKTCNT_MAX 2047 + +#define NETSEC_FLOW_START_TH_MAX 95 +#define NETSEC_FLOW_STOP_TH_MAX 95 +#define NETSEC_FLOW_PAUSE_TIME_MIN 5 + +#define NETSEC_CLK_EN_REG_DOM_ALL 0x3f + +#define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28) +#define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27) +#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3) +#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2) +#define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1) +#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0) + +#define NETSEC_CLK_EN_REG_DOM_G BIT(5) +#define NETSEC_CLK_EN_REG_DOM_C BIT(1) +#define NETSEC_CLK_EN_REG_DOM_D BIT(0) + +#define NETSEC_COM_INIT_REG_DB BIT(2) +#define NETSEC_COM_INIT_REG_CLS BIT(1) +#define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \ + NETSEC_COM_INIT_REG_DB) + +#define NETSEC_SOFT_RST_REG_RESET 0 +#define NETSEC_SOFT_RST_REG_RUN BIT(31) + +#define NETSEC_DMA_CTRL_REG_STOP 1 +#define MH_CTRL__MODE_TRANS BIT(20) + +#define NETSEC_GMAC_CMD_ST_READ 0 +#define NETSEC_GMAC_CMD_ST_WRITE BIT(28) +#define NETSEC_GMAC_CMD_ST_BUSY BIT(31) + +#define NETSEC_GMAC_BMR_REG_COMMON 0x00412080 +#define NETSEC_GMAC_BMR_REG_RESET 0x00020181 +#define NETSEC_GMAC_BMR_REG_SWR 0x00000001 + +#define NETSEC_GMAC_OMR_REG_ST BIT(13) +#define NETSEC_GMAC_OMR_REG_SR BIT(1) + +#define NETSEC_GMAC_MCR_REG_IBN BIT(30) +#define NETSEC_GMAC_MCR_REG_CST BIT(25) 
+#define NETSEC_GMAC_MCR_REG_JE BIT(20) +#define NETSEC_MCR_PS BIT(15) +#define NETSEC_GMAC_MCR_REG_FES BIT(14) +#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c +#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c + +#define NETSEC_FCR_RFE BIT(2) +#define NETSEC_FCR_TFE BIT(1) + +#define NETSEC_GMAC_GAR_REG_GW BIT(1) +#define NETSEC_GMAC_GAR_REG_GB BIT(0) + +#define NETSEC_GMAC_GAR_REG_SHIFT_PA 11 +#define NETSEC_GMAC_GAR_REG_SHIFT_GR 6 +#define GMAC_REG_SHIFT_CR_GAR 2 + +#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2 +#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3 +#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0 +#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1 +#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4 +#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5 + +#define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000 +#define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000 + +#define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000 + +#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31) +#define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30) +#define NETSEC_REG_DESC_TMR_MODE 4 +#define NETSEC_REG_DESC_ENDIAN 0 + +#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1 +#define NETSEC_MAC_DESC_INIT_REG_INIT 1 + +#define NETSEC_EEPROM_MAC_ADDRESS 0x00 +#define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08 +#define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C +#define NETSEC_EEPROM_HM_ME_SIZE 0x10 +#define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14 +#define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18 +#define NETSEC_EEPROM_MH_ME_SIZE 0x1C +#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20 +#define NETSEC_EEPROM_PKT_ME_SIZE 0x24 + +#define DESC_NUM 128 +#define NAPI_BUDGET (DESC_NUM / 2) + +#define DESC_SZ sizeof(struct netsec_de) + +#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000) + +enum ring_id { + NETSEC_RING_TX = 0, + NETSEC_RING_RX +}; + +struct netsec_desc { + struct sk_buff *skb; + dma_addr_t dma_addr; + void *addr; + u16 len; +}; + +struct netsec_desc_ring { + dma_addr_t desc_dma; + struct netsec_desc *desc; + void *vaddr; + u16 pkt_cnt; + u16 head, tail; +}; + +struct netsec_priv { + struct netsec_desc_ring desc_ring[NETSEC_RING_MAX]; + struct ethtool_coalesce et_coalesce; + spinlock_t reglock; /* protect reg access */ + struct napi_struct napi; + phy_interface_t phy_interface; + struct net_device *ndev; + struct device_node *phy_np; + struct phy_device *phydev; + struct mii_bus *mii_bus; + void __iomem *ioaddr; + void __iomem *eeprom_base; + struct device *dev; + struct clk *clk; + u32 msg_enable; + u32 freq; + bool rx_cksum_offload_flag; +}; + +struct netsec_de { /* Netsec Descriptor layout */ + u32 attr; + u32 data_buf_addr_up; + u32 data_buf_addr_lw; + u32 buf_len_info; +}; + +struct netsec_tx_pkt_ctrl { + u16 tcp_seg_len; + bool tcp_seg_offload_flag; + bool cksum_offload_flag; +}; + +struct netsec_rx_pkt_info { + int rx_cksum_result; + int err_code; + bool err_flag; +}; + +static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val) +{ + writel(val, priv->ioaddr + reg_addr); +} + +static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr) +{ + return readl(priv->ioaddr + reg_addr); +} + +/************* MDIO BUS OPS FOLLOW *************/ + +#define TIMEOUT_SPINS_MAC 1000 +#define TIMEOUT_SECONDARY_MS_MAC 100 + +static u32 netsec_clk_type(u32 freq) +{ + if (freq < MHZ(35)) + return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ; + if (freq < MHZ(60)) + return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ; + if (freq < MHZ(100)) + return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ; + if (freq < MHZ(150)) + return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ; + if (freq < MHZ(250)) + return 
NETSEC_GMAC_GAR_REG_CR_150_250_MHZ; + + return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ; +} + +static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask) +{ + u32 timeout = TIMEOUT_SPINS_MAC; + + while (--timeout && netsec_read(priv, addr) & mask) + cpu_relax(); + if (timeout) + return 0; + + timeout = TIMEOUT_SECONDARY_MS_MAC; + while (--timeout && netsec_read(priv, addr) & mask) + usleep_range(1000, 2000); + + if (timeout) + return 0; + + netdev_WARN(priv->ndev, "%s: timeout\n", __func__); + + return -ETIMEDOUT; +} + +static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value) +{ + netsec_write(priv, MAC_REG_DATA, value); + netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE); + return netsec_wait_while_busy(priv, + MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY); +} + +static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read) +{ + int ret; + + netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ); + ret = netsec_wait_while_busy(priv, + MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY); + if (ret) + return ret; + + *read = netsec_read(priv, MAC_REG_DATA); + + return 0; +} + +static int netsec_mac_wait_while_busy(struct netsec_priv *priv, + u32 addr, u32 mask) +{ + u32 timeout = TIMEOUT_SPINS_MAC; + int ret, data; + + do { + ret = netsec_mac_read(priv, addr, &data); + if (ret) + break; + cpu_relax(); + } while (--timeout && (data & mask)); + + if (timeout) + return 0; + + timeout = TIMEOUT_SECONDARY_MS_MAC; + do { + usleep_range(1000, 2000); + + ret = netsec_mac_read(priv, addr, &data); + if (ret) + break; + cpu_relax(); + } while (--timeout && (data & mask)); + + if (timeout && !ret) + return 0; + + netdev_WARN(priv->ndev, "%s: timeout\n", __func__); + + return -ETIMEDOUT; +} + +static int netsec_mac_update_to_phy_state(struct netsec_priv *priv) +{ + struct phy_device *phydev = priv->ndev->phydev; + u32 value = 0; + + value = phydev->duplex ? 
NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON : + NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON; + + if (phydev->speed != SPEED_1000) + value |= NETSEC_MCR_PS; + + if (priv->phy_interface != PHY_INTERFACE_MODE_GMII && + phydev->speed == SPEED_100) + value |= NETSEC_GMAC_MCR_REG_FES; + + value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE; + + if (phy_interface_mode_is_rgmii(priv->phy_interface)) + value |= NETSEC_GMAC_MCR_REG_IBN; + + if (netsec_mac_write(priv, GMAC_REG_MCR, value)) + return -ETIMEDOUT; + + return 0; +} + +static int netsec_phy_write(struct mii_bus *bus, + int phy_addr, int reg, u16 val) +{ + struct netsec_priv *priv = bus->priv; + + if (netsec_mac_write(priv, GMAC_REG_GDR, val)) + return -ETIMEDOUT; + if (netsec_mac_write(priv, GMAC_REG_GAR, + phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA | + reg << NETSEC_GMAC_GAR_REG_SHIFT_GR | + NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB | + (netsec_clk_type(priv->freq) << + GMAC_REG_SHIFT_CR_GAR))) + return -ETIMEDOUT; + + return netsec_mac_wait_while_busy(priv, GMAC_REG_GAR, + NETSEC_GMAC_GAR_REG_GB); +} + +static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr) +{ + struct netsec_priv *priv = bus->priv; + u32 data; + int ret; + + if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB | + phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA | + reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR | + (netsec_clk_type(priv->freq) << + GMAC_REG_SHIFT_CR_GAR))) + return -ETIMEDOUT; + + ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR, + NETSEC_GMAC_GAR_REG_GB); + if (ret) + return ret; + + ret = netsec_mac_read(priv, GMAC_REG_GDR, &data); + if (ret) + return ret; + + return data; +} + +/************* ETHTOOL_OPS FOLLOW *************/ + +static void netsec_et_get_drvinfo(struct net_device *net_device, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, "netsec", sizeof(info->driver)); + strlcpy(info->bus_info, dev_name(net_device->dev.parent), + sizeof(info->bus_info)); +} + +static int netsec_et_get_coalesce(struct net_device *net_device, + struct ethtool_coalesce *et_coalesce) +{ + struct netsec_priv *priv = netdev_priv(net_device); + + *et_coalesce = priv->et_coalesce; + + return 0; +} + +static int netsec_et_set_coalesce(struct net_device *net_device, + struct ethtool_coalesce *et_coalesce) +{ + struct netsec_priv *priv = netdev_priv(net_device); + + priv->et_coalesce = *et_coalesce; + + if (priv->et_coalesce.tx_coalesce_usecs < 50) + priv->et_coalesce.tx_coalesce_usecs = 50; + if (priv->et_coalesce.tx_max_coalesced_frames < 1) + priv->et_coalesce.tx_max_coalesced_frames = 1; + + netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT, + priv->et_coalesce.tx_max_coalesced_frames); + netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR, + priv->et_coalesce.tx_coalesce_usecs); + netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE); + netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP); + + if (priv->et_coalesce.rx_coalesce_usecs < 50) + priv->et_coalesce.rx_coalesce_usecs = 50; + if (priv->et_coalesce.rx_max_coalesced_frames < 1) + priv->et_coalesce.rx_max_coalesced_frames = 1; + + netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT, + priv->et_coalesce.rx_max_coalesced_frames); + netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR, + priv->et_coalesce.rx_coalesce_usecs); + netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT); + netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP); + + return 0; +} + +static u32 netsec_et_get_msglevel(struct net_device *dev) +{ + struct netsec_priv *priv = 
netdev_priv(dev); + + return priv->msg_enable; +} + +static void netsec_et_set_msglevel(struct net_device *dev, u32 datum) +{ + struct netsec_priv *priv = netdev_priv(dev); + + priv->msg_enable = datum; +} + +static const struct ethtool_ops netsec_ethtool_ops = { + .get_drvinfo = netsec_et_get_drvinfo, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_link = ethtool_op_get_link, + .get_coalesce = netsec_et_get_coalesce, + .set_coalesce = netsec_et_set_coalesce, + .get_msglevel = netsec_et_get_msglevel, + .set_msglevel = netsec_et_set_msglevel, +}; + +/************* NETDEV_OPS FOLLOW *************/ + +static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv, + struct netsec_desc *desc) +{ + struct sk_buff *skb; + + if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) { + skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len); + } else { + desc->len = L1_CACHE_ALIGN(desc->len); + skb = netdev_alloc_skb(priv->ndev, desc->len); + } + if (!skb) + return NULL; + + desc->addr = skb->data; + desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len, + DMA_FROM_DEVICE); + if (dma_mapping_error(priv->dev, desc->dma_addr)) { + dev_kfree_skb_any(skb); + return NULL; + } + return skb; +} + +static void netsec_set_rx_de(struct netsec_priv *priv, + struct netsec_desc_ring *dring, u16 idx, + const struct netsec_desc *desc, + struct sk_buff *skb) +{ + struct netsec_de *de = dring->vaddr + DESC_SZ * idx; + u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) | + (1 << NETSEC_RX_PKT_FS_FIELD) | + (1 << NETSEC_RX_PKT_LS_FIELD); + + if (idx == DESC_NUM - 1) + attr |= (1 << NETSEC_RX_PKT_LD_FIELD); + + de->data_buf_addr_up = upper_32_bits(desc->dma_addr); + de->data_buf_addr_lw = lower_32_bits(desc->dma_addr); + de->buf_len_info = desc->len; + de->attr = attr; + dma_wmb(); + + dring->desc[idx].dma_addr = desc->dma_addr; + dring->desc[idx].addr = desc->addr; + dring->desc[idx].len = desc->len; + dring->desc[idx].skb = skb; +} + +static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv, + struct netsec_desc_ring *dring, + u16 idx, + struct netsec_rx_pkt_info *rxpi, + struct netsec_desc *desc, u16 *len) +{ + struct netsec_de de = {}; + + memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ); + + *len = de.buf_len_info >> 16; + + rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1; + rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3; + rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) & + NETSEC_RX_PKT_ERR_MASK; + *desc = dring->desc[idx]; + return desc->skb; +} + +static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv, + struct netsec_rx_pkt_info *rxpi, + struct netsec_desc *desc, + u16 *len) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; + struct sk_buff *tmp_skb, *skb = NULL; + struct netsec_desc td; + int tail; + + *rxpi = (struct netsec_rx_pkt_info){}; + + td.len = priv->ndev->mtu + 22; + + tmp_skb = netsec_alloc_skb(priv, &td); + + dma_rmb(); + + tail = dring->tail; + + if (!tmp_skb) { + netsec_set_rx_de(priv, dring, tail, &dring->desc[tail], + dring->desc[tail].skb); + } else { + skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len); + netsec_set_rx_de(priv, dring, tail, &td, tmp_skb); + } + + /* move tail ahead */ + dring->tail = (dring->tail + 1) % DESC_NUM; + + dring->pkt_cnt--; + + return skb; +} + +static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; + unsigned int 
pkts, bytes; + + dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT); + + if (dring->pkt_cnt < budget) + budget = dring->pkt_cnt; + + pkts = 0; + bytes = 0; + + while (pkts < budget) { + struct netsec_desc *desc; + struct netsec_de *entry; + int tail, eop; + + tail = dring->tail; + + /* move tail ahead */ + dring->tail = (tail + 1) % DESC_NUM; + + desc = &dring->desc[tail]; + entry = dring->vaddr + DESC_SZ * tail; + + eop = (entry->attr >> NETSEC_TX_LAST) & 1; + + dma_unmap_single(priv->dev, desc->dma_addr, desc->len, + DMA_TO_DEVICE); + if (eop) { + pkts++; + bytes += desc->skb->len; + dev_kfree_skb(desc->skb); + } + *desc = (struct netsec_desc){}; + } + dring->pkt_cnt -= budget; + + priv->ndev->stats.tx_packets += budget; + priv->ndev->stats.tx_bytes += bytes; + + netdev_completed_queue(priv->ndev, budget, bytes); + + return budget; +} + +static int netsec_process_tx(struct netsec_priv *priv, int budget) +{ + struct net_device *ndev = priv->ndev; + int new, done = 0; + + do { + new = netsec_clean_tx_dring(priv, budget); + done += new; + budget -= new; + } while (new); + + if (done && netif_queue_stopped(ndev)) + netif_wake_queue(ndev); + + return done; +} + +static int netsec_process_rx(struct netsec_priv *priv, int budget) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; + struct net_device *ndev = priv->ndev; + struct netsec_rx_pkt_info rx_info; + int done = 0, rx_num = 0; + struct netsec_desc desc; + struct sk_buff *skb; + u16 len; + + while (done < budget) { + if (!rx_num) { + rx_num = netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT); + dring->pkt_cnt += rx_num; + + /* move head 'rx_num' */ + dring->head = (dring->head + rx_num) % DESC_NUM; + + rx_num = dring->pkt_cnt; + if (!rx_num) + break; + } + done++; + rx_num--; + skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len); + if (unlikely(!skb) || rx_info.err_flag) { + netif_err(priv, drv, priv->ndev, + "%s: rx fail err(%d)\n", + __func__, rx_info.err_code); + ndev->stats.rx_dropped++; + continue; + } + + dma_unmap_single(priv->dev, desc.dma_addr, desc.len, + DMA_FROM_DEVICE); + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, priv->ndev); + + if (priv->rx_cksum_offload_flag && + rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) { + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += len; + } + } + + return done; +} + +static int netsec_napi_poll(struct napi_struct *napi, int budget) +{ + struct netsec_priv *priv; + struct net_device *ndev; + int tx, rx, done, todo; + + priv = container_of(napi, struct netsec_priv, napi); + ndev = priv->ndev; + + todo = budget; + do { + if (!todo) + break; + + tx = netsec_process_tx(priv, todo); + todo -= tx; + + if (!todo) + break; + + rx = netsec_process_rx(priv, todo); + todo -= rx; + } while (rx || tx); + + done = budget - todo; + + if (done < budget && napi_complete_done(napi, done)) { + unsigned long flags; + + spin_lock_irqsave(&priv->reglock, flags); + netsec_write(priv, NETSEC_REG_INTEN_SET, + NETSEC_IRQ_RX | NETSEC_IRQ_TX); + spin_unlock_irqrestore(&priv->reglock, flags); + } + + return done; +} + +static void netsec_set_tx_de(struct netsec_priv *priv, + struct netsec_desc_ring *dring, + const struct netsec_tx_pkt_ctrl *tx_ctrl, + const struct netsec_desc *desc, + struct sk_buff *skb) +{ + int idx = dring->head; + struct netsec_de *de; + u32 attr; + + de = dring->vaddr + (DESC_SZ * idx); + + attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) | + (1 << 
NETSEC_TX_SHIFT_PT_FIELD) |
+	      (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
+	      (1 << NETSEC_TX_SHIFT_FS_FIELD) |
+	      (1 << NETSEC_TX_LAST) |
+	      (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
+	      (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
+	      (1 << NETSEC_TX_SHIFT_TRS_FIELD);
+	if (idx == DESC_NUM - 1)
+		attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);
+
+	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
+	de->attr = attr;
+	dma_wmb();
+
+	dring->desc[idx] = *desc;
+	dring->desc[idx].skb = skb;
+
+	/* move head ahead */
+	dring->head = (dring->head + 1) % DESC_NUM;
+}
+
+static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
+					    struct net_device *ndev)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+	struct netsec_tx_pkt_ctrl tx_ctrl = {};
+	struct netsec_desc tx_desc;
+	u16 tso_seg_len = 0;
+	int filled;
+
+	/* differentiate between full/empty ring */
+	if (dring->head >= dring->tail)
+		filled = dring->head - dring->tail;
+	else
+		filled = dring->head + DESC_NUM - dring->tail;
+
+	if (DESC_NUM - filled < 2) { /* if less than 2 available */
+		netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__);
+		netif_stop_queue(priv->ndev);
+		dma_wmb();
+		return NETDEV_TX_BUSY;
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		tx_ctrl.cksum_offload_flag = true;
+
+	if (skb_is_gso(skb))
+		tso_seg_len = skb_shinfo(skb)->gso_size;
+
+	if (tso_seg_len > 0) {
+		if (skb->protocol == htons(ETH_P_IP)) {
+			ip_hdr(skb)->tot_len = 0;
+			tcp_hdr(skb)->check =
+				~tcp_v4_check(0, ip_hdr(skb)->saddr,
+					      ip_hdr(skb)->daddr, 0);
+		} else {
+			ipv6_hdr(skb)->payload_len = 0;
+			tcp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 0, IPPROTO_TCP, 0);
+		}
+
+		tx_ctrl.tcp_seg_offload_flag = true;
+		tx_ctrl.tcp_seg_len = tso_seg_len;
+	}
+
+	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
+					  skb_headlen(skb), DMA_TO_DEVICE);
+	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
+		netif_err(priv, drv, priv->ndev,
+			  "%s: DMA mapping failed\n", __func__);
+		ndev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+	tx_desc.addr = skb->data;
+	tx_desc.len = skb_headlen(skb);
+
+	skb_tx_timestamp(skb);
+	netdev_sent_queue(priv->ndev, skb->len);
+
+	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
+	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
+
+	return NETDEV_TX_OK;
+}
+
+static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[id];
+	struct netsec_desc *desc;
+	u16 idx;
+
+	if (!dring->vaddr || !dring->desc)
+		return;
+
+	for (idx = 0; idx < DESC_NUM; idx++) {
+		desc = &dring->desc[idx];
+		if (!desc->addr)
+			continue;
+
+		dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+				 id == NETSEC_RING_RX ?
DMA_FROM_DEVICE : + DMA_TO_DEVICE); + dev_kfree_skb(desc->skb); + } + + memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM); + memset(dring->vaddr, 0, DESC_SZ * DESC_NUM); + + dring->head = 0; + dring->tail = 0; + dring->pkt_cnt = 0; +} + +static void netsec_free_dring(struct netsec_priv *priv, int id) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[id]; + + if (dring->vaddr) { + dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM, + dring->vaddr, dring->desc_dma); + dring->vaddr = NULL; + } + + kfree(dring->desc); + dring->desc = NULL; +} + +static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[id]; + int ret = 0; + + dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM, + &dring->desc_dma, GFP_KERNEL); + if (!dring->vaddr) { + ret = -ENOMEM; + goto err; + } + + dring->desc = kzalloc(DESC_NUM * sizeof(*dring->desc), GFP_KERNEL); + if (!dring->desc) { + ret = -ENOMEM; + goto err; + } + + return 0; +err: + netsec_free_dring(priv, id); + + return ret; +} + +static int netsec_setup_rx_dring(struct netsec_priv *priv) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; + struct netsec_desc desc; + struct sk_buff *skb; + int n; + + desc.len = priv->ndev->mtu + 22; + + for (n = 0; n < DESC_NUM; n++) { + skb = netsec_alloc_skb(priv, &desc); + if (!skb) { + netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); + return -ENOMEM; + } + netsec_set_rx_de(priv, dring, n, &desc, skb); + } + + return 0; +} + +static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg, + u32 addr_h, u32 addr_l, u32 size) +{ + u64 base = (u64)addr_h << 32 | addr_l; + void __iomem *ucode; + u32 i; + + ucode = ioremap(base, size * sizeof(u32)); + if (!ucode) + return -ENOMEM; + + for (i = 0; i < size; i++) + netsec_write(priv, reg, readl(ucode + i * 4)); + + iounmap(ucode); + return 0; +} + +static int netsec_netdev_load_microcode(struct netsec_priv *priv) +{ + u32 addr_h, addr_l, size; + int err; + + addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H); + addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L); + size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE); + err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF, + addr_h, addr_l, size); + if (err) + return err; + + addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H); + addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L); + size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE); + err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF, + addr_h, addr_l, size); + if (err) + return err; + + addr_h = 0; + addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS); + size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE); + err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF, + addr_h, addr_l, size); + if (err) + return err; + + return 0; +} + +static int netsec_reset_hardware(struct netsec_priv *priv) +{ + u32 value; + int err; + + /* stop DMA engines */ + if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) { + netsec_write(priv, NETSEC_REG_DMA_HM_CTRL, + NETSEC_DMA_CTRL_REG_STOP); + netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, + NETSEC_DMA_CTRL_REG_STOP); + + while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) & + NETSEC_DMA_CTRL_REG_STOP) + cpu_relax(); + + while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) & + NETSEC_DMA_CTRL_REG_STOP) + cpu_relax(); + } + + netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET); + 
netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN); + netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL); + + while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0) + cpu_relax(); + + /* set desc_start addr */ + netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP, + upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma)); + netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW, + lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma)); + + netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP, + upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma)); + netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW, + lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma)); + + /* set normal Tx dring config */ + netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG, + 1 << NETSEC_REG_DESC_ENDIAN); + netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG, + 1 << NETSEC_REG_DESC_ENDIAN); + + err = netsec_netdev_load_microcode(priv); + if (err) { + netif_err(priv, probe, priv->ndev, + "%s: failed to load microcode (%d)\n", __func__, err); + return err; + } + + /* start DMA engines */ + netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1); + netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0); + + usleep_range(1000, 2000); + + if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) & + NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) { + netif_err(priv, probe, priv->ndev, + "microengine start failed\n"); + return -ENXIO; + } + netsec_write(priv, NETSEC_REG_TOP_STATUS, + NETSEC_TOP_IRQ_REG_CODE_LOAD_END); + + value = NETSEC_PKT_CTRL_REG_MODE_NRM; + if (priv->ndev->mtu > ETH_DATA_LEN) + value |= NETSEC_PKT_CTRL_REG_EN_JUMBO; + + /* change to normal mode */ + netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS); + netsec_write(priv, NETSEC_REG_PKT_CTRL, value); + + while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) & + NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0) + cpu_relax(); + + /* clear any pending EMPTY/ERR irq status */ + netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0); + + /* Disable TX & RX intr */ + netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0); + + return 0; +} + +static int netsec_start_gmac(struct netsec_priv *priv) +{ + struct phy_device *phydev = priv->ndev->phydev; + u32 value = 0; + int ret; + + if (phydev->speed != SPEED_1000) + value = (NETSEC_GMAC_MCR_REG_CST | + NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON); + + if (netsec_mac_write(priv, GMAC_REG_MCR, value)) + return -ETIMEDOUT; + if (netsec_mac_write(priv, GMAC_REG_BMR, + NETSEC_GMAC_BMR_REG_RESET)) + return -ETIMEDOUT; + + /* Wait for soft reset */ + usleep_range(1000, 5000); + + ret = netsec_mac_read(priv, GMAC_REG_BMR, &value); + if (ret) + return ret; + if (value & NETSEC_GMAC_BMR_REG_SWR) + return -EAGAIN; + + netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1); + if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1)) + return -ETIMEDOUT; + + netsec_write(priv, MAC_REG_DESC_INIT, 1); + if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1)) + return -ETIMEDOUT; + + if (netsec_mac_write(priv, GMAC_REG_BMR, + NETSEC_GMAC_BMR_REG_COMMON)) + return -ETIMEDOUT; + if (netsec_mac_write(priv, GMAC_REG_RDLAR, + NETSEC_GMAC_RDLAR_REG_COMMON)) + return -ETIMEDOUT; + if (netsec_mac_write(priv, GMAC_REG_TDLAR, + NETSEC_GMAC_TDLAR_REG_COMMON)) + return -ETIMEDOUT; + if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001)) + return -ETIMEDOUT; + + ret = netsec_mac_update_to_phy_state(priv); + if (ret) + return ret; + + ret = netsec_mac_read(priv, GMAC_REG_OMR, &value); + if (ret) + return ret; + + value |= NETSEC_GMAC_OMR_REG_SR; + value |=
NETSEC_GMAC_OMR_REG_ST; + + netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0); + netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0); + + netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce); + + if (netsec_mac_write(priv, GMAC_REG_OMR, value)) + return -ETIMEDOUT; + + return 0; +} + +static int netsec_stop_gmac(struct netsec_priv *priv) +{ + u32 value; + int ret; + + ret = netsec_mac_read(priv, GMAC_REG_OMR, &value); + if (ret) + return ret; + value &= ~NETSEC_GMAC_OMR_REG_SR; + value &= ~NETSEC_GMAC_OMR_REG_ST; + + /* disable all interrupts */ + netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0); + netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0); + + return netsec_mac_write(priv, GMAC_REG_OMR, value); +} + +static void netsec_phy_adjust_link(struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + + if (ndev->phydev->link) + netsec_start_gmac(priv); + else + netsec_stop_gmac(priv); + + phy_print_status(ndev->phydev); +} + +static irqreturn_t netsec_irq_handler(int irq, void *dev_id) +{ + struct netsec_priv *priv = dev_id; + u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS); + unsigned long flags; + + /* Disable interrupts */ + if (status & NETSEC_IRQ_TX) { + val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS); + netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val); + } + if (status & NETSEC_IRQ_RX) { + val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS); + netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val); + } + + spin_lock_irqsave(&priv->reglock, flags); + netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX); + spin_unlock_irqrestore(&priv->reglock, flags); + + napi_schedule(&priv->napi); + + return IRQ_HANDLED; +} + +static int netsec_netdev_open(struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + int ret; + + pm_runtime_get_sync(priv->dev); + + ret = netsec_setup_rx_dring(priv); + if (ret) { + netif_err(priv, probe, priv->ndev, + "%s: fail setup ring\n", __func__); + goto err1; + } + + ret = request_irq(priv->ndev->irq, netsec_irq_handler, + IRQF_SHARED, "netsec", priv); + if (ret) { + netif_err(priv, drv, priv->ndev, "request_irq failed\n"); + goto err2; + } + + if (dev_of_node(priv->dev)) { + if (!of_phy_connect(priv->ndev, priv->phy_np, + netsec_phy_adjust_link, 0, + priv->phy_interface)) { + netif_err(priv, link, priv->ndev, "missing PHY\n"); + ret = -ENODEV; + goto err3; + } + } else { + ret = phy_connect_direct(priv->ndev, priv->phydev, + netsec_phy_adjust_link, + priv->phy_interface); + if (ret) { + netif_err(priv, link, priv->ndev, + "phy_connect_direct() failed (%d)\n", ret); + goto err3; + } + } + + phy_start(ndev->phydev); + + netsec_start_gmac(priv); + napi_enable(&priv->napi); + netif_start_queue(ndev); + + /* Enable RX intr. 
*/ + netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX); + + return 0; +err3: + free_irq(priv->ndev->irq, priv); +err2: + netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); +err1: + pm_runtime_put_sync(priv->dev); + return ret; +} + +static int netsec_netdev_stop(struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + + netif_stop_queue(priv->ndev); + dma_wmb(); + + napi_disable(&priv->napi); + + netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0); + netsec_stop_gmac(priv); + + free_irq(priv->ndev->irq, priv); + + netsec_uninit_pkt_dring(priv, NETSEC_RING_TX); + netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); + + phy_stop(ndev->phydev); + phy_disconnect(ndev->phydev); + + pm_runtime_put_sync(priv->dev); + + return 0; +} + +static int netsec_netdev_init(struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + int ret; + + ret = netsec_alloc_dring(priv, NETSEC_RING_TX); + if (ret) + return ret; + + ret = netsec_alloc_dring(priv, NETSEC_RING_RX); + if (ret) + goto err1; + + ret = netsec_reset_hardware(priv); + if (ret) + goto err2; + + return 0; +err2: + netsec_free_dring(priv, NETSEC_RING_RX); +err1: + netsec_free_dring(priv, NETSEC_RING_TX); + return ret; +} + +static void netsec_netdev_uninit(struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + + netsec_free_dring(priv, NETSEC_RING_RX); + netsec_free_dring(priv, NETSEC_RING_TX); +} + +static int netsec_netdev_set_features(struct net_device *ndev, + netdev_features_t features) +{ + struct netsec_priv *priv = netdev_priv(ndev); + + priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM); + + return 0; +} + +static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr, + int cmd) +{ + return phy_mii_ioctl(ndev->phydev, ifr, cmd); +} + +static const struct net_device_ops netsec_netdev_ops = { + .ndo_init = netsec_netdev_init, + .ndo_uninit = netsec_netdev_uninit, + .ndo_open = netsec_netdev_open, + .ndo_stop = netsec_netdev_stop, + .ndo_start_xmit = netsec_netdev_start_xmit, + .ndo_set_features = netsec_netdev_set_features, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = netsec_netdev_ioctl, +}; + +static int netsec_of_probe(struct platform_device *pdev, + struct netsec_priv *priv) +{ + priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + if (!priv->phy_np) { + dev_err(&pdev->dev, "missing required property 'phy-handle'\n"); + return -EINVAL; + } + + priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */ + if (IS_ERR(priv->clk)) { + dev_err(&pdev->dev, "phy_ref_clk not found\n"); + return PTR_ERR(priv->clk); + } + priv->freq = clk_get_rate(priv->clk); + + return 0; +} + +static int netsec_acpi_probe(struct platform_device *pdev, + struct netsec_priv *priv, u32 *phy_addr) +{ + int ret; + + if (!IS_ENABLED(CONFIG_ACPI)) + return -ENODEV; + + ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr); + if (ret) { + dev_err(&pdev->dev, + "missing required property 'phy-channel'\n"); + return ret; + } + + ret = device_property_read_u32(&pdev->dev, + "socionext,phy-clock-frequency", + &priv->freq); + if (ret) + dev_err(&pdev->dev, + "missing required property 'socionext,phy-clock-frequency'\n"); + return ret; +} + +static void netsec_unregister_mdio(struct netsec_priv *priv) +{ + struct phy_device *phydev = priv->phydev; + + if (!dev_of_node(priv->dev) && phydev) { + phy_device_remove(phydev); + phy_device_free(phydev); + } + + mdiobus_unregister(priv->mii_bus); +} + +static int 
netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr) +{ + struct mii_bus *bus; + int ret; + + bus = devm_mdiobus_alloc(priv->dev); + if (!bus) + return -ENOMEM; + + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev)); + bus->priv = priv; + bus->name = "SNI NETSEC MDIO"; + bus->read = netsec_phy_read; + bus->write = netsec_phy_write; + bus->parent = priv->dev; + priv->mii_bus = bus; + + if (dev_of_node(priv->dev)) { + struct device_node *mdio_node, *parent = dev_of_node(priv->dev); + + mdio_node = of_get_child_by_name(parent, "mdio"); + if (mdio_node) { + parent = mdio_node; + } else { + /* older f/w doesn't populate the mdio subnode, + * allow relaxed upgrade of f/w in due time. + */ + dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n"); + } + + ret = of_mdiobus_register(bus, parent); + of_node_put(mdio_node); + + if (ret) { + dev_err(priv->dev, "mdiobus register err(%d)\n", ret); + return ret; + } + } else { + /* Mask out all PHYs from auto probing. */ + bus->phy_mask = ~0; + ret = mdiobus_register(bus); + if (ret) { + dev_err(priv->dev, "mdiobus register err(%d)\n", ret); + return ret; + } + + priv->phydev = get_phy_device(bus, phy_addr, false); + if (IS_ERR(priv->phydev)) { + ret = PTR_ERR(priv->phydev); + dev_err(priv->dev, "get_phy_device err(%d)\n", ret); + priv->phydev = NULL; + return -ENODEV; + } + + ret = phy_device_register(priv->phydev); + if (ret) { + mdiobus_unregister(bus); + dev_err(priv->dev, + "phy_device_register err(%d)\n", ret); + } + } + + return ret; +} + +static int netsec_probe(struct platform_device *pdev) +{ + struct resource *mmio_res, *eeprom_res, *irq_res; + u8 *mac, macbuf[ETH_ALEN]; + struct netsec_priv *priv; + u32 hw_ver, phy_addr = 0; + struct net_device *ndev; + int ret; + + mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mmio_res) { + dev_err(&pdev->dev, "No MMIO resource found.\n"); + return -ENODEV; + } + + eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!eeprom_res) { + dev_info(&pdev->dev, "No EEPROM resource found.\n"); + return -ENODEV; + } + + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq_res) { + dev_err(&pdev->dev, "No IRQ resource found.\n"); + return -ENODEV; + } + + ndev = alloc_etherdev(sizeof(*priv)); + if (!ndev) + return -ENOMEM; + + priv = netdev_priv(ndev); + + spin_lock_init(&priv->reglock); + SET_NETDEV_DEV(ndev, &pdev->dev); + platform_set_drvdata(pdev, priv); + ndev->irq = irq_res->start; + priv->dev = &pdev->dev; + priv->ndev = ndev; + + priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | + NETIF_MSG_LINK | NETIF_MSG_PROBE; + + priv->phy_interface = device_get_phy_mode(&pdev->dev); + if (priv->phy_interface < 0) { + dev_err(&pdev->dev, "missing required property 'phy-mode'\n"); + ret = -ENODEV; + goto free_ndev; + } + + priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start, + resource_size(mmio_res)); + if (!priv->ioaddr) { + dev_err(&pdev->dev, "devm_ioremap() failed\n"); + ret = -ENXIO; + goto free_ndev; + } + + priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start, + resource_size(eeprom_res)); + if (!priv->eeprom_base) { + dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n"); + ret = -ENXIO; + goto free_ndev; + } + + mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf)); + if (mac) + ether_addr_copy(ndev->dev_addr, mac); + + if (priv->eeprom_base && + (!mac || !is_valid_ether_addr(ndev->dev_addr))) { + void __iomem *macp = priv->eeprom_base + + NETSEC_EEPROM_MAC_ADDRESS; + + ndev->dev_addr[0] = 
readb(macp + 3); + ndev->dev_addr[1] = readb(macp + 2); + ndev->dev_addr[2] = readb(macp + 1); + ndev->dev_addr[3] = readb(macp + 0); + ndev->dev_addr[4] = readb(macp + 7); + ndev->dev_addr[5] = readb(macp + 6); + } + + if (!is_valid_ether_addr(ndev->dev_addr)) { + dev_warn(&pdev->dev, "No MAC address found, using random\n"); + eth_hw_addr_random(ndev); + } + + if (dev_of_node(&pdev->dev)) + ret = netsec_of_probe(pdev, priv); + else + ret = netsec_acpi_probe(pdev, priv, &phy_addr); + if (ret) + goto free_ndev; + + if (!priv->freq) { + dev_err(&pdev->dev, "missing PHY reference clock frequency\n"); + ret = -ENODEV; + goto free_ndev; + } + + /* default for throughput */ + priv->et_coalesce.rx_coalesce_usecs = 500; + priv->et_coalesce.rx_max_coalesced_frames = 8; + priv->et_coalesce.tx_coalesce_usecs = 500; + priv->et_coalesce.tx_max_coalesced_frames = 8; + + ret = device_property_read_u32(&pdev->dev, "max-frame-size", + &ndev->max_mtu); + if (ret < 0) + ndev->max_mtu = ETH_DATA_LEN; + + /* runtime_pm coverage just for probe, open/close also cover it */ + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER); + /* this driver only supports F_TAIKI style NETSEC */ + if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) != + NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) { + ret = -ENODEV; + goto pm_disable; + } + + dev_info(&pdev->dev, "hardware revision %d.%d\n", + hw_ver >> 16, hw_ver & 0xffff); + + netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_BUDGET); + + ndev->netdev_ops = &netsec_netdev_ops; + ndev->ethtool_ops = &netsec_ethtool_ops; + + ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + ndev->hw_features = ndev->features; + + priv->rx_cksum_offload_flag = true; + + ret = netsec_register_mdio(priv, phy_addr); + if (ret) + goto unreg_napi; + + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) + dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n"); + + ret = register_netdev(ndev); + if (ret) { + netif_err(priv, probe, ndev, "register_netdev() failed\n"); + goto unreg_mii; + } + + pm_runtime_put_sync(&pdev->dev); + return 0; + +unreg_mii: + netsec_unregister_mdio(priv); +unreg_napi: + netif_napi_del(&priv->napi); +pm_disable: + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); +free_ndev: + free_netdev(ndev); + dev_err(&pdev->dev, "init failed\n"); + + return ret; +} + +static int netsec_remove(struct platform_device *pdev) +{ + struct netsec_priv *priv = platform_get_drvdata(pdev); + + unregister_netdev(priv->ndev); + + netsec_unregister_mdio(priv); + + netif_napi_del(&priv->napi); + + pm_runtime_disable(&pdev->dev); + free_netdev(priv->ndev); + + return 0; +} + +#ifdef CONFIG_PM +static int netsec_runtime_suspend(struct device *dev) +{ + struct netsec_priv *priv = dev_get_drvdata(dev); + + netsec_write(priv, NETSEC_REG_CLK_EN, 0); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static int netsec_runtime_resume(struct device *dev) +{ + struct netsec_priv *priv = dev_get_drvdata(dev); + + clk_prepare_enable(priv->clk); + + netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D | + NETSEC_CLK_EN_REG_DOM_C | + NETSEC_CLK_EN_REG_DOM_G); + return 0; +} +#endif + +static const struct dev_pm_ops netsec_pm_ops = { + SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL) +}; + +static const struct of_device_id netsec_dt_ids[] = { + { .compatible = "socionext,synquacer-netsec" }, + { } +}; 
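+ +/* For reference, the EEPROM layout decoded by the readb() sequence in + * netsec_probe() above (offsets relative to NETSEC_EEPROM_MAC_ADDRESS): + * offset: 0 1 2 3 4 5 6 7 + * byte: a3 a2 a1 a0 - - a5 a4 + * i.e. the address is stored byte-reversed within two 32-bit words. + */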
+MODULE_DEVICE_TABLE(of, netsec_dt_ids); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id netsec_acpi_ids[] = { + { "SCX0001" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids); +#endif + +static struct platform_driver netsec_driver = { + .probe = netsec_probe, + .remove = netsec_remove, + .driver = { + .name = "netsec", + .pm = &netsec_pm_ops, + .of_match_table = netsec_dt_ids, + .acpi_match_table = ACPI_PTR(netsec_acpi_ids), + }, +}; +module_platform_driver(netsec_driver); + +MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>"); +MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); +MODULE_DESCRIPTION("NETSEC Ethernet driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c new file mode 100644 index 000000000000..111e7ca9df56 --- /dev/null +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -0,0 +1,1736 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * sni_ave.c - Socionext UniPhier AVE ethernet driver + * Copyright 2014 Panasonic Corporation + * Copyright 2015-2017 Socionext Inc. + */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/mii.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> +#include <linux/of_platform.h> +#include <linux/phy.h> +#include <linux/reset.h> +#include <linux/types.h> +#include <linux/u64_stats_sync.h> + +/* General Register Group */ +#define AVE_IDR 0x000 /* ID */ +#define AVE_VR 0x004 /* Version */ +#define AVE_GRR 0x008 /* Global Reset */ +#define AVE_CFGR 0x00c /* Configuration */ + +/* Interrupt Register Group */ +#define AVE_GIMR 0x100 /* Global Interrupt Mask */ +#define AVE_GISR 0x104 /* Global Interrupt Status */ + +/* MAC Register Group */ +#define AVE_TXCR 0x200 /* TX Setup */ +#define AVE_RXCR 0x204 /* RX Setup */ +#define AVE_RXMAC1R 0x208 /* MAC address (lower) */ +#define AVE_RXMAC2R 0x20c /* MAC address (upper) */ +#define AVE_MDIOCTR 0x214 /* MDIO Control */ +#define AVE_MDIOAR 0x218 /* MDIO Address */ +#define AVE_MDIOWDR 0x21c /* MDIO Data */ +#define AVE_MDIOSR 0x220 /* MDIO Status */ +#define AVE_MDIORDR 0x224 /* MDIO Rd Data */ + +/* Descriptor Control Register Group */ +#define AVE_DESCC 0x300 /* Descriptor Control */ +#define AVE_TXDC 0x304 /* TX Descriptor Configuration */ +#define AVE_RXDC0 0x308 /* RX Descriptor Ring0 Configuration */ +#define AVE_IIRQC 0x34c /* Interval IRQ Control */ + +/* Packet Filter Register Group */ +#define AVE_PKTF_BASE 0x800 /* PF Base Address */ +#define AVE_PFMBYTE_BASE 0xd00 /* PF Mask Byte Base Address */ +#define AVE_PFMBIT_BASE 0xe00 /* PF Mask Bit Base Address */ +#define AVE_PFSEL_BASE 0xf00 /* PF Selector Base Address */ +#define AVE_PFEN 0xffc /* Packet Filter Enable */ +#define AVE_PKTF(ent) (AVE_PKTF_BASE + (ent) * 0x40) +#define AVE_PFMBYTE(ent) (AVE_PFMBYTE_BASE + (ent) * 8) +#define AVE_PFMBIT(ent) (AVE_PFMBIT_BASE + (ent) * 4) +#define AVE_PFSEL(ent) (AVE_PFSEL_BASE + (ent) * 4) + +/* 64bit descriptor memory */ +#define AVE_DESC_SIZE_64 12 /* Descriptor Size */ + +#define AVE_TXDM_64 0x1000 /* Tx Descriptor Memory */ +#define AVE_RXDM_64 0x1c00 /* Rx Descriptor Memory */ + +#define AVE_TXDM_SIZE_64 0x0ba0 /* Tx Descriptor Memory Size 3KB */ +#define AVE_RXDM_SIZE_64 0x6000 /* Rx Descriptor Memory Size 24KB */ + +/* 32bit descriptor memory */ +#define AVE_DESC_SIZE_32 8 /* Descriptor Size */ + +#define 
AVE_TXDM_32 0x1000 /* Tx Descriptor Memory */ +#define AVE_RXDM_32 0x1800 /* Rx Descriptor Memory */ + +#define AVE_TXDM_SIZE_32 0x07c0 /* Tx Descriptor Memory Size 2KB */ +#define AVE_RXDM_SIZE_32 0x4000 /* Rx Descriptor Memory Size 16KB */ + +/* RMII Bridge Register Group */ +#define AVE_RSTCTRL 0x8028 /* Reset control */ +#define AVE_RSTCTRL_RMIIRST BIT(16) +#define AVE_LINKSEL 0x8034 /* Link speed setting */ +#define AVE_LINKSEL_100M BIT(0) + +/* AVE_GRR */ +#define AVE_GRR_RXFFR BIT(5) /* Reset RxFIFO */ +#define AVE_GRR_PHYRST BIT(4) /* Reset external PHY */ +#define AVE_GRR_GRST BIT(0) /* Reset all MAC */ + +/* AVE_CFGR */ +#define AVE_CFGR_FLE BIT(31) /* Filter Function */ +#define AVE_CFGR_CHE BIT(30) /* Checksum Function */ +#define AVE_CFGR_MII BIT(27) /* Func mode (1:MII/RMII, 0:RGMII) */ +#define AVE_CFGR_IPFCEN BIT(24) /* IP fragment sum Enable */ + +/* AVE_GISR (common with GIMR) */ +#define AVE_GI_PHY BIT(24) /* PHY interrupt */ +#define AVE_GI_TX BIT(16) /* Tx complete */ +#define AVE_GI_RXERR BIT(8) /* Receive frame more than max size */ +#define AVE_GI_RXOVF BIT(7) /* Overflow at the RxFIFO */ +#define AVE_GI_RXDROP BIT(6) /* Drop packet */ +#define AVE_GI_RXIINT BIT(5) /* Interval interrupt */ + +/* AVE_TXCR */ +#define AVE_TXCR_FLOCTR BIT(18) /* Flow control */ +#define AVE_TXCR_TXSPD_1G BIT(17) +#define AVE_TXCR_TXSPD_100 BIT(16) + +/* AVE_RXCR */ +#define AVE_RXCR_RXEN BIT(30) /* Rx enable */ +#define AVE_RXCR_FDUPEN BIT(22) /* Interface mode */ +#define AVE_RXCR_FLOCTR BIT(21) /* Flow control */ +#define AVE_RXCR_AFEN BIT(19) /* MAC address filter */ +#define AVE_RXCR_DRPEN BIT(18) /* Drop pause frame */ +#define AVE_RXCR_MPSIZ_MASK GENMASK(10, 0) + +/* AVE_MDIOCTR */ +#define AVE_MDIOCTR_RREQ BIT(3) /* Read request */ +#define AVE_MDIOCTR_WREQ BIT(2) /* Write request */ + +/* AVE_MDIOSR */ +#define AVE_MDIOSR_STS BIT(0) /* access status */ + +/* AVE_DESCC */ +#define AVE_DESCC_STATUS_MASK GENMASK(31, 16) +#define AVE_DESCC_RD0 BIT(8) /* Enable Rx descriptor Ring0 */ +#define AVE_DESCC_RDSTP BIT(4) /* Pause Rx descriptor */ +#define AVE_DESCC_TD BIT(0) /* Enable Tx descriptor */ + +/* AVE_TXDC */ +#define AVE_TXDC_SIZE GENMASK(27, 16) /* Size of Tx descriptor */ +#define AVE_TXDC_ADDR GENMASK(11, 0) /* Start address */ +#define AVE_TXDC_ADDR_START 0 + +/* AVE_RXDC0 */ +#define AVE_RXDC0_SIZE GENMASK(30, 16) /* Size of Rx descriptor */ +#define AVE_RXDC0_ADDR GENMASK(14, 0) /* Start address */ +#define AVE_RXDC0_ADDR_START 0 + +/* AVE_IIRQC */ +#define AVE_IIRQC_EN0 BIT(27) /* Enable interval interrupt Ring0 */ +#define AVE_IIRQC_BSCK GENMASK(15, 0) /* Interval count unit */ + +/* Command status for descriptor */ +#define AVE_STS_OWN BIT(31) /* Descriptor ownership */ +#define AVE_STS_INTR BIT(29) /* Request for interrupt */ +#define AVE_STS_OK BIT(27) /* Normal transmit */ +/* TX */ +#define AVE_STS_NOCSUM BIT(28) /* No use HW checksum */ +#define AVE_STS_1ST BIT(26) /* Head of buffer chain */ +#define AVE_STS_LAST BIT(25) /* Tail of buffer chain */ +#define AVE_STS_OWC BIT(21) /* Out of window,Late Collision */ +#define AVE_STS_EC BIT(20) /* Excess collision occurred */ +#define AVE_STS_PKTLEN_TX_MASK GENMASK(15, 0) +/* RX */ +#define AVE_STS_CSSV BIT(21) /* Checksum check performed */ +#define AVE_STS_CSER BIT(20) /* Checksum error detected */ +#define AVE_STS_PKTLEN_RX_MASK GENMASK(10, 0) + +/* Packet filter */ +#define AVE_PFMBYTE_MASK0 (GENMASK(31, 8) | GENMASK(5, 0)) +#define AVE_PFMBYTE_MASK1 GENMASK(25, 0) +#define AVE_PFMBIT_MASK GENMASK(15, 0) + +#define 
AVE_PF_SIZE 17 /* Number of all packet filter */ +#define AVE_PF_MULTICAST_SIZE 7 /* Number of multicast filter */ + +#define AVE_PFNUM_FILTER 0 /* No.0 */ +#define AVE_PFNUM_UNICAST 1 /* No.1 */ +#define AVE_PFNUM_BROADCAST 2 /* No.2 */ +#define AVE_PFNUM_MULTICAST 11 /* No.11-17 */ + +/* NETIF Message control */ +#define AVE_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +/* Parameter for descriptor */ +#define AVE_NR_TXDESC 32 /* Tx descriptor */ +#define AVE_NR_RXDESC 64 /* Rx descriptor */ + +#define AVE_DESC_OFS_CMDSTS 0 +#define AVE_DESC_OFS_ADDRL 4 +#define AVE_DESC_OFS_ADDRU 8 + +/* Parameter for ethernet frame */ +#define AVE_MAX_ETHFRAME 1518 + +/* Parameter for interrupt */ +#define AVE_INTM_COUNT 20 +#define AVE_FORCE_TXINTCNT 1 + +#define IS_DESC_64BIT(p) ((p)->data->is_desc_64bit) + +enum desc_id { + AVE_DESCID_RX, + AVE_DESCID_TX, +}; + +enum desc_state { + AVE_DESC_RX_PERMIT, + AVE_DESC_RX_SUSPEND, + AVE_DESC_START, + AVE_DESC_STOP, +}; + +struct ave_desc { + struct sk_buff *skbs; + dma_addr_t skbs_dma; + size_t skbs_dmalen; +}; + +struct ave_desc_info { + u32 ndesc; /* number of descriptor */ + u32 daddr; /* start address of descriptor */ + u32 proc_idx; /* index of processing packet */ + u32 done_idx; /* index of processed packet */ + struct ave_desc *desc; /* skb info related descriptor */ +}; + +struct ave_soc_data { + bool is_desc_64bit; +}; + +struct ave_stats { + struct u64_stats_sync syncp; + u64 packets; + u64 bytes; + u64 errors; + u64 dropped; + u64 collisions; + u64 fifo_errors; +}; + +struct ave_private { + void __iomem *base; + int irq; + int phy_id; + unsigned int desc_size; + u32 msg_enable; + struct clk *clk; + struct reset_control *rst; + phy_interface_t phy_mode; + struct phy_device *phydev; + struct mii_bus *mdio; + + /* stats */ + struct ave_stats stats_rx; + struct ave_stats stats_tx; + + /* NAPI support */ + struct net_device *ndev; + struct napi_struct napi_rx; + struct napi_struct napi_tx; + + /* descriptor */ + struct ave_desc_info rx; + struct ave_desc_info tx; + + /* flow control */ + int pause_auto; + int pause_rx; + int pause_tx; + + const struct ave_soc_data *data; +}; + +static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry, + int offset) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 addr; + + addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr) + + entry * priv->desc_size + offset; + + return readl(priv->base + addr); +} + +static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id, + int entry) +{ + return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS); +} + +static void ave_desc_write(struct net_device *ndev, enum desc_id id, + int entry, int offset, u32 val) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 addr; + + addr = ((id == AVE_DESCID_TX) ? 
priv->tx.daddr : priv->rx.daddr) + + entry * priv->desc_size + offset; + + writel(val, priv->base + addr); +} + +static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id, + int entry, u32 val) +{ + ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val); +} + +static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id, + int entry, dma_addr_t paddr) +{ + struct ave_private *priv = netdev_priv(ndev); + + ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL, + lower_32_bits(paddr)); + if (IS_DESC_64BIT(priv)) + ave_desc_write(ndev, id, + entry, AVE_DESC_OFS_ADDRU, + upper_32_bits(paddr)); +} + +static u32 ave_irq_disable_all(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 ret; + + ret = readl(priv->base + AVE_GIMR); + writel(0, priv->base + AVE_GIMR); + + return ret; +} + +static void ave_irq_restore(struct net_device *ndev, u32 val) +{ + struct ave_private *priv = netdev_priv(ndev); + + writel(val, priv->base + AVE_GIMR); +} + +static void ave_irq_enable(struct net_device *ndev, u32 bitflag) +{ + struct ave_private *priv = netdev_priv(ndev); + + writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR); + writel(bitflag, priv->base + AVE_GISR); +} + +static void ave_hw_write_macaddr(struct net_device *ndev, + const unsigned char *mac_addr, + int reg1, int reg2) +{ + struct ave_private *priv = netdev_priv(ndev); + + writel(mac_addr[0] | mac_addr[1] << 8 | + mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1); + writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2); +} + +static void ave_hw_read_version(struct net_device *ndev, char *buf, int len) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 major, minor, vr; + + vr = readl(priv->base + AVE_VR); + major = (vr & GENMASK(15, 8)) >> 8; + minor = (vr & GENMASK(7, 0)); + snprintf(buf, len, "v%u.%u", major, minor); +} + +static void ave_ethtool_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct device *dev = ndev->dev.parent; + + strlcpy(info->driver, dev->driver->name, sizeof(info->driver)); + strlcpy(info->bus_info, dev_name(dev), sizeof(info->bus_info)); + ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version)); +} + +static u32 ave_ethtool_get_msglevel(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + + return priv->msg_enable; +} + +static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val) +{ + struct ave_private *priv = netdev_priv(ndev); + + priv->msg_enable = val; +} + +static void ave_ethtool_get_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + + if (ndev->phydev) + phy_ethtool_get_wol(ndev->phydev, wol); +} + +static int ave_ethtool_set_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + int ret; + + if (!ndev->phydev || + (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))) + return -EOPNOTSUPP; + + ret = phy_ethtool_set_wol(ndev->phydev, wol); + if (!ret) + device_set_wakeup_enable(&ndev->dev, !!wol->wolopts); + + return ret; +} + +static void ave_ethtool_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct ave_private *priv = netdev_priv(ndev); + + pause->autoneg = priv->pause_auto; + pause->rx_pause = priv->pause_rx; + pause->tx_pause = priv->pause_tx; +} + +static int ave_ethtool_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct ave_private *priv = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + + 
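/* Cache the requested settings for get_pauseparam(); the MAC flow-control bits themselves are resolved from the advertising mask in ave_phy_adjust_link(). */ +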
if (!phydev) + return -EINVAL; + + priv->pause_auto = pause->autoneg; + priv->pause_rx = pause->rx_pause; + priv->pause_tx = pause->tx_pause; + + phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + if (pause->rx_pause) + phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; + if (pause->tx_pause) + phydev->advertising ^= ADVERTISED_Asym_Pause; + + if (pause->autoneg) { + if (netif_running(ndev)) + phy_start_aneg(phydev); + } + + return 0; +} + +static const struct ethtool_ops ave_ethtool_ops = { + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_drvinfo = ave_ethtool_get_drvinfo, + .nway_reset = phy_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .get_msglevel = ave_ethtool_get_msglevel, + .set_msglevel = ave_ethtool_set_msglevel, + .get_wol = ave_ethtool_get_wol, + .set_wol = ave_ethtool_set_wol, + .get_pauseparam = ave_ethtool_get_pauseparam, + .set_pauseparam = ave_ethtool_set_pauseparam, +}; + +static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum) +{ + struct net_device *ndev = bus->priv; + struct ave_private *priv; + u32 mdioctl, mdiosr; + int ret; + + priv = netdev_priv(ndev); + + /* write address */ + writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR); + + /* read request */ + mdioctl = readl(priv->base + AVE_MDIOCTR); + writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ, + priv->base + AVE_MDIOCTR); + + ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr, + !(mdiosr & AVE_MDIOSR_STS), 20, 2000); + if (ret) { + netdev_err(ndev, "failed to read (phy:%d reg:%x)\n", + phyid, regnum); + return ret; + } + + return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0); +} + +static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum, + u16 val) +{ + struct net_device *ndev = bus->priv; + struct ave_private *priv; + u32 mdioctl, mdiosr; + int ret; + + priv = netdev_priv(ndev); + + /* write address */ + writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR); + + /* write data */ + writel(val, priv->base + AVE_MDIOWDR); + + /* write request */ + mdioctl = readl(priv->base + AVE_MDIOCTR); + writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ, + priv->base + AVE_MDIOCTR); + + ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr, + !(mdiosr & AVE_MDIOSR_STS), 20, 2000); + if (ret) + netdev_err(ndev, "failed to write (phy:%d reg:%x)\n", + phyid, regnum); + + return ret; +} + +static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc, + void *ptr, size_t len, enum dma_data_direction dir, + dma_addr_t *paddr) +{ + dma_addr_t map_addr; + + map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir); + if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr))) + return -ENOMEM; + + desc->skbs_dma = map_addr; + desc->skbs_dmalen = len; + *paddr = map_addr; + + return 0; +} + +static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc, + enum dma_data_direction dir) +{ + if (!desc->skbs_dma) + return; + + dma_unmap_single(ndev->dev.parent, + desc->skbs_dma, desc->skbs_dmalen, dir); + desc->skbs_dma = 0; +} + +/* Prepare Rx descriptor and memory */ +static int ave_rxdesc_prepare(struct net_device *ndev, int entry) +{ + struct ave_private *priv = netdev_priv(ndev); + struct sk_buff *skb; + dma_addr_t paddr; + int ret; + + skb = priv->rx.desc[entry].skbs; + if (!skb) { + skb = netdev_alloc_skb_ip_align(ndev, + AVE_MAX_ETHFRAME); + if (!skb) { + netdev_err(ndev, "can't allocate skb for Rx\n"); + return -ENOMEM; + } + } + + /* 
set disable to cmdsts */ + ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry, + AVE_STS_INTR | AVE_STS_OWN); + + /* map Rx buffer + * Rx buffer set to the Rx descriptor has two restrictions: + * - Rx buffer address is 4 byte aligned. + * - Rx buffer begins with 2 byte headroom, and data will be put from + * (buffer + 2). + * To satisfy this, map from the address with the NET_IP_ALIGN + * headroom added by netdev_alloc_skb_ip_align() backed out again, + * and expand the map size by NET_IP_ALIGN. + */ + ret = ave_dma_map(ndev, &priv->rx.desc[entry], + skb->data - NET_IP_ALIGN, + AVE_MAX_ETHFRAME + NET_IP_ALIGN, + DMA_FROM_DEVICE, &paddr); + if (ret) { + netdev_err(ndev, "can't map skb for Rx\n"); + dev_kfree_skb_any(skb); + return ret; + } + priv->rx.desc[entry].skbs = skb; + + /* set buffer pointer */ + ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr); + + /* set enable to cmdsts */ + ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry, + AVE_STS_INTR | AVE_MAX_ETHFRAME); + + return ret; +} + +/* Switch state of descriptor */ +static int ave_desc_switch(struct net_device *ndev, enum desc_state state) +{ + struct ave_private *priv = netdev_priv(ndev); + int ret = 0; + u32 val; + + switch (state) { + case AVE_DESC_START: + writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC); + break; + + case AVE_DESC_STOP: + writel(0, priv->base + AVE_DESCC); + if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val, + 150, 15000)) { + netdev_err(ndev, "can't stop descriptor\n"); + ret = -EBUSY; + } + break; + + case AVE_DESC_RX_SUSPEND: + val = readl(priv->base + AVE_DESCC); + val |= AVE_DESCC_RDSTP; + val &= ~AVE_DESCC_STATUS_MASK; + writel(val, priv->base + AVE_DESCC); + if (readl_poll_timeout(priv->base + AVE_DESCC, val, + val & (AVE_DESCC_RDSTP << 16), + 150, 150000)) { + netdev_err(ndev, "can't suspend descriptor\n"); + ret = -EBUSY; + } + break; + + case AVE_DESC_RX_PERMIT: + val = readl(priv->base + AVE_DESCC); + val &= ~AVE_DESCC_RDSTP; + val &= ~AVE_DESCC_STATUS_MASK; + writel(val, priv->base + AVE_DESCC); + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int ave_tx_complete(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 proc_idx, done_idx, ndesc, cmdsts; + unsigned int nr_freebuf = 0; + unsigned int tx_packets = 0; + unsigned int tx_bytes = 0; + + proc_idx = priv->tx.proc_idx; + done_idx = priv->tx.done_idx; + ndesc = priv->tx.ndesc; + + /* free pre-stored skb from done_idx to proc_idx */ + while (proc_idx != done_idx) { + cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx); + + /* do nothing if owner is HW (==1 for Tx) */ + if (cmdsts & AVE_STS_OWN) + break; + + /* check Tx status and update statistics */ + if (cmdsts & AVE_STS_OK) { + tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK; + /* success */ + if (cmdsts & AVE_STS_LAST) + tx_packets++; + } else { + /* error */ + if (cmdsts & AVE_STS_LAST) { + priv->stats_tx.errors++; + if (cmdsts & (AVE_STS_OWC | AVE_STS_EC)) + priv->stats_tx.collisions++; + } + } + + /* release skb */ + if (priv->tx.desc[done_idx].skbs) { + ave_dma_unmap(ndev, &priv->tx.desc[done_idx], + DMA_TO_DEVICE); + dev_consume_skb_any(priv->tx.desc[done_idx].skbs); + priv->tx.desc[done_idx].skbs = NULL; + nr_freebuf++; + } + done_idx = (done_idx + 1) % ndesc; + } + + priv->tx.done_idx = done_idx; + + /* update stats */ + u64_stats_update_begin(&priv->stats_tx.syncp); + priv->stats_tx.packets += tx_packets; + priv->stats_tx.bytes += tx_bytes; + u64_stats_update_end(&priv->stats_tx.syncp); + + /*
wake queue for freeing buffer */ + if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf) + netif_wake_queue(ndev); + + return nr_freebuf; +} + +static int ave_rx_receive(struct net_device *ndev, int num) +{ + struct ave_private *priv = netdev_priv(ndev); + unsigned int rx_packets = 0; + unsigned int rx_bytes = 0; + u32 proc_idx, done_idx; + struct sk_buff *skb; + unsigned int pktlen; + int restpkt, npkts; + u32 ndesc, cmdsts; + + proc_idx = priv->rx.proc_idx; + done_idx = priv->rx.done_idx; + ndesc = priv->rx.ndesc; + restpkt = ((proc_idx + ndesc - 1) - done_idx) % ndesc; + + for (npkts = 0; npkts < num; npkts++) { + /* we can't receive more packets, so fill descriptors quickly */ + if (--restpkt < 0) + break; + + cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx); + + /* do nothing if owner is HW (==0 for Rx) */ + if (!(cmdsts & AVE_STS_OWN)) + break; + + if (!(cmdsts & AVE_STS_OK)) { + priv->stats_rx.errors++; + proc_idx = (proc_idx + 1) % ndesc; + continue; + } + + pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK; + + /* get skbuff for rx */ + skb = priv->rx.desc[proc_idx].skbs; + priv->rx.desc[proc_idx].skbs = NULL; + + ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE); + + skb->dev = ndev; + skb_put(skb, pktlen); + skb->protocol = eth_type_trans(skb, ndev); + + if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + rx_packets++; + rx_bytes += pktlen; + + netif_receive_skb(skb); + + proc_idx = (proc_idx + 1) % ndesc; + } + + priv->rx.proc_idx = proc_idx; + + /* update stats */ + u64_stats_update_begin(&priv->stats_rx.syncp); + priv->stats_rx.packets += rx_packets; + priv->stats_rx.bytes += rx_bytes; + u64_stats_update_end(&priv->stats_rx.syncp); + + /* refill the Rx buffers */ + while (proc_idx != done_idx) { + if (ave_rxdesc_prepare(ndev, done_idx)) + break; + done_idx = (done_idx + 1) % ndesc; + } + + priv->rx.done_idx = done_idx; + + return npkts; +} + +static int ave_napi_poll_rx(struct napi_struct *napi, int budget) +{ + struct ave_private *priv; + struct net_device *ndev; + int num; + + priv = container_of(napi, struct ave_private, napi_rx); + ndev = priv->ndev; + + num = ave_rx_receive(ndev, budget); + if (num < budget) { + napi_complete_done(napi, num); + + /* enable Rx interrupt when NAPI finishes */ + ave_irq_enable(ndev, AVE_GI_RXIINT); + } + + return num; +} + +static int ave_napi_poll_tx(struct napi_struct *napi, int budget) +{ + struct ave_private *priv; + struct net_device *ndev; + int num; + + priv = container_of(napi, struct ave_private, napi_tx); + ndev = priv->ndev; + + num = ave_tx_complete(ndev); + napi_complete(napi); + + /* enable Tx interrupt when NAPI finishes */ + ave_irq_enable(ndev, AVE_GI_TX); + + return num; +} + +static void ave_global_reset(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 val; + + /* set config register */ + val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE; + if (!phy_interface_mode_is_rgmii(priv->phy_mode)) + val |= AVE_CFGR_MII; + writel(val, priv->base + AVE_CFGR); + + /* reset RMII register */ + val = readl(priv->base + AVE_RSTCTRL); + val &= ~AVE_RSTCTRL_RMIIRST; + writel(val, priv->base + AVE_RSTCTRL); + + /* assert reset */ + writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR); + msleep(20); + + /* 1st, negate PHY reset only */ + writel(AVE_GRR_GRST, priv->base + AVE_GRR); + msleep(40); + + /* negate reset */ + writel(0, priv->base + AVE_GRR); + msleep(40); + + /* negate RMII register */ + val = readl(priv->base + AVE_RSTCTRL); +
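/* The RMIIRST bit appears to be active-low: it was cleared above to assert the RMII bridge reset and is set here to release it. */ +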
val |= AVE_RSTCTRL_RMIIRST; + writel(val, priv->base + AVE_RSTCTRL); + + ave_irq_disable_all(ndev); +} + +static void ave_rxfifo_reset(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 rxcr_org; + + /* save and disable MAC receive op */ + rxcr_org = readl(priv->base + AVE_RXCR); + writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR); + + /* suspend Rx descriptor */ + ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND); + + /* receive all packets before descriptor starts */ + ave_rx_receive(ndev, priv->rx.ndesc); + + /* assert reset */ + writel(AVE_GRR_RXFFR, priv->base + AVE_GRR); + usleep_range(40, 50); + + /* negate reset */ + writel(0, priv->base + AVE_GRR); + usleep_range(10, 20); + + /* negate interrupt status */ + writel(AVE_GI_RXOVF, priv->base + AVE_GISR); + + /* permit descriptor */ + ave_desc_switch(ndev, AVE_DESC_RX_PERMIT); + + /* restore MAC receive op */ + writel(rxcr_org, priv->base + AVE_RXCR); +} + +static irqreturn_t ave_irq_handler(int irq, void *netdev) +{ + struct net_device *ndev = (struct net_device *)netdev; + struct ave_private *priv = netdev_priv(ndev); + u32 gimr_val, gisr_val; + + gimr_val = ave_irq_disable_all(ndev); + + /* get interrupt status */ + gisr_val = readl(priv->base + AVE_GISR); + + /* PHY */ + if (gisr_val & AVE_GI_PHY) + writel(AVE_GI_PHY, priv->base + AVE_GISR); + + /* check exceeding packet */ + if (gisr_val & AVE_GI_RXERR) { + writel(AVE_GI_RXERR, priv->base + AVE_GISR); + netdev_err(ndev, "receive a packet exceeding frame buffer\n"); + } + + gisr_val &= gimr_val; + if (!gisr_val) + goto exit_isr; + + /* RxFIFO overflow */ + if (gisr_val & AVE_GI_RXOVF) { + priv->stats_rx.fifo_errors++; + ave_rxfifo_reset(ndev); + goto exit_isr; + } + + /* Rx drop */ + if (gisr_val & AVE_GI_RXDROP) { + priv->stats_rx.dropped++; + writel(AVE_GI_RXDROP, priv->base + AVE_GISR); + } + + /* Rx interval */ + if (gisr_val & AVE_GI_RXIINT) { + napi_schedule(&priv->napi_rx); + /* keep Rx interrupt disabled until NAPI finishes */ + gimr_val &= ~AVE_GI_RXIINT; + } + + /* Tx completed */ + if (gisr_val & AVE_GI_TX) { + napi_schedule(&priv->napi_tx); + /* keep Tx interrupt disabled until NAPI finishes */ + gimr_val &= ~AVE_GI_TX; + } + +exit_isr: + ave_irq_restore(ndev, gimr_val); + + return IRQ_HANDLED; +} + +static int ave_pfsel_start(struct net_device *ndev, unsigned int entry) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 val; + + if (WARN_ON(entry > AVE_PF_SIZE)) + return -EINVAL; + + val = readl(priv->base + AVE_PFEN); + writel(val | BIT(entry), priv->base + AVE_PFEN); + + return 0; +} + +static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 val; + + if (WARN_ON(entry > AVE_PF_SIZE)) + return -EINVAL; + + val = readl(priv->base + AVE_PFEN); + writel(val & ~BIT(entry), priv->base + AVE_PFEN); + + return 0; +} + +static int ave_pfsel_set_macaddr(struct net_device *ndev, + unsigned int entry, + const unsigned char *mac_addr, + unsigned int set_size) +{ + struct ave_private *priv = netdev_priv(ndev); + + if (WARN_ON(entry > AVE_PF_SIZE)) + return -EINVAL; + if (WARN_ON(set_size > 6)) + return -EINVAL; + + ave_pfsel_stop(ndev, entry); + + /* set MAC address for the filter */ + ave_hw_write_macaddr(ndev, mac_addr, + AVE_PKTF(entry), AVE_PKTF(entry) + 4); + + /* set byte mask */ + writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0, + priv->base + AVE_PFMBYTE(entry)); + writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4); + + /* set bit
mask filter */ + writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry)); + + /* set selector to ring 0 */ + writel(0, priv->base + AVE_PFSEL(entry)); + + /* restart filter */ + ave_pfsel_start(ndev, entry); + + return 0; +} + +static void ave_pfsel_set_promisc(struct net_device *ndev, + unsigned int entry, u32 rxring) +{ + struct ave_private *priv = netdev_priv(ndev); + + if (WARN_ON(entry > AVE_PF_SIZE)) + return; + + ave_pfsel_stop(ndev, entry); + + /* set byte mask */ + writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry)); + writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4); + + /* set bit mask filter */ + writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry)); + + /* set selector to rxring */ + writel(rxring, priv->base + AVE_PFSEL(entry)); + + ave_pfsel_start(ndev, entry); +} + +static void ave_pfsel_init(struct net_device *ndev) +{ + unsigned char bcast_mac[ETH_ALEN]; + int i; + + eth_broadcast_addr(bcast_mac); + + for (i = 0; i < AVE_PF_SIZE; i++) + ave_pfsel_stop(ndev, i); + + /* promiscuous entry, select ring 0 */ + ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0); + + /* unicast entry */ + ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6); + + /* broadcast entry */ + ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6); +} + +static void ave_phy_adjust_link(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + u32 val, txcr, rxcr, rxcr_org; + u16 rmt_adv = 0, lcl_adv = 0; + u8 cap; + + /* set RGMII speed */ + val = readl(priv->base + AVE_TXCR); + val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G); + + if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000) + val |= AVE_TXCR_TXSPD_1G; + else if (phydev->speed == SPEED_100) + val |= AVE_TXCR_TXSPD_100; + + writel(val, priv->base + AVE_TXCR); + + /* set RMII speed (100M/10M only) */ + if (!phy_interface_is_rgmii(phydev)) { + val = readl(priv->base + AVE_LINKSEL); + if (phydev->speed == SPEED_10) + val &= ~AVE_LINKSEL_100M; + else + val |= AVE_LINKSEL_100M; + writel(val, priv->base + AVE_LINKSEL); + } + + /* check current RXCR/TXCR */ + rxcr = readl(priv->base + AVE_RXCR); + txcr = readl(priv->base + AVE_TXCR); + rxcr_org = rxcr; + + if (phydev->duplex) { + rxcr |= AVE_RXCR_FDUPEN; + + if (phydev->pause) + rmt_adv |= LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + if (phydev->advertising & ADVERTISED_Pause) + lcl_adv |= ADVERTISE_PAUSE_CAP; + if (phydev->advertising & ADVERTISED_Asym_Pause) + lcl_adv |= ADVERTISE_PAUSE_ASYM; + + cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); + if (cap & FLOW_CTRL_TX) + txcr |= AVE_TXCR_FLOCTR; + else + txcr &= ~AVE_TXCR_FLOCTR; + if (cap & FLOW_CTRL_RX) + rxcr |= AVE_RXCR_FLOCTR; + else + rxcr &= ~AVE_RXCR_FLOCTR; + } else { + rxcr &= ~AVE_RXCR_FDUPEN; + rxcr &= ~AVE_RXCR_FLOCTR; + txcr &= ~AVE_TXCR_FLOCTR; + } + + if (rxcr_org != rxcr) { + /* disable Rx mac */ + writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR); + /* change and enable TX/Rx mac */ + writel(txcr, priv->base + AVE_TXCR); + writel(rxcr, priv->base + AVE_RXCR); + } + + phy_print_status(phydev); +} + +static void ave_macaddr_init(struct net_device *ndev) +{ + ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R); + + /* pfsel unicast entry */ + ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6); +} + +static int ave_init(struct net_device *ndev) +{ + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + struct ave_private *priv = netdev_priv(ndev); +
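/* ave_init() is the ndo_init hook and runs from register_netdev(); clocks, resets and the MDIO bus are brought up here so the PHY can be attached before ndo_open. */ +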
struct device *dev = ndev->dev.parent; + struct device_node *np = dev->of_node; + struct device_node *mdio_np; + struct phy_device *phydev; + int ret; + + /* enable clk because of hw access until ndo_open */ + ret = clk_prepare_enable(priv->clk); + if (ret) { + dev_err(dev, "can't enable clock\n"); + return ret; + } + ret = reset_control_deassert(priv->rst); + if (ret) { + dev_err(dev, "can't deassert reset\n"); + goto out_clk_disable; + } + + ave_global_reset(ndev); + + mdio_np = of_get_child_by_name(np, "mdio"); + if (!mdio_np) { + dev_err(dev, "mdio node not found\n"); + ret = -EINVAL; + goto out_reset_assert; + } + ret = of_mdiobus_register(priv->mdio, mdio_np); + of_node_put(mdio_np); + if (ret) { + dev_err(dev, "failed to register mdiobus\n"); + goto out_reset_assert; + } + + phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link); + if (!phydev) { + dev_err(dev, "could not attach to PHY\n"); + ret = -ENODEV; + goto out_mdio_unregister; + } + + priv->phydev = phydev; + + phy_ethtool_get_wol(phydev, &wol); + device_set_wakeup_capable(&ndev->dev, !!wol.supported); + + if (!phy_interface_is_rgmii(phydev)) { + phydev->supported &= ~PHY_GBIT_FEATURES; + phydev->supported |= PHY_BASIC_FEATURES; + } + phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + + phy_attached_info(phydev); + + return 0; + +out_mdio_unregister: + mdiobus_unregister(priv->mdio); +out_reset_assert: + reset_control_assert(priv->rst); +out_clk_disable: + clk_disable_unprepare(priv->clk); + + return ret; +} + +static void ave_uninit(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + + phy_disconnect(priv->phydev); + mdiobus_unregister(priv->mdio); + + /* disable clk because of hw access after ndo_stop */ + reset_control_assert(priv->rst); + clk_disable_unprepare(priv->clk); +} + +static int ave_open(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + int entry; + int ret; + u32 val; + + ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name, + ndev); + if (ret) + return ret; + + priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc), + GFP_KERNEL); + if (!priv->tx.desc) { + ret = -ENOMEM; + goto out_free_irq; + } + + priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc), + GFP_KERNEL); + if (!priv->rx.desc) { + kfree(priv->tx.desc); + ret = -ENOMEM; + goto out_free_irq; + } + + /* initialize Tx work and descriptor */ + priv->tx.proc_idx = 0; + priv->tx.done_idx = 0; + for (entry = 0; entry < priv->tx.ndesc; entry++) { + ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0); + ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0); + } + writel(AVE_TXDC_ADDR_START | + (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE), + priv->base + AVE_TXDC); + + /* initialize Rx work and descriptor */ + priv->rx.proc_idx = 0; + priv->rx.done_idx = 0; + for (entry = 0; entry < priv->rx.ndesc; entry++) { + if (ave_rxdesc_prepare(ndev, entry)) + break; + } + writel(AVE_RXDC0_ADDR_START | + (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE), + priv->base + AVE_RXDC0); + + ave_desc_switch(ndev, AVE_DESC_START); + + ave_pfsel_init(ndev); + ave_macaddr_init(ndev); + + /* set Rx configuration */ + /* full duplex, enable pause drop, enable flow control */ + val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN | + AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK); + writel(val, priv->base + AVE_RXCR); + + /* set Tx configuration */ + /* enable flow control, disable loopback */ + writel(AVE_TXCR_FLOCTR, priv->base +
AVE_TXCR); + + /* enable timer, clear EN, INTM, and mask interval unit (BSCK) */ + val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK; + val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16); + writel(val, priv->base + AVE_IIRQC); + + val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX; + ave_irq_restore(ndev, val); + + napi_enable(&priv->napi_rx); + napi_enable(&priv->napi_tx); + + phy_start(ndev->phydev); + phy_start_aneg(ndev->phydev); + netif_start_queue(ndev); + + return 0; + +out_free_irq: + disable_irq(priv->irq); + free_irq(priv->irq, ndev); + + return ret; +} + +static int ave_stop(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + int entry; + + ave_irq_disable_all(ndev); + disable_irq(priv->irq); + free_irq(priv->irq, ndev); + + netif_tx_disable(ndev); + phy_stop(ndev->phydev); + napi_disable(&priv->napi_tx); + napi_disable(&priv->napi_rx); + + ave_desc_switch(ndev, AVE_DESC_STOP); + + /* free Tx buffer */ + for (entry = 0; entry < priv->tx.ndesc; entry++) { + if (!priv->tx.desc[entry].skbs) + continue; + + ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE); + dev_kfree_skb_any(priv->tx.desc[entry].skbs); + priv->tx.desc[entry].skbs = NULL; + } + priv->tx.proc_idx = 0; + priv->tx.done_idx = 0; + + /* free Rx buffer */ + for (entry = 0; entry < priv->rx.ndesc; entry++) { + if (!priv->rx.desc[entry].skbs) + continue; + + ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE); + dev_kfree_skb_any(priv->rx.desc[entry].skbs); + priv->rx.desc[entry].skbs = NULL; + } + priv->rx.proc_idx = 0; + priv->rx.done_idx = 0; + + kfree(priv->tx.desc); + kfree(priv->rx.desc); + + return 0; +} + +static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 proc_idx, done_idx, ndesc, cmdsts; + int ret, freepkt; + dma_addr_t paddr; + + proc_idx = priv->tx.proc_idx; + done_idx = priv->tx.done_idx; + ndesc = priv->tx.ndesc; + freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc; + + /* stop queue when there are not enough entries */ + if (unlikely(freepkt < 1)) { + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + + /* add padding for short packet */ + if (skb_put_padto(skb, ETH_ZLEN)) { + priv->stats_tx.dropped++; + return NETDEV_TX_OK; + } + + /* map Tx buffer + * Tx buffer set to the Tx descriptor doesn't have any restriction.
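+ * (Unlike Rx, no alignment or headroom fix-up is required, so skb->data is mapped as-is.)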
+ */ + ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx], + skb->data, skb->len, DMA_TO_DEVICE, &paddr); + if (ret) { + dev_kfree_skb_any(skb); + priv->stats_tx.dropped++; + return NETDEV_TX_OK; + } + + priv->tx.desc[proc_idx].skbs = skb; + + ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr); + + cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST | + (skb->len & AVE_STS_PKTLEN_TX_MASK); + + /* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */ + if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev)) + cmdsts |= AVE_STS_INTR; + + /* disable checksum calculation when skb doesn't require checksum */ + if (skb->ip_summed == CHECKSUM_NONE || + skb->ip_summed == CHECKSUM_UNNECESSARY) + cmdsts |= AVE_STS_NOCSUM; + + ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts); + + priv->tx.proc_idx = (proc_idx + 1) % ndesc; + + return NETDEV_TX_OK; +} + +static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + return phy_mii_ioctl(ndev->phydev, ifr, cmd); +} + +static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 }; + +static void ave_set_rx_mode(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + struct netdev_hw_addr *hw_adr; + int count, mc_cnt; + u32 val; + + /* MAC addr filter enable for promiscuous mode */ + mc_cnt = netdev_mc_count(ndev); + val = readl(priv->base + AVE_RXCR); + if (ndev->flags & IFF_PROMISC || !mc_cnt) + val &= ~AVE_RXCR_AFEN; + else + val |= AVE_RXCR_AFEN; + writel(val, priv->base + AVE_RXCR); + + /* set all multicast addresses */ + if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) { + ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST, + v4multi_macadr, 1); + ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1, + v6multi_macadr, 1); + } else { + /* stop all multicast filter */ + for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++) + ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count); + + /* set multicast addresses */ + count = 0; + netdev_for_each_mc_addr(hw_adr, ndev) { + if (count == mc_cnt) + break; + ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count, + hw_adr->addr, 6); + count++; + } + } +} + +static void ave_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) +{ + struct ave_private *priv = netdev_priv(ndev); + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp); + stats->rx_packets = priv->stats_rx.packets; + stats->rx_bytes = priv->stats_rx.bytes; + } while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start)); + + do { + start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp); + stats->tx_packets = priv->stats_tx.packets; + stats->tx_bytes = priv->stats_tx.bytes; + } while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start)); + + stats->rx_errors = priv->stats_rx.errors; + stats->tx_errors = priv->stats_tx.errors; + stats->rx_dropped = priv->stats_rx.dropped; + stats->tx_dropped = priv->stats_tx.dropped; + stats->rx_fifo_errors = priv->stats_rx.fifo_errors; + stats->collisions = priv->stats_tx.collisions; +} + +static int ave_set_mac_address(struct net_device *ndev, void *p) +{ + int ret = eth_mac_addr(ndev, p); + + if (ret) + return ret; + + ave_macaddr_init(ndev); + + return 0; +} + +static const struct net_device_ops ave_netdev_ops = { + .ndo_init = ave_init, + .ndo_uninit = ave_uninit, + .ndo_open = ave_open, + .ndo_stop = ave_stop, + .ndo_start_xmit = ave_start_xmit, + .ndo_do_ioctl = ave_ioctl, +
.ndo_set_rx_mode = ave_set_rx_mode, + .ndo_get_stats64 = ave_get_stats64, + .ndo_set_mac_address = ave_set_mac_address, +}; + +static int ave_probe(struct platform_device *pdev) +{ + const struct ave_soc_data *data; + struct device *dev = &pdev->dev; + char buf[ETHTOOL_FWVERS_LEN]; + phy_interface_t phy_mode; + struct ave_private *priv; + struct net_device *ndev; + struct device_node *np; + struct resource *res; + const void *mac_addr; + void __iomem *base; + u64 dma_mask; + int irq, ret; + u32 ave_id; + + data = of_device_get_match_data(dev); + if (WARN_ON(!data)) + return -EINVAL; + + np = dev->of_node; + phy_mode = of_get_phy_mode(np); + if (phy_mode < 0) { + dev_err(dev, "phy-mode not found\n"); + return -EINVAL; + } + if ((!phy_interface_mode_is_rgmii(phy_mode)) && + phy_mode != PHY_INTERFACE_MODE_RMII && + phy_mode != PHY_INTERFACE_MODE_MII) { + dev_err(dev, "phy-mode is invalid\n"); + return -EINVAL; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "IRQ not found\n"); + return irq; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + ndev = alloc_etherdev(sizeof(struct ave_private)); + if (!ndev) { + dev_err(dev, "can't allocate ethernet device\n"); + return -ENOMEM; + } + + ndev->netdev_ops = &ave_netdev_ops; + ndev->ethtool_ops = &ave_ethtool_ops; + SET_NETDEV_DEV(ndev, dev); + + ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM); + ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM); + + ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN); + + mac_addr = of_get_mac_address(np); + if (mac_addr) + ether_addr_copy(ndev->dev_addr, mac_addr); + + /* if the mac address is invalid, use random mac address */ + if (!is_valid_ether_addr(ndev->dev_addr)) { + eth_hw_addr_random(ndev); + dev_warn(dev, "Using random MAC address: %pM\n", + ndev->dev_addr); + } + + priv = netdev_priv(ndev); + priv->base = base; + priv->irq = irq; + priv->ndev = ndev; + priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE); + priv->phy_mode = phy_mode; + priv->data = data; + + if (IS_DESC_64BIT(priv)) { + priv->desc_size = AVE_DESC_SIZE_64; + priv->tx.daddr = AVE_TXDM_64; + priv->rx.daddr = AVE_RXDM_64; + dma_mask = DMA_BIT_MASK(64); + } else { + priv->desc_size = AVE_DESC_SIZE_32; + priv->tx.daddr = AVE_TXDM_32; + priv->rx.daddr = AVE_RXDM_32; + dma_mask = DMA_BIT_MASK(32); + } + ret = dma_set_mask(dev, dma_mask); + if (ret) + goto out_free_netdev; + + priv->tx.ndesc = AVE_NR_TXDESC; + priv->rx.ndesc = AVE_NR_RXDESC; + + u64_stats_init(&priv->stats_tx.syncp); + u64_stats_init(&priv->stats_rx.syncp); + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) { + ret = PTR_ERR(priv->clk); + goto out_free_netdev; + } + + priv->rst = devm_reset_control_get_optional_shared(dev, NULL); + if (IS_ERR(priv->rst)) { + ret = PTR_ERR(priv->rst); + goto out_free_netdev; + } + + priv->mdio = devm_mdiobus_alloc(dev); + if (!priv->mdio) { + ret = -ENOMEM; + goto out_free_netdev; + } + priv->mdio->priv = ndev; + priv->mdio->parent = dev; + priv->mdio->read = ave_mdiobus_read; + priv->mdio->write = ave_mdiobus_write; + priv->mdio->name = "uniphier-mdio"; + snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + /* Register as a NAPI supported driver */ + netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc); + netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx, + priv->tx.ndesc); + + platform_set_drvdata(pdev, ndev); + + ret = 
register_netdev(ndev); + if (ret) { + dev_err(dev, "failed to register netdevice\n"); + goto out_del_napi; + } + + /* get ID and version */ + ave_id = readl(priv->base + AVE_IDR); + ave_hw_read_version(ndev, buf, sizeof(buf)); + + dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n", + (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff, + (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff, + buf, priv->irq, phy_modes(phy_mode)); + + return 0; + +out_del_napi: + netif_napi_del(&priv->napi_rx); + netif_napi_del(&priv->napi_tx); +out_free_netdev: + free_netdev(ndev); + + return ret; +} + +static int ave_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct ave_private *priv = netdev_priv(ndev); + + unregister_netdev(ndev); + netif_napi_del(&priv->napi_rx); + netif_napi_del(&priv->napi_tx); + free_netdev(ndev); + + return 0; +} + +static const struct ave_soc_data ave_pro4_data = { + .is_desc_64bit = false, +}; + +static const struct ave_soc_data ave_pxs2_data = { + .is_desc_64bit = false, +}; + +static const struct ave_soc_data ave_ld11_data = { + .is_desc_64bit = false, +}; + +static const struct ave_soc_data ave_ld20_data = { + .is_desc_64bit = true, +}; + +static const struct of_device_id of_ave_match[] = { + { + .compatible = "socionext,uniphier-pro4-ave4", + .data = &ave_pro4_data, + }, + { + .compatible = "socionext,uniphier-pxs2-ave4", + .data = &ave_pxs2_data, + }, + { + .compatible = "socionext,uniphier-ld11-ave4", + .data = &ave_ld11_data, + }, + { + .compatible = "socionext,uniphier-ld20-ave4", + .data = &ave_ld20_data, + }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, of_ave_match); + +static struct platform_driver ave_driver = { + .probe = ave_probe, + .remove = ave_remove, + .driver = { + .name = "ave", + .of_match_table = of_ave_match, + }, +}; +module_platform_driver(ave_driver); + +MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index ce2ea2d491ac..2ffe76c0ff74 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -474,7 +474,7 @@ struct mac_device_info; /* Helpers to program the MAC core */ struct stmmac_ops { /* MAC core initialization */ - void (*core_init)(struct mac_device_info *hw, int mtu); + void (*core_init)(struct mac_device_info *hw, struct net_device *dev); /* Enable the MAC RX/TX */ void (*set_mac)(void __iomem *ioaddr, bool enable); /* Enable and verify that the IPC module is supported */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 4404650b32c5..5270d26f0bc6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -40,9 +40,7 @@ #define PRG_ETH0_CLK_M250_DIV_SHIFT 7 #define PRG_ETH0_CLK_M250_DIV_WIDTH 3 -/* divides the result of m25_sel by either 5 (bit unset) or 10 (bit set) */ -#define PRG_ETH0_CLK_M25_DIV_SHIFT 10 -#define PRG_ETH0_CLK_M25_DIV_WIDTH 1 +#define PRG_ETH0_RGMII_TX_CLK_EN 10 #define PRG_ETH0_INVERTED_RMII_CLK BIT(11) #define PRG_ETH0_TX_AND_PHY_REF_CLK BIT(12) @@ -63,8 +61,11 @@ struct meson8b_dwmac { struct clk_divider m250_div; struct clk *m250_div_clk; - struct clk_divider m25_div; - struct clk *m25_div_clk; + struct clk_fixed_factor fixed_div2; + struct clk *fixed_div2_clk; + + struct clk_gate rgmii_tx_en; + struct clk *rgmii_tx_en_clk; 
u32 tx_delay_ns; }; @@ -81,7 +82,7 @@ static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg, writel(data, dwmac->regs + reg); } -static int meson8b_init_clk(struct meson8b_dwmac *dwmac) +static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) { struct clk_init_data init; int i, ret; @@ -89,11 +90,6 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac) char clk_name[32]; const char *clk_div_parents[1]; const char *mux_parent_names[MUX_CLK_NUM_PARENTS]; - static const struct clk_div_table clk_25m_div_table[] = { - { .val = 0, .div = 5 }, - { .val = 1, .div = 10 }, - { /* sentinel */ }, - }; /* get the mux parents from DT */ for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) { @@ -116,7 +112,7 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac) snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev)); init.name = clk_name; init.ops = &clk_mux_ops; - init.flags = 0; + init.flags = CLK_SET_RATE_PARENT; init.parent_names = mux_parent_names; init.num_parents = MUX_CLK_NUM_PARENTS; @@ -144,31 +140,48 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac) dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT; dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH; dwmac->m250_div.hw.init = &init; - dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO; + dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | + CLK_DIVIDER_ALLOW_ZERO | + CLK_DIVIDER_ROUND_CLOSEST; dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw); if (WARN_ON(IS_ERR(dwmac->m250_div_clk))) return PTR_ERR(dwmac->m250_div_clk); - /* create the m25_div */ - snprintf(clk_name, sizeof(clk_name), "%s#m25_div", dev_name(dev)); + /* create the fixed_div2 */ + snprintf(clk_name, sizeof(clk_name), "%s#fixed_div2", dev_name(dev)); init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL); - init.ops = &clk_divider_ops; - init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT; + init.ops = &clk_fixed_factor_ops; + init.flags = CLK_SET_RATE_PARENT; clk_div_parents[0] = __clk_get_name(dwmac->m250_div_clk); init.parent_names = clk_div_parents; init.num_parents = ARRAY_SIZE(clk_div_parents); - dwmac->m25_div.reg = dwmac->regs + PRG_ETH0; - dwmac->m25_div.shift = PRG_ETH0_CLK_M25_DIV_SHIFT; - dwmac->m25_div.width = PRG_ETH0_CLK_M25_DIV_WIDTH; - dwmac->m25_div.table = clk_25m_div_table; - dwmac->m25_div.hw.init = &init; - dwmac->m25_div.flags = CLK_DIVIDER_ALLOW_ZERO; + dwmac->fixed_div2.mult = 1; + dwmac->fixed_div2.div = 2; + dwmac->fixed_div2.hw.init = &init; - dwmac->m25_div_clk = devm_clk_register(dev, &dwmac->m25_div.hw); - if (WARN_ON(IS_ERR(dwmac->m25_div_clk))) - return PTR_ERR(dwmac->m25_div_clk); + dwmac->fixed_div2_clk = devm_clk_register(dev, &dwmac->fixed_div2.hw); + if (WARN_ON(IS_ERR(dwmac->fixed_div2_clk))) + return PTR_ERR(dwmac->fixed_div2_clk); + + /* create the rgmii_tx_en */ + init.name = devm_kasprintf(dev, GFP_KERNEL, "%s#rgmii_tx_en", + dev_name(dev)); + init.ops = &clk_gate_ops; + init.flags = CLK_SET_RATE_PARENT; + clk_div_parents[0] = __clk_get_name(dwmac->fixed_div2_clk); + init.parent_names = clk_div_parents; + init.num_parents = ARRAY_SIZE(clk_div_parents); + + dwmac->rgmii_tx_en.reg = dwmac->regs + PRG_ETH0; + dwmac->rgmii_tx_en.bit_idx = PRG_ETH0_RGMII_TX_CLK_EN; + dwmac->rgmii_tx_en.hw.init = &init; + + dwmac->rgmii_tx_en_clk = devm_clk_register(dev, + &dwmac->rgmii_tx_en.hw); + if (WARN_ON(IS_ERR(dwmac->rgmii_tx_en_clk))) + return PTR_ERR(dwmac->rgmii_tx_en_clk); return 0; } @@ -176,7 +189,6 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac) static int 
meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) { int ret; - unsigned long clk_rate; u8 tx_dly_val = 0; switch (dwmac->phy_mode) { @@ -191,9 +203,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_TXID: - /* Generate a 25MHz clock for the PHY */ - clk_rate = 25 * 1000 * 1000; - /* enable RGMII mode */ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE, PRG_ETH0_RGMII_MODE); @@ -204,12 +213,28 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK, tx_dly_val << PRG_ETH0_TXDLY_SHIFT); + + /* Configure the 125MHz RGMII TX clock; the IP block changes + * the output automatically (without any register configuration) + * based on the line speed (125MHz for Gbit speeds, + * 25MHz for 100Mbit/s and 2.5MHz for 10Mbit/s). + */ + ret = clk_set_rate(dwmac->rgmii_tx_en_clk, 125 * 1000 * 1000); + if (ret) { + dev_err(&dwmac->pdev->dev, + "failed to set RGMII TX clock\n"); + return ret; + } + + ret = clk_prepare_enable(dwmac->rgmii_tx_en_clk); + if (ret) { + dev_err(&dwmac->pdev->dev, + "failed to enable the RGMII TX clock\n"); + return ret; + } break; case PHY_INTERFACE_MODE_RMII: - /* Use the rate of the mux clock for the internal RMII PHY */ - clk_rate = clk_get_rate(dwmac->m250_mux_clk); - /* disable RGMII mode -> enables RMII mode */ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE, 0); @@ -231,20 +256,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) return -EINVAL; } - ret = clk_prepare_enable(dwmac->m25_div_clk); - if (ret) { - dev_err(&dwmac->pdev->dev, "failed to enable the PHY clock\n"); - return ret; - } - - ret = clk_set_rate(dwmac->m25_div_clk, clk_rate); - if (ret) { - clk_disable_unprepare(dwmac->m25_div_clk); - - dev_err(&dwmac->pdev->dev, "failed to set PHY clock\n"); - return ret; - } - /* enable TX_CLK and PHY_REF_CLK generator */ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TX_AND_PHY_REF_CLK, PRG_ETH0_TX_AND_PHY_REF_CLK); @@ -294,7 +305,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) &dwmac->tx_delay_ns)) dwmac->tx_delay_ns = 2; - ret = meson8b_init_clk(dwmac); + ret = meson8b_init_rgmii_tx_clk(dwmac); if (ret) goto err_remove_config_dt; @@ -311,7 +322,8 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) return 0; err_clk_disable: - clk_disable_unprepare(dwmac->m25_div_clk); + if (phy_interface_mode_is_rgmii(dwmac->phy_mode)) + clk_disable_unprepare(dwmac->rgmii_tx_en_clk); err_remove_config_dt: stmmac_remove_config_dt(pdev, plat_dat); @@ -322,7 +334,8 @@ static int meson8b_dwmac_remove(struct platform_device *pdev) { struct meson8b_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev); - clk_disable_unprepare(dwmac->m25_div_clk); + if (phy_interface_mode_is_rgmii(dwmac->phy_mode)) + clk_disable_unprepare(dwmac->rgmii_tx_en_clk); return stmmac_pltfr_remove(pdev); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 9eb7f65d8000..a3fa65b1ca8e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -483,7 +483,8 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv) return 0; } -static void sun8i_dwmac_core_init(struct mac_device_info *hw, int mtu) +static void sun8i_dwmac_core_init(struct mac_device_info *hw, + struct net_device *dev) { void __iomem *ioaddr = hw->pcsr; u32 v;
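/* The clock chain registered above is: m250_sel (mux) -> m250_div
 * (divider) -> fixed_div2 (fixed /2) -> rgmii_tx_en (gate). Since every
 * link carries CLK_SET_RATE_PARENT, the clk_set_rate() on the gate
 * propagates up the chain. A worked sketch of the resulting arithmetic,
 * assuming (purely for illustration) a 1 GHz mux parent:
 *
 *	clk_set_rate(dwmac->rgmii_tx_en_clk, 125 * 1000 * 1000);
 *	  -> fixed_div2 requests 2 * 125 MHz = 250 MHz from m250_div
 *	  -> m250_div (CLK_DIVIDER_ROUND_CLOSEST) picks divider 4:
 *	     1 GHz / 4 = 250 MHz
 *	  -> effective RGMII TX rate: 1 GHz / 4 / 2 = 125 MHz,
 *	     gated on/off by PRG_ETH0 bit 10 (PRG_ETH0_RGMII_TX_CLK_EN)
 */
diff --git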
a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 8a86340ff2d3..540d21786a43 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -25,18 +25,28 @@ #include <linux/crc32.h> #include <linux/slab.h> #include <linux/ethtool.h> +#include <net/dsa.h> #include <asm/io.h> #include "stmmac_pcs.h" #include "dwmac1000.h" -static void dwmac1000_core_init(struct mac_device_info *hw, int mtu) +static void dwmac1000_core_init(struct mac_device_info *hw, + struct net_device *dev) { void __iomem *ioaddr = hw->pcsr; u32 value = readl(ioaddr + GMAC_CONTROL); + int mtu = dev->mtu; /* Configure GMAC core */ value |= GMAC_CORE_INIT; + /* Clear ACS bit because Ethernet switch tagging formats such as + * Broadcom tags can look like invalid LLC/SNAP packets and cause the + * hardware to truncate packets on reception. + */ + if (netdev_uses_dsa(dev)) + value &= ~GMAC_CONTROL_ACS; + if (mtu > 1500) value |= GMAC_CONTROL_2K; if (mtu > 2000) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index 8ef517356313..91b23f9db31a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -25,15 +25,26 @@ *******************************************************************************/ #include <linux/crc32.h> +#include <net/dsa.h> #include <asm/io.h> #include "dwmac100.h" -static void dwmac100_core_init(struct mac_device_info *hw, int mtu) +static void dwmac100_core_init(struct mac_device_info *hw, + struct net_device *dev) { void __iomem *ioaddr = hw->pcsr; u32 value = readl(ioaddr + MAC_CONTROL); - writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL); + value |= MAC_CORE_INIT; + + /* Clear ASTP bit because Ethernet switch tagging formats such as + * Broadcom tags can look like invalid LLC/SNAP packets and cause the + * hardware to truncate packets on reception. + */ + if (netdev_uses_dsa(dev)) + value &= ~MAC_CONTROL_ASTP; + + writel(value, ioaddr + MAC_CONTROL); #ifdef STMMAC_VLAN_TAG_USED writel(ETH_P_8021Q, ioaddr + MAC_VLAN1); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index f3ed8f7853eb..ed222b20fcf1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -17,16 +17,26 @@ #include <linux/slab.h> #include <linux/ethtool.h> #include <linux/io.h> +#include <net/dsa.h> #include "stmmac_pcs.h" #include "dwmac4.h" -static void dwmac4_core_init(struct mac_device_info *hw, int mtu) +static void dwmac4_core_init(struct mac_device_info *hw, + struct net_device *dev) { void __iomem *ioaddr = hw->pcsr; u32 value = readl(ioaddr + GMAC_CONFIG); + int mtu = dev->mtu; value |= GMAC_CORE_INIT; + /* Clear ACS bit because Ethernet switch tagging formats such as + * Broadcom tags can look like invalid LLC/SNAP packets and cause the + * hardware to truncate packets on reception. 
+ */ + if (netdev_uses_dsa(dev)) + value &= ~GMAC_CONFIG_ACS; + if (mtu > 1500) value |= GMAC_CONFIG_2K; if (mtu > 2000) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 7e089bf906b4..c728ffa095de 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -334,7 +334,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, if (tx_own) tdes3 |= TDES3_OWN; - if (is_fs & tx_own) + if (is_fs && tx_own) /* When the own bit, for the first frame, has to be set, all * descriptors for the same frame has to be set before, to * avoid race condition. @@ -377,7 +377,7 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs, if (tx_own) tdes3 |= TDES3_OWN; - if (is_fs & tx_own) + if (is_fs && tx_own) /* When the own bit, for the first frame, has to be set, all * descriptors for the same frame has to be set before, to * avoid race condition. @@ -406,7 +406,7 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx) pr_info("%s descriptor ring:\n", rx ? "RX" : "TX"); for (i = 0; i < size; i++) { - pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(p), le32_to_cpu(p->des0), le32_to_cpu(p->des1), le32_to_cpu(p->des2), le32_to_cpu(p->des3)); diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 2a828a312814..6768a25b6aa0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -341,7 +341,7 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, if (tx_own) tdes0 |= ETDES0_OWN; - if (is_fs & tx_own) + if (is_fs && tx_own) /* When the own bit, for the first frame, has to be set, all * descriptors for the same frame has to be set before, to * avoid race condition. @@ -428,7 +428,7 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx) u64 x; x = *(u64 *)ep; - pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), (unsigned int)x, (unsigned int)(x >> 32), ep->basic.des2, ep->basic.des3); diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index db4cee57bb24..ebd9e5e00f16 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -288,7 +288,7 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx) u64 x; x = *(u64 *)p; - pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x", + pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x", i, (unsigned int)virt_to_phys(p), (unsigned int)x, (unsigned int)(x >> 32), p->des2, p->des3); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c0af0bc4e714..7ad841434ec8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2003,22 +2003,60 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, static void stmmac_dma_interrupt(struct stmmac_priv *priv) { u32 tx_channel_count = priv->plat->tx_queues_to_use; - int status; + u32 rx_channel_count = priv->plat->rx_queues_to_use; + u32 channels_to_check = tx_channel_count > rx_channel_count ? 
+ tx_channel_count : rx_channel_count; u32 chan; + bool poll_scheduled = false; + int status[channels_to_check]; + + /* Each DMA channel can be used for rx and tx simultaneously, yet + * napi_struct is embedded in struct stmmac_rx_queue rather than in a + * stmmac_channel struct. + * Because of this, stmmac_poll currently checks (and possibly wakes) + * all tx queues rather than just a single tx queue. + */ + for (chan = 0; chan < channels_to_check; chan++) + status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr, + &priv->xstats, + chan); - for (chan = 0; chan < tx_channel_count; chan++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; + for (chan = 0; chan < rx_channel_count; chan++) { + if (likely(status[chan] & handle_rx)) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; - status = priv->hw->dma->dma_interrupt(priv->ioaddr, - &priv->xstats, chan); - if (likely((status & handle_rx)) || (status & handle_tx)) { if (likely(napi_schedule_prep(&rx_q->napi))) { stmmac_disable_dma_irq(priv, chan); __napi_schedule(&rx_q->napi); + poll_scheduled = true; + } + } + } + + /* If we scheduled a poll, we already know that tx queues will be checked. + * If we didn't schedule a poll, see if any DMA channel (used by tx) has a + * completed transmission; if so, call stmmac_poll (once). + */ + if (!poll_scheduled) { + for (chan = 0; chan < tx_channel_count; chan++) { + if (status[chan] & handle_tx) { + /* It doesn't matter what rx queue we choose + * here. We use 0 since it always exists. + */ + struct stmmac_rx_queue *rx_q = + &priv->rx_queue[0]; + + if (likely(napi_schedule_prep(&rx_q->napi))) { + stmmac_disable_dma_irq(priv, chan); + __napi_schedule(&rx_q->napi); + } + break; } } + } - if (unlikely(status & tx_hard_error_bump_tc)) { + for (chan = 0; chan < tx_channel_count; chan++) { + if (unlikely(status[chan] & tx_hard_error_bump_tc)) { /* Try to bump up the dma threshold on this failure */ if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && (tc <= 256)) { @@ -2035,7 +2073,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) chan); priv->xstats.threshold = tc; } - } else if (unlikely(status == tx_hard_error)) { + } else if (unlikely(status[chan] == tx_hard_error)) { stmmac_tx_err(priv, chan); } } @@ -2489,7 +2527,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) } /* Initialize the MAC Core */ - priv->hw->mac->core_init(priv->hw, dev->mtu); + priv->hw->mac->core_init(priv->hw, dev); /* Initialize MTL*/ if (priv->synopsys_id >= DWMAC_CORE_4_00) @@ -3404,9 +3442,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) if (netif_msg_rx_status(priv)) { netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n", p, entry, des); - if (frame_len > ETH_FRAME_LEN) - netdev_dbg(priv->dev, "frame size %d, COE: %d\n", - frame_len, status); + netdev_dbg(priv->dev, "frame size %d, COE: %d\n", + frame_len, status); } /* The zero-copy is always used for all the sizes diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c index d655a4261e98..eb1c6b03c329 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c @@ -333,9 +333,8 @@ void xlgmac_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) { struct ethhdr *eth = (struct ethhdr *)skb->data; - unsigned char *buf = skb->data; unsigned char buffer[128]; - unsigned int i, j; + unsigned int i; netdev_dbg(netdev, "\n************** SKB dump ****************\n"); @@
-346,22 +345,13 @@ void xlgmac_print_pkt(struct net_device *netdev, netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source); netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto)); - for (i = 0, j = 0; i < skb->len;) { - j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx", - buf[i++]); - - if ((i % 32) == 0) { - netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer); - j = 0; - } else if ((i % 16) == 0) { - buffer[j++] = ' '; - buffer[j++] = ' '; - } else if ((i % 4) == 0) { - buffer[j++] = ' '; - } + for (i = 0; i < skb->len; i += 32) { + unsigned int len = min(skb->len - i, 32U); + + hex_dump_to_buffer(&skb->data[i], len, 32, 1, + buffer, sizeof(buffer), false); + netdev_dbg(netdev, " %#06x: %s\n", i, buffer); } - if (i % 32) - netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer); netdev_dbg(netdev, "\n************** SKB dump ****************\n"); } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index a73600dceb8b..3c85a0885f9b 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -88,6 +88,7 @@ do { \ #define CPSW_VERSION_4 0x190112 #define HOST_PORT_NUM 0 +#define CPSW_ALE_PORTS_NUM 3 #define SLIVER_SIZE 0x40 #define CPSW1_HOST_PORT_OFFSET 0x028 @@ -352,6 +353,27 @@ struct cpsw_hw_stats { u32 rxdmaoverruns; }; +struct cpsw_slave_data { + struct device_node *phy_node; + char phy_id[MII_BUS_ID_SIZE]; + int phy_if; + u8 mac_addr[ETH_ALEN]; + u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */ +}; + +struct cpsw_platform_data { + struct cpsw_slave_data *slave_data; + u32 ss_reg_ofs; /* Subsystem control register offset */ + u32 channels; /* number of cpdma channels (symmetric) */ + u32 slaves; /* number of slave cpgmac ports */ + u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */ + u32 ale_entries; /* ale table size */ + u32 bd_ram_size; /*buffer descriptor ram size */ + u32 mac_control; /* Mac control register */ + u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/ + bool dual_emac; /* Enable Dual EMAC mode */ +}; + struct cpsw_slave { void __iomem *regs; struct cpsw_sliver_regs __iomem *sliver; @@ -365,12 +387,12 @@ struct cpsw_slave { static inline u32 slave_read(struct cpsw_slave *slave, u32 offset) { - return __raw_readl(slave->regs + offset); + return readl_relaxed(slave->regs + offset); } static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset) { - __raw_writel(val, slave->regs + offset); + writel_relaxed(val, slave->regs + offset); } struct cpsw_vector { @@ -660,8 +682,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) static void cpsw_intr_enable(struct cpsw_common *cpsw) { - __raw_writel(0xFF, &cpsw->wr_regs->tx_en); - __raw_writel(0xFF, &cpsw->wr_regs->rx_en); + writel_relaxed(0xFF, &cpsw->wr_regs->tx_en); + writel_relaxed(0xFF, &cpsw->wr_regs->rx_en); cpdma_ctlr_int_ctrl(cpsw->dma, true); return; @@ -669,8 +691,8 @@ static void cpsw_intr_enable(struct cpsw_common *cpsw) static void cpsw_intr_disable(struct cpsw_common *cpsw) { - __raw_writel(0, &cpsw->wr_regs->tx_en); - __raw_writel(0, &cpsw->wr_regs->rx_en); + writel_relaxed(0, &cpsw->wr_regs->tx_en); + writel_relaxed(0, &cpsw->wr_regs->rx_en); cpdma_ctlr_int_ctrl(cpsw->dma, false); return; @@ -949,18 +971,14 @@ static inline void soft_reset(const char *module, void __iomem *reg) { unsigned long timeout = jiffies + HZ; - __raw_writel(1, reg); + writel_relaxed(1, reg); do { cpu_relax(); - } while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies)); + } while ((readl_relaxed(reg) & 1) 
&& time_after(timeout, jiffies)); - WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module); + WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module); } -#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \ - ((mac)[2] << 16) | ((mac)[3] << 24)) -#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8)) - static void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv) { @@ -1015,7 +1033,7 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, if (mac_control != slave->mac_control) { phy_print_status(phy); - __raw_writel(mac_control, &slave->sliver->mac_control); + writel_relaxed(mac_control, &slave->sliver->mac_control); } slave->mac_control = mac_control; @@ -1278,7 +1296,7 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) soft_reset_slave(slave); /* setup priority mapping */ - __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); + writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); switch (cpsw->version) { case CPSW_VERSION_1: @@ -1304,7 +1322,7 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) } /* setup max packet size, and mac address */ - __raw_writel(cpsw->rx_packet_max, &slave->sliver->rx_maxlen); + writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen); cpsw_set_slave_mac(slave, priv); slave->mac_control = 0; /* no link yet */ @@ -1395,9 +1413,9 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl); /* setup host port priority mapping */ - __raw_writel(CPDMA_TX_PRIORITY_MAP, - &cpsw->host_port_regs->cpdma_tx_pri_map); - __raw_writel(0, &cpsw->host_port_regs->cpdma_rx_chan_map); + writel_relaxed(CPDMA_TX_PRIORITY_MAP, + &cpsw->host_port_regs->cpdma_tx_pri_map); + writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map); cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); @@ -1514,10 +1532,10 @@ static int cpsw_ndo_open(struct net_device *ndev) /* initialize shared resources for every ndev */ if (!cpsw->usage_count) { /* disable priority elevation */ - __raw_writel(0, &cpsw->regs->ptype); + writel_relaxed(0, &cpsw->regs->ptype); /* enable statistics collection only on all ports */ - __raw_writel(0x7, &cpsw->regs->stat_port_en); + writel_relaxed(0x7, &cpsw->regs->stat_port_en); /* Enable internal fifo flow control */ writel(0x7, &cpsw->regs->flow_control); @@ -1701,7 +1719,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE); slave_write(slave, ctrl, CPSW2_CONTROL); - __raw_writel(ETH_P_1588, &cpsw->regs->ts_ltype); + writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype); } static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) @@ -2298,7 +2316,6 @@ static int cpsw_check_ch_settings(struct cpsw_common *cpsw, static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx) { - int (*poll)(struct napi_struct *, int); struct cpsw_common *cpsw = priv->cpsw; void (*handler)(void *, int, int); struct netdev_queue *queue; @@ -2309,12 +2326,10 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx) ch = &cpsw->rx_ch_num; vec = cpsw->rxv; handler = cpsw_rx_handler; - poll = cpsw_rx_poll; } else { ch = &cpsw->tx_ch_num; vec = cpsw->txv; handler = cpsw_tx_handler; - poll = cpsw_tx_poll; } while (*ch < ch_num) { @@ -3050,17 +3065,23 @@ static int cpsw_probe(struct platform_device *pdev) } cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0); + if 
(IS_ERR(cpsw->txv[0].ch)) { + dev_err(priv->dev, "error initializing tx dma channel\n"); + ret = PTR_ERR(cpsw->txv[0].ch); + goto clean_dma_ret; + } + cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1); - if (WARN_ON(!cpsw->rxv[0].ch || !cpsw->txv[0].ch)) { - dev_err(priv->dev, "error initializing dma channels\n"); - ret = -ENOMEM; + if (IS_ERR(cpsw->rxv[0].ch)) { + dev_err(priv->dev, "error initializing rx dma channel\n"); + ret = PTR_ERR(cpsw->rxv[0].ch); goto clean_dma_ret; } ale_params.dev = &pdev->dev; ale_params.ale_ageout = ale_ageout; ale_params.ale_entries = data->ale_entries; - ale_params.ale_ports = data->slaves; + ale_params.ale_ports = CPSW_ALE_PORTS_NUM; cpsw->ale = cpsw_ale_create(&ale_params); if (!cpsw->ale) { @@ -3072,14 +3093,14 @@ static int cpsw_probe(struct platform_device *pdev) cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node); if (IS_ERR(cpsw->cpts)) { ret = PTR_ERR(cpsw->cpts); - goto clean_ale_ret; + goto clean_dma_ret; } ndev->irq = platform_get_irq(pdev, 1); if (ndev->irq < 0) { dev_err(priv->dev, "error getting irq resource\n"); ret = ndev->irq; - goto clean_ale_ret; + goto clean_dma_ret; } of_id = of_match_device(cpsw_of_mtable, &pdev->dev); @@ -3103,7 +3124,7 @@ static int cpsw_probe(struct platform_device *pdev) if (ret) { dev_err(priv->dev, "error registering net device\n"); ret = -ENODEV; - goto clean_ale_ret; + goto clean_dma_ret; } if (cpsw->data.dual_emac) { @@ -3126,7 +3147,7 @@ static int cpsw_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 1); if (irq < 0) { ret = irq; - goto clean_ale_ret; + goto clean_dma_ret; } cpsw->irqs_table[0] = irq; @@ -3134,14 +3155,14 @@ static int cpsw_probe(struct platform_device *pdev) 0, dev_name(&pdev->dev), cpsw); if (ret < 0) { dev_err(priv->dev, "error attaching irq (%d)\n", ret); - goto clean_ale_ret; + goto clean_dma_ret; } /* TX IRQ */ irq = platform_get_irq(pdev, 2); if (irq < 0) { ret = irq; - goto clean_ale_ret; + goto clean_dma_ret; } cpsw->irqs_table[1] = irq; @@ -3149,7 +3170,7 @@ static int cpsw_probe(struct platform_device *pdev) 0, dev_name(&pdev->dev), cpsw); if (ret < 0) { dev_err(priv->dev, "error attaching irq (%d)\n", ret); - goto clean_ale_ret; + goto clean_dma_ret; } cpsw_notice(priv, probe, @@ -3162,8 +3183,6 @@ static int cpsw_probe(struct platform_device *pdev) clean_unregister_netdev_ret: unregister_netdev(ndev); -clean_ale_ret: - cpsw_ale_destroy(cpsw->ale); clean_dma_ret: cpdma_ctlr_destroy(cpsw->dma); clean_dt_ret: @@ -3193,7 +3212,6 @@ static int cpsw_remove(struct platform_device *pdev) unregister_netdev(ndev); cpts_release(cpsw->cpts); - cpsw_ale_destroy(cpsw->ale); cpdma_ctlr_destroy(cpsw->dma); cpsw_remove_dt(pdev); pm_runtime_put_sync(&pdev->dev); diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h index 6c3037aa2cd3..cf111db3dc27 100644 --- a/drivers/net/ethernet/ti/cpsw.h +++ b/drivers/net/ethernet/ti/cpsw.h @@ -17,26 +17,9 @@ #include <linux/if_ether.h> #include <linux/phy.h> -struct cpsw_slave_data { - struct device_node *phy_node; - char phy_id[MII_BUS_ID_SIZE]; - int phy_if; - u8 mac_addr[ETH_ALEN]; - u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */ -}; - -struct cpsw_platform_data { - struct cpsw_slave_data *slave_data; - u32 ss_reg_ofs; /* Subsystem control register offset */ - u32 channels; /* number of cpdma channels (symmetric) */ - u32 slaves; /* number of slave cpgmac ports */ - u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */ - u32 ale_entries; /* ale table size */ 
- u32 bd_ram_size; /*buffer descriptor ram size */ - u32 mac_control; /* Mac control register */ - u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/ - bool dual_emac; /* Enable Dual EMAC mode */ -}; +#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \ + ((mac)[2] << 16) | ((mac)[3] << 24)) +#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8)) void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave); int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr); diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index b432a75fb874..93dc05c194d3 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -150,11 +150,11 @@ static int cpsw_ale_read(struct cpsw_ale *ale, int idx, u32 *ale_entry) WARN_ON(idx > ale->params.ale_entries); - __raw_writel(idx, ale->params.ale_regs + ALE_TABLE_CONTROL); + writel_relaxed(idx, ale->params.ale_regs + ALE_TABLE_CONTROL); for (i = 0; i < ALE_ENTRY_WORDS; i++) - ale_entry[i] = __raw_readl(ale->params.ale_regs + - ALE_TABLE + 4 * i); + ale_entry[i] = readl_relaxed(ale->params.ale_regs + + ALE_TABLE + 4 * i); return idx; } @@ -166,11 +166,11 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry) WARN_ON(idx > ale->params.ale_entries); for (i = 0; i < ALE_ENTRY_WORDS; i++) - __raw_writel(ale_entry[i], ale->params.ale_regs + - ALE_TABLE + 4 * i); + writel_relaxed(ale_entry[i], ale->params.ale_regs + + ALE_TABLE + 4 * i); - __raw_writel(idx | ALE_TABLE_WRITE, ale->params.ale_regs + - ALE_TABLE_CONTROL); + writel_relaxed(idx | ALE_TABLE_WRITE, ale->params.ale_regs + + ALE_TABLE_CONTROL); return idx; } @@ -723,7 +723,7 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control, if (info->port_offset == 0 && info->port_shift == 0) port = 0; /* global, port is a dont care */ - if (port < 0 || port > ale->params.ale_ports) + if (port < 0 || port >= ale->params.ale_ports) return -EINVAL; mask = BITMASK(info->bits); @@ -733,9 +733,9 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control, offset = info->offset + (port * info->port_offset); shift = info->shift + (port * info->port_shift); - tmp = __raw_readl(ale->params.ale_regs + offset); + tmp = readl_relaxed(ale->params.ale_regs + offset); tmp = (tmp & ~(mask << shift)) | (value << shift); - __raw_writel(tmp, ale->params.ale_regs + offset); + writel_relaxed(tmp, ale->params.ale_regs + offset); return 0; } @@ -754,13 +754,13 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control) if (info->port_offset == 0 && info->port_shift == 0) port = 0; /* global, port is a dont care */ - if (port < 0 || port > ale->params.ale_ports) + if (port < 0 || port >= ale->params.ale_ports) return -EINVAL; offset = info->offset + (port * info->port_offset); shift = info->shift + (port * info->port_shift); - tmp = __raw_readl(ale->params.ale_regs + offset) >> shift; + tmp = readl_relaxed(ale->params.ale_regs + offset) >> shift; return tmp & BITMASK(info->bits); } EXPORT_SYMBOL_GPL(cpsw_ale_control_get); @@ -779,9 +779,37 @@ static void cpsw_ale_timer(struct timer_list *t) void cpsw_ale_start(struct cpsw_ale *ale) { + cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1); + cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); + + timer_setup(&ale->timer, cpsw_ale_timer, 0); + if (ale->ageout) { + ale->timer.expires = jiffies + ale->ageout; + add_timer(&ale->timer); + } +} +EXPORT_SYMBOL_GPL(cpsw_ale_start); + +void cpsw_ale_stop(struct cpsw_ale *ale) +{ + 
del_timer_sync(&ale->timer); + cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); +} +EXPORT_SYMBOL_GPL(cpsw_ale_stop); + +struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) +{ + struct cpsw_ale *ale; u32 rev, ale_entries; - rev = __raw_readl(ale->params.ale_regs + ALE_IDVER); + ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL); + if (!ale) + return NULL; + + ale->params = *params; + ale->ageout = ale->params.ale_ageout * HZ; + + rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER); if (!ale->params.major_ver_mask) ale->params.major_ver_mask = 0xff; ale->version = @@ -793,8 +821,8 @@ void cpsw_ale_start(struct cpsw_ale *ale) if (!ale->params.ale_entries) { ale_entries = - __raw_readl(ale->params.ale_regs + ALE_STATUS) & - ALE_STATUS_SIZE_MASK; + readl_relaxed(ale->params.ale_regs + ALE_STATUS) & + ALE_STATUS_SIZE_MASK; /* ALE available on newer NetCP switches has introduced * a register, ALE_STATUS, to indicate the size of ALE * table which shows the size as a multiple of 1024 entries. @@ -816,9 +844,9 @@ void cpsw_ale_start(struct cpsw_ale *ale) "ALE Table size %ld\n", ale->params.ale_entries); /* set default bits for existing h/w */ - ale->port_mask_bits = 3; - ale->port_num_bits = 2; - ale->vlan_field_bits = 3; + ale->port_mask_bits = ale->params.ale_ports; + ale->port_num_bits = order_base_2(ale->params.ale_ports); + ale->vlan_field_bits = ale->params.ale_ports; /* Set defaults override for ALE on NetCP NU switch and for version * 1R3 @@ -847,57 +875,12 @@ void cpsw_ale_start(struct cpsw_ale *ale) ale_controls[ALE_PORT_UNTAGGED_EGRESS].shift = 0; ale_controls[ALE_PORT_UNTAGGED_EGRESS].offset = ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS; - ale->port_mask_bits = ale->params.ale_ports; - ale->port_num_bits = ale->params.ale_ports - 1; - ale->vlan_field_bits = ale->params.ale_ports; - } else if (ale->version == ALE_VERSION_1R3) { - ale->port_mask_bits = ale->params.ale_ports; - ale->port_num_bits = 3; - ale->vlan_field_bits = ale->params.ale_ports; } - cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1); - cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); - - timer_setup(&ale->timer, cpsw_ale_timer, 0); - if (ale->ageout) { - ale->timer.expires = jiffies + ale->ageout; - add_timer(&ale->timer); - } -} -EXPORT_SYMBOL_GPL(cpsw_ale_start); - -void cpsw_ale_stop(struct cpsw_ale *ale) -{ - del_timer_sync(&ale->timer); -} -EXPORT_SYMBOL_GPL(cpsw_ale_stop); - -struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) -{ - struct cpsw_ale *ale; - - ale = kzalloc(sizeof(*ale), GFP_KERNEL); - if (!ale) - return NULL; - - ale->params = *params; - ale->ageout = ale->params.ale_ageout * HZ; - return ale; } EXPORT_SYMBOL_GPL(cpsw_ale_create); -int cpsw_ale_destroy(struct cpsw_ale *ale) -{ - if (!ale) - return -EINVAL; - cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); - kfree(ale); - return 0; -} -EXPORT_SYMBOL_GPL(cpsw_ale_destroy); - void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data) { int i; diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index 25d24e8d0904..d4fe9016429b 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -100,7 +100,6 @@ enum cpsw_ale_port_state { #define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32) struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params); -int cpsw_ale_destroy(struct cpsw_ale *ale); void cpsw_ale_start(struct cpsw_ale *ale); void cpsw_ale_stop(struct cpsw_ale *ale); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 
e4d6edf387b3..6f9173ff9414 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -893,7 +893,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num); if (__chan_linear(chan_num) >= ctlr->num_chan) - return NULL; + return ERR_PTR(-EINVAL); chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL); if (!chan) diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 4bb561856af5..abceea802ea1 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1385,11 +1385,6 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd) return -EOPNOTSUPP; } -static int match_first_device(struct device *dev, void *data) -{ - return !strncmp(dev_name(dev), "davinci_mdio", 12); -} - /** * emac_dev_open - EMAC device open * @ndev: The DaVinci EMAC network adapter @@ -1489,8 +1484,8 @@ static int emac_dev_open(struct net_device *ndev) /* use the first phy on the bus if pdata did not give us a phy id */ if (!phydev && !priv->phy_id) { - phy = bus_find_device(&mdio_bus_type, NULL, NULL, - match_first_device); + phy = bus_find_device_by_name(&mdio_bus_type, NULL, + "davinci_mdio"); if (phy) { priv->phy_id = dev_name(phy); if (!priv->phy_id || !*priv->phy_id) @@ -1875,10 +1870,17 @@ static int davinci_emac_probe(struct platform_device *pdev) priv->txchan = cpdma_chan_create(priv->dma, EMAC_DEF_TX_CH, emac_tx_handler, 0); + if (IS_ERR(priv->txchan)) { + dev_err(&pdev->dev, "error initializing tx dma channel\n"); + rc = PTR_ERR(priv->txchan); + goto no_cpdma_chan; + } + priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH, emac_rx_handler, 1); - if (WARN_ON(!priv->txchan || !priv->rxchan)) { - rc = -ENOMEM; + if (IS_ERR(priv->rxchan)) { + dev_err(&pdev->dev, "error initializing rx dma channel\n"); + rc = PTR_ERR(priv->rxchan); goto no_cpdma_chan; } diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index e831c49713ee..56dbc0b9fedc 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -27,6 +27,7 @@ #include <linux/net_tstamp.h> #include <linux/ethtool.h> +#include "cpsw.h" #include "cpsw_ale.h" #include "netcp.h" #include "cpts.h" @@ -2047,10 +2048,6 @@ static const struct ethtool_ops keystone_ethtool_ops = { .get_ts_info = keystone_get_ts_info, }; -#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \ - ((mac)[2] << 16) | ((mac)[3] << 24)) -#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8)) - static void gbe_set_slave_mac(struct gbe_slave *slave, struct gbe_intf *gbe_intf) { @@ -3692,7 +3689,6 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv) del_timer_sync(&gbe_dev->timer); cpts_release(gbe_dev->cpts); cpsw_ale_stop(gbe_dev->ale); - cpsw_ale_destroy(gbe_dev->ale); netcp_txpipe_close(&gbe_dev->tx_pipe); free_secondary_ports(gbe_dev); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 64fda2e1040e..b919e89a9b93 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1652,19 +1652,16 @@ static __net_init int geneve_init_net(struct net *net) return 0; } -static void __net_exit geneve_exit_net(struct net *net) +static void geneve_destroy_tunnels(struct net *net, struct list_head *head) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *geneve, *next; struct net_device *dev, *aux; - LIST_HEAD(list); - - 
rtnl_lock(); /* gather any geneve devices that were moved into this ns */ for_each_netdev_safe(net, dev, aux) if (dev->rtnl_link_ops == &geneve_link_ops) - unregister_netdevice_queue(dev, &list); + unregister_netdevice_queue(dev, head); /* now gather any other geneve devices that were created in this ns */ list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) { @@ -1672,18 +1669,29 @@ static void __net_exit geneve_exit_net(struct net *net) * to the list by the previous loop. */ if (!net_eq(dev_net(geneve->dev), net)) - unregister_netdevice_queue(geneve->dev, &list); + unregister_netdevice_queue(geneve->dev, head); } + WARN_ON_ONCE(!list_empty(&gn->sock_list)); +} + +static void __net_exit geneve_exit_batch_net(struct list_head *net_list) +{ + struct net *net; + LIST_HEAD(list); + + rtnl_lock(); + list_for_each_entry(net, net_list, exit_list) + geneve_destroy_tunnels(net, &list); + /* unregister the devices gathered above */ unregister_netdevice_many(&list); rtnl_unlock(); - WARN_ON_ONCE(!list_empty(&gn->sock_list)); } static struct pernet_operations geneve_net_ops = { .init = geneve_init_net, - .exit = geneve_exit_net, + .exit_batch = geneve_exit_batch_net, .id = &geneve_net_id, .size = sizeof(struct geneve_net), }; diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 88ddfb92122b..0db3bd1ea06f 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -146,7 +146,6 @@ struct hv_netvsc_packet { struct netvsc_device_info { unsigned char mac_adr[ETH_ALEN]; - int ring_size; u32 num_chn; u32 send_sections; u32 recv_sections; @@ -188,18 +187,22 @@ struct rndis_message; struct netvsc_device; struct net_device_context; +extern u32 netvsc_ring_bytes; +extern struct reciprocal_value netvsc_ring_reciprocal; + struct netvsc_device *netvsc_device_add(struct hv_device *device, const struct netvsc_device_info *info); int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx); void netvsc_device_remove(struct hv_device *device); -int netvsc_send(struct net_device_context *ndc, +int netvsc_send(struct net_device *net, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, struct hv_page_buffer *page_buffer, struct sk_buff *skb); -void netvsc_linkstatus_callback(struct hv_device *device_obj, +void netvsc_linkstatus_callback(struct net_device *net, struct rndis_message *resp); int netvsc_recv_callback(struct net_device *net, + struct netvsc_device *nvdev, struct vmbus_channel *channel, void *data, u32 len, const struct ndis_tcp_ip_checksum_info *csum_info, @@ -220,7 +223,6 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, const u8 *key); int rndis_filter_receive(struct net_device *ndev, struct netvsc_device *net_dev, - struct hv_device *dev, struct vmbus_channel *channel, void *data, u32 buflen); @@ -635,14 +637,27 @@ struct nvsp_message { #define NETVSC_MTU 65535 #define NETVSC_MTU_MIN ETH_MIN_MTU -#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */ -#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */ -#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */ +/* Max buffer sizes allowed by a host */ +#define NETVSC_RECEIVE_BUFFER_SIZE (1024 * 1024 * 31) /* 31MB */ +#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024 * 1024 * 15) /* 15MB */ +#define NETVSC_RECEIVE_BUFFER_DEFAULT (1024 * 1024 * 16) + +#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */ +#define NETVSC_SEND_BUFFER_DEFAULT (1024 * 1024) + #define NETVSC_INVALID_INDEX -1 #define NETVSC_SEND_SECTION_SIZE 
6144 #define NETVSC_RECV_SECTION_SIZE 1728 +/* Default size of TX buf: 1MB, RX buf: 16MB */ +#define NETVSC_MIN_TX_SECTIONS 10 +#define NETVSC_DEFAULT_TX (NETVSC_SEND_BUFFER_DEFAULT \ + / NETVSC_SEND_SECTION_SIZE) +#define NETVSC_MIN_RX_SECTIONS 10 +#define NETVSC_DEFAULT_RX (NETVSC_RECEIVE_BUFFER_DEFAULT \ + / NETVSC_RECV_SECTION_SIZE) + #define NETVSC_RECEIVE_BUFFER_ID 0xcafe #define NETVSC_SEND_BUFFER_ID 0 @@ -690,6 +705,7 @@ struct netvsc_ethtool_stats { unsigned long tx_busy; unsigned long tx_send_full; unsigned long rx_comp_busy; + unsigned long rx_no_memory; unsigned long stop_queue; unsigned long wake_queue; }; @@ -804,13 +820,9 @@ struct netvsc_device { struct rndis_device *extension; - int ring_size; - u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ u32 pkt_align; /* alignment bytes, e.g. 8 */ - atomic_t open_cnt; - struct netvsc_channel chan_table[VRSS_CHANNEL_MAX]; struct rcu_head rcu; @@ -1425,32 +1437,6 @@ struct rndis_message { (sizeof(msg) + (sizeof(struct rndis_message) - \ sizeof(union rndis_message_container))) -/* get pointer to info buffer with message pointer */ -#define MESSAGE_TO_INFO_BUFFER(msg) \ - (((unsigned char *)(msg)) + msg->info_buf_offset) - -/* get pointer to status buffer with message pointer */ -#define MESSAGE_TO_STATUS_BUFFER(msg) \ - (((unsigned char *)(msg)) + msg->status_buf_offset) - -/* get pointer to OOBD buffer with message pointer */ -#define MESSAGE_TO_OOBD_BUFFER(msg) \ - (((unsigned char *)(msg)) + msg->oob_data_offset) - -/* get pointer to data buffer with message pointer */ -#define MESSAGE_TO_DATA_BUFFER(msg) \ - (((unsigned char *)(msg)) + msg->per_pkt_info_offset) - -/* get pointer to contained message from NDIS_MESSAGE pointer */ -#define RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(rndis_msg) \ - ((void *) &rndis_msg->msg) - -/* get pointer to contained message from NDIS_MESSAGE pointer */ -#define RNDIS_MESSAGE_RAW_PTR_TO_MESSAGE_PTR(rndis_msg) \ - ((void *) rndis_msg) - - - #define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \ sizeof(union rndis_message_container)) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index bfc79698b8f4..17e529af79dc 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -31,6 +31,7 @@ #include <linux/vmalloc.h> #include <linux/rtnetlink.h> #include <linux/prefetch.h> +#include <linux/reciprocal_div.h> #include <asm/sync_bitops.h> @@ -72,7 +73,7 @@ static struct netvsc_device *alloc_net_device(void) init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; - atomic_set(&net_device->open_cnt, 0); + net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; @@ -267,6 +268,11 @@ static int netvsc_init_buf(struct hv_device *device, buf_size = device_info->recv_sections * device_info->recv_section_size; buf_size = roundup(buf_size, PAGE_SIZE); + /* Legacy hosts only allow a smaller receive buffer */ + if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2) + buf_size = min_t(unsigned int, buf_size, + NETVSC_RECEIVE_BUFFER_SIZE_LEGACY); + net_device->recv_buf = vzalloc(buf_size); if (!net_device->recv_buf) { netdev_err(ndev, @@ -588,14 +594,11 @@ void netvsc_device_remove(struct hv_device *device) * Get the percentage of available bytes to write in the ring. * The return value is in the range 0 to 100. */
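/* netvsc_ring_bytes and netvsc_ring_reciprocal (declared in hyperv_net.h
 * above) are assumed to be computed once at module load from the
 * ring_size module parameter; the init site is outside this hunk.
 * A minimal sketch:
 */
static void example_init_ring_reciprocal(void)
{
	/* precompute the divisor so the hot path avoids a runtime division */
	netvsc_ring_bytes = ring_size * PAGE_SIZE;
	netvsc_ring_reciprocal = reciprocal_value(netvsc_ring_bytes);
}
/* reciprocal_divide(avail_write * 100, netvsc_ring_reciprocal) below then
 * yields the same result as (avail_write * 100) / netvsc_ring_bytes.
 */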
-static inline u32 hv_ringbuf_avail_percent( - struct hv_ring_buffer_info *ring_info) +static u32 hv_ringbuf_avail_percent(const struct hv_ring_buffer_info *ring_info) { - u32 avail_read, avail_write; - - hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write); + u32 avail_write = hv_get_bytes_to_write(ring_info); - return avail_write * 100 / ring_info->ring_datasize; + return reciprocal_divide(avail_write * 100, netvsc_ring_reciprocal); } static inline void netvsc_free_send_slot(struct netvsc_device *net_device, @@ -698,26 +701,26 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) return NETVSC_INVALID_INDEX; } -static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, - unsigned int section_index, - u32 pend_size, - struct hv_netvsc_packet *packet, - struct rndis_message *rndis_msg, - struct hv_page_buffer *pb, - struct sk_buff *skb) +static void netvsc_copy_to_send_buf(struct netvsc_device *net_device, + unsigned int section_index, + u32 pend_size, + struct hv_netvsc_packet *packet, + struct rndis_message *rndis_msg, + struct hv_page_buffer *pb, + bool xmit_more) { char *start = net_device->send_buf; char *dest = start + (section_index * net_device->send_section_size) + pend_size; int i; - u32 msg_size = 0; u32 padding = 0; - u32 remain = packet->total_data_buflen % net_device->pkt_align; u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt : packet->page_buf_cnt; + u32 remain; /* Add padding */ - if (skb->xmit_more && remain && !packet->cp_partial) { + remain = packet->total_data_buflen & (net_device->pkt_align - 1); + if (xmit_more && remain) { padding = net_device->pkt_align - remain; rndis_msg->msg_len += padding; packet->total_data_buflen += padding; @@ -729,16 +732,11 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, u32 len = pb[i].len; memcpy(dest, (src + offset), len); - msg_size += len; dest += len; } - if (padding) { + if (padding) memset(dest, 0, padding); - msg_size += padding; - } - - return msg_size; } static inline int netvsc_send_pkt( @@ -831,12 +829,13 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send, } /* RCU already held by caller */ -int netvsc_send(struct net_device_context *ndev_ctx, +int netvsc_send(struct net_device *ndev, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, struct hv_page_buffer *pb, struct sk_buff *skb) { + struct net_device_context *ndev_ctx = netdev_priv(ndev); struct netvsc_device *net_device = rcu_dereference_bh(ndev_ctx->nvdev); struct hv_device *device = ndev_ctx->device_ctx; @@ -847,8 +846,7 @@ int netvsc_send(struct net_device_context *ndev_ctx, struct multi_send_data *msdp; struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL; struct sk_buff *msd_skb = NULL; - bool try_batch; - bool xmit_more = (skb != NULL) ? skb->xmit_more : false; + bool try_batch, xmit_more; /* If the device is rescinded, return an error and the packet will get dropped. 
*/ if (unlikely(!net_device || net_device->destroy)) @@ -899,10 +897,17 @@ int netvsc_send(struct net_device_context *ndev_ctx, } } + /* Keep aggregating only if the stack says more data is coming, + * we are not doing a mixed-mode send, and the send queue is not + * flow blocked + */ + xmit_more = skb->xmit_more && + !packet->cp_partial && + !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx)); + if (section_index != NETVSC_INVALID_INDEX) { netvsc_copy_to_send_buf(net_device, section_index, msd_len, - packet, rndis_msg, pb, skb); + packet, rndis_msg, pb, xmit_more); packet->send_buf_index = section_index; @@ -922,7 +927,7 @@ int netvsc_send(struct net_device_context *ndev_ctx, if (msdp->skb) dev_consume_skb_any(msdp->skb); - if (xmit_more && !packet->cp_partial) { + if (xmit_more) { msdp->skb = skb; msdp->pkt = packet; msdp->count++; @@ -1085,7 +1090,7 @@ static int netvsc_receive(struct net_device *ndev, u32 buflen = vmxferpage_packet->ranges[i].byte_count; /* Pass it to the upper layer */ - status = rndis_filter_receive(ndev, net_device, device, + status = rndis_filter_receive(ndev, net_device, channel, data, buflen); } @@ -1249,7 +1254,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, const struct netvsc_device_info *device_info) { int i, ret = 0; - int ring_size = device_info->ring_size; struct netvsc_device *net_device; struct net_device *ndev = hv_get_drvdata(device); struct net_device_context *net_device_ctx = netdev_priv(ndev); @@ -1261,8 +1265,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) net_device_ctx->tx_table[i] = 0; - net_device->ring_size = ring_size; - /* Because the device uses NAPI, all the interrupt batching and * control is done via Net softirq, not the channel handling */ @@ -1289,10 +1291,9 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, netvsc_poll, NAPI_POLL_WEIGHT); /* Open the channel */ - ret = vmbus_open(device->channel, ring_size * PAGE_SIZE, - ring_size * PAGE_SIZE, NULL, 0, - netvsc_channel_cb, - net_device->chan_table); + ret = vmbus_open(device->channel, netvsc_ring_bytes, + netvsc_ring_bytes, NULL, 0, + netvsc_channel_cb, net_device->chan_table); if (ret != 0) { netif_napi_del(&net_device->chan_table[0].napi); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 5129647d420c..c5584c2d440e 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -35,6 +35,7 @@ #include <linux/slab.h> #include <linux/rtnetlink.h> #include <linux/netpoll.h> +#include <linux/reciprocal_div.h> #include <net/arp.h> #include <net/route.h> @@ -46,17 +47,15 @@ #include "hyperv_net.h" #define RING_SIZE_MIN 64 -#define NETVSC_MIN_TX_SECTIONS 10 -#define NETVSC_DEFAULT_TX 192 /* ~1M */ -#define NETVSC_MIN_RX_SECTIONS 10 /* ~64K */ -#define NETVSC_DEFAULT_RX 10485 /* Max ~16M */ #define LINKCHANGE_INT (2 * HZ) #define VF_TAKEOVER_INT (HZ / 10) -static int ring_size = 128; -module_param(ring_size, int, S_IRUGO); +static unsigned int ring_size __ro_after_init = 128; +module_param(ring_size, uint, S_IRUGO); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); +unsigned int netvsc_ring_bytes __ro_after_init; +struct reciprocal_value netvsc_ring_reciprocal __ro_after_init; static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | @@ -174,17 +173,15 @@ out: return ret; } -static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, - int pkt_type) +static inline void *init_ppi_data(struct 
rndis_message *msg, + u32 ppi_size, u32 pkt_type) { - struct rndis_packet *rndis_pkt; + struct rndis_packet *rndis_pkt = &msg->msg.pkt; struct rndis_per_packet_info *ppi; - rndis_pkt = &msg->msg.pkt; rndis_pkt->data_offset += ppi_size; - - ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt + - rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len); + ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset + + rndis_pkt->per_pkt_info_len; ppi->size = ppi_size; ppi->type = pkt_type; @@ -192,7 +189,7 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, rndis_pkt->per_pkt_info_len += ppi_size; - return ppi; + return ppi + 1; } /* Azure hosts don't support non-TCP port numbers in hashing for fragmented @@ -469,10 +466,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) int ret; unsigned int num_data_pgs; struct rndis_message *rndis_msg; - struct rndis_packet *rndis_pkt; struct net_device *vf_netdev; u32 rndis_msg_size; - struct rndis_per_packet_info *ppi; u32 hash; struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; @@ -527,34 +522,36 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) rndis_msg = (struct rndis_message *)skb->head; - memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE); - /* Add the rndis header */ rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET; rndis_msg->msg_len = packet->total_data_buflen; - rndis_pkt = &rndis_msg->msg.pkt; - rndis_pkt->data_offset = sizeof(struct rndis_packet); - rndis_pkt->data_len = packet->total_data_buflen; - rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet); + + rndis_msg->msg.pkt = (struct rndis_packet) { + .data_offset = sizeof(struct rndis_packet), + .data_len = packet->total_data_buflen, + .per_pkt_info_offset = sizeof(struct rndis_packet), + }; rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet); hash = skb_get_hash_raw(skb); if (hash != 0 && net->real_num_tx_queues > 1) { + u32 *hash_info; + rndis_msg_size += NDIS_HASH_PPI_SIZE; - ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE, - NBL_HASH_VALUE); - *(u32 *)((void *)ppi + ppi->ppi_offset) = hash; + hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE, + NBL_HASH_VALUE); + *hash_info = hash; } if (skb_vlan_tag_present(skb)) { struct ndis_pkt_8021q_info *vlan; rndis_msg_size += NDIS_VLAN_PPI_SIZE; - ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE, - IEEE_8021Q_INFO); + vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE, + IEEE_8021Q_INFO); - vlan = (void *)ppi + ppi->ppi_offset; + vlan->value = 0; vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK; vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; @@ -564,11 +561,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) struct ndis_tcp_lso_info *lso_info; rndis_msg_size += NDIS_LSO_PPI_SIZE; - ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE, - TCP_LARGESEND_PKTINFO); - - lso_info = (void *)ppi + ppi->ppi_offset; + lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE, + TCP_LARGESEND_PKTINFO); + lso_info->value = 0; lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; if (skb->protocol == htons(ETH_P_IP)) { lso_info->lso_v2_transmit.ip_version = @@ -593,12 +589,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) struct ndis_tcp_ip_checksum_info *csum_info; rndis_msg_size += NDIS_CSUM_PPI_SIZE; - ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE, - TCPIP_CHKSUM_PKTINFO); - - csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi + - ppi->ppi_offset); + csum_info = 
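With the rewrite above, init_ppi_data() returns ppi + 1, the first byte after the per-packet-info header, so each caller (hash, VLAN, LSO, checksum) writes its payload directly instead of re-deriving an offset. The underlying idiom in a freestanding sketch; struct ppi_hdr and ppi_reserve() are invented for illustration, not the RNDIS structures themselves:

#include <linux/types.h>

/* Illustrative sketch only. */
struct ppi_hdr {
	u32 size;	/* header plus payload, in bytes */
	u32 type;
};

/* Stamp a PPI header into @buf and return where the payload starts. */
static void *ppi_reserve(void *buf, u32 payload_len, u32 type)
{
	struct ppi_hdr *hdr = buf;

	hdr->size = sizeof(*hdr) + payload_len;
	hdr->type = type;

	return hdr + 1;	/* pointer arithmetic lands just past the header */
}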
init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE, + TCPIP_CHKSUM_PKTINFO); + csum_info->value = 0; csum_info->transmit.tcp_header_offset = skb_transport_offset(skb); if (skb->protocol == htons(ETH_P_IP)) { @@ -632,7 +626,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) /* timestamp packet in software */ skb_tx_timestamp(skb); - ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb); + ret = netvsc_send(net, packet, rndis_msg, pb, skb); if (likely(ret == 0)) return NETDEV_TX_OK; @@ -658,22 +652,14 @@ no_memory: /* * netvsc_linkstatus_callback - Link up/down notification */ -void netvsc_linkstatus_callback(struct hv_device *device_obj, +void netvsc_linkstatus_callback(struct net_device *net, struct rndis_message *resp) { struct rndis_indicate_status *indicate = &resp->msg.indicate_status; - struct net_device *net; - struct net_device_context *ndev_ctx; + struct net_device_context *ndev_ctx = netdev_priv(net); struct netvsc_reconfig *event; unsigned long flags; - net = hv_get_drvdata(device_obj); - - if (!net) - return; - - ndev_ctx = netdev_priv(net); - /* Update the physical link speed when changing to another vSwitch */ if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) { u32 speed; @@ -753,34 +739,26 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, * "wire" on the specified device. */ int netvsc_recv_callback(struct net_device *net, + struct netvsc_device *net_device, struct vmbus_channel *channel, void *data, u32 len, const struct ndis_tcp_ip_checksum_info *csum_info, const struct ndis_pkt_8021q_info *vlan) { struct net_device_context *net_device_ctx = netdev_priv(net); - struct netvsc_device *net_device; u16 q_idx = channel->offermsg.offer.sub_channel_index; - struct netvsc_channel *nvchan; + struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; struct sk_buff *skb; struct netvsc_stats *rx_stats; if (net->reg_state != NETREG_REGISTERED) return NVSP_STAT_FAIL; - rcu_read_lock(); - net_device = rcu_dereference(net_device_ctx->nvdev); - if (unlikely(!net_device)) - goto drop; - - nvchan = &net_device->chan_table[q_idx]; - /* Allocate a skb - TODO direct I/O to pages? 
*/ skb = netvsc_alloc_recv_skb(net, &nvchan->napi, csum_info, vlan, data, len); if (unlikely(!skb)) { -drop: - ++net->stats.rx_dropped; + ++net_device_ctx->eth_stats.rx_no_memory; rcu_read_unlock(); return NVSP_STAT_FAIL; } @@ -804,8 +782,6 @@ drop: u64_stats_update_end(&rx_stats->syncp); napi_gro_receive(&nvchan->napi, skb); - rcu_read_unlock(); - return 0; } @@ -860,7 +836,6 @@ static int netvsc_set_channels(struct net_device *net, memset(&device_info, 0, sizeof(device_info)); device_info.num_chn = count; - device_info.ring_size = ring_size; device_info.send_sections = nvdev->send_section_cnt; device_info.send_section_size = nvdev->send_section_size; device_info.recv_sections = nvdev->recv_section_cnt; @@ -975,7 +950,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) rndis_filter_close(nvdev); memset(&device_info, 0, sizeof(device_info)); - device_info.ring_size = ring_size; device_info.num_chn = nvdev->num_chn; device_info.send_sections = nvdev->send_section_cnt; device_info.send_section_size = nvdev->send_section_size; @@ -1133,12 +1107,13 @@ static const struct { u16 offset; } netvsc_stats[] = { { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) }, - { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) }, + { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) }, { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) }, { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) }, { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) }, { "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) }, { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) }, + { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) }, { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) }, { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) }, }, vf_stats[] = { @@ -1539,7 +1514,6 @@ static int netvsc_set_ringparam(struct net_device *ndev, memset(&device_info, 0, sizeof(device_info)); device_info.num_chn = nvdev->num_chn; - device_info.ring_size = ring_size; device_info.send_sections = new_tx; device_info.send_section_size = nvdev->send_section_size; device_info.recv_sections = new_rx; @@ -1995,7 +1969,6 @@ static int netvsc_probe(struct hv_device *dev, /* Notify the netvsc driver of the new device */ memset(&device_info, 0, sizeof(device_info)); - device_info.ring_size = ring_size; device_info.num_chn = VRSS_CHANNEL_DEFAULT; device_info.send_sections = NETVSC_DEFAULT_TX; device_info.send_section_size = NETVSC_SEND_SECTION_SIZE; @@ -2158,11 +2131,13 @@ static int __init netvsc_drv_init(void) if (ring_size < RING_SIZE_MIN) { ring_size = RING_SIZE_MIN; - pr_info("Increased ring_size to %d (min allowed)\n", + pr_info("Increased ring_size to %u (min allowed)\n", ring_size); } - ret = vmbus_driver_register(&netvsc_drv); + netvsc_ring_bytes = ring_size * PAGE_SIZE; + netvsc_ring_reciprocal = reciprocal_value(netvsc_ring_bytes); + ret = vmbus_driver_register(&netvsc_drv); if (ret) return ret; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 7b637c7dd1e5..c3ca191fea7f 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -134,11 +134,9 @@ static void put_rndis_request(struct rndis_device *dev, kfree(req); } -static void dump_rndis_message(struct hv_device *hv_dev, +static void dump_rndis_message(struct net_device *netdev, const struct rndis_message *rndis_msg) { - struct 
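netvsc_recv_callback() now takes the netvsc_device that netvsc_receive() already looked up under RCU, rather than re-dereferencing ndev_ctx->nvdev and taking its own read lock. The shape of that refactor as a sketch; struct owner and struct state are hypothetical:

#include <linux/rcupdate.h>

/* Illustrative sketch only. */
struct state { int generation; };
struct owner { struct state __rcu *cur; };

static void use_state(struct state *st)
{
	/* Safe: the caller holds rcu_read_lock() across this call. */
}

static void dispatch(struct owner *o)
{
	struct state *st;

	rcu_read_lock();
	st = rcu_dereference(o->cur);	/* dereference once, up front */
	if (st)
		use_state(st);		/* pass the pointer down, not the lock */
	rcu_read_unlock();
}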
net_device *netdev = hv_get_drvdata(hv_dev); - switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, " @@ -217,7 +215,6 @@ static int rndis_filter_send_request(struct rndis_device *dev, struct hv_netvsc_packet *packet; struct hv_page_buffer page_buf[2]; struct hv_page_buffer *pb = page_buf; - struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); int ret; /* Setup the packet to send it */ @@ -245,7 +242,7 @@ static int rndis_filter_send_request(struct rndis_device *dev, } rcu_read_lock_bh(); - ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL); + ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL); rcu_read_unlock_bh(); return ret; @@ -354,6 +351,7 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type) } static int rndis_filter_receive_data(struct net_device *ndev, + struct netvsc_device *nvdev, struct rndis_device *dev, struct rndis_message *msg, struct vmbus_channel *channel, @@ -390,14 +388,14 @@ static int rndis_filter_receive_data(struct net_device *ndev, */ data = (void *)((unsigned long)data + data_offset); csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO); - return netvsc_recv_callback(ndev, channel, + + return netvsc_recv_callback(ndev, nvdev, channel, data, rndis_pkt->data_len, csum_info, vlan); } int rndis_filter_receive(struct net_device *ndev, struct netvsc_device *net_dev, - struct hv_device *dev, struct vmbus_channel *channel, void *data, u32 buflen) { @@ -419,11 +417,12 @@ int rndis_filter_receive(struct net_device *ndev, } if (netif_msg_rx_status(net_device_ctx)) - dump_rndis_message(dev, rndis_msg); + dump_rndis_message(ndev, rndis_msg); switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: - return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg, + return rndis_filter_receive_data(ndev, net_dev, + rndis_dev, rndis_msg, channel, data, buflen); case RNDIS_MSG_INIT_C: case RNDIS_MSG_QUERY_C: @@ -434,7 +433,7 @@ int rndis_filter_receive(struct net_device *ndev, case RNDIS_MSG_INDICATE: /* notification msgs */ - netvsc_linkstatus_callback(dev, rndis_msg); + netvsc_linkstatus_callback(ndev, rndis_msg); break; default: netdev_err(ndev, @@ -1040,8 +1039,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) /* Set the channel before opening.*/ nvchan->channel = new_sc; - ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE, - nvscdev->ring_size * PAGE_SIZE, NULL, 0, + ret = vmbus_open(new_sc, netvsc_ring_bytes, + netvsc_ring_bytes, NULL, 0, netvsc_channel_cb, nvchan); if (ret == 0) napi_enable(&nvchan->napi); @@ -1222,7 +1221,6 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, struct ndis_recv_scale_cap rsscap; u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); u32 mtu, size; - const struct cpumask *node_cpu_mask; u32 num_possible_rss_qs; int i, ret; @@ -1291,14 +1289,8 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, if (ret || rsscap.num_recv_que < 2) goto out; - /* - * We will limit the VRSS channels to the number CPUs in the NUMA node - * the primary channel is currently bound to. 
- * - * This also guarantees that num_possible_rss_qs <= num_online_cpus - */ - node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu)); - num_possible_rss_qs = min_t(u32, cpumask_weight(node_cpu_mask), + /* This guarantees that num_possible_rss_qs <= num_online_cpus */ + num_possible_rss_qs = min_t(u32, num_online_cpus(), rsscap.num_recv_que); net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs); @@ -1362,9 +1354,6 @@ int rndis_filter_open(struct netvsc_device *nvdev) if (!nvdev) return -EINVAL; - if (atomic_inc_return(&nvdev->open_cnt) != 1) - return 0; - return rndis_filter_open_device(nvdev->extension); } @@ -1373,13 +1362,12 @@ int rndis_filter_close(struct netvsc_device *nvdev) if (!nvdev) return -EINVAL; - if (atomic_dec_return(&nvdev->open_cnt) != 0) - return 0; - return rndis_filter_close_device(nvdev->extension); } bool rndis_filter_opened(const struct netvsc_device *nvdev) { - return atomic_read(&nvdev->open_cnt) > 0; + const struct rndis_device *dev = nvdev->extension; + + return dev->state == RNDIS_DEV_DATAINITIALIZED; } diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 400fdbd3a120..64f1b1e77bc0 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -1,7 +1,7 @@ /* * Analog Devices ADF7242 Low-Power IEEE 802.15.4 Transceiver * - * Copyright 2009-2015 Analog Devices Inc. + * Copyright 2009-2017 Analog Devices Inc. * * Licensed under the GPL-2 or later. * @@ -344,12 +344,18 @@ static int adf7242_wait_status(struct adf7242_local *lp, unsigned int status, return ret; } -static int adf7242_wait_ready(struct adf7242_local *lp, int line) +static int adf7242_wait_rc_ready(struct adf7242_local *lp, int line) { return adf7242_wait_status(lp, STAT_RC_READY | STAT_SPI_READY, STAT_RC_READY | STAT_SPI_READY, line); } +static int adf7242_wait_spi_ready(struct adf7242_local *lp, int line) +{ + return adf7242_wait_status(lp, STAT_SPI_READY, + STAT_SPI_READY, line); +} + static int adf7242_write_fbuf(struct adf7242_local *lp, u8 *data, u8 len) { u8 *buf = lp->buf; @@ -369,7 +375,7 @@ static int adf7242_write_fbuf(struct adf7242_local *lp, u8 *data, u8 len) spi_message_add_tail(&xfer_head, &msg); spi_message_add_tail(&xfer_buf, &msg); - adf7242_wait_ready(lp, __LINE__); + adf7242_wait_spi_ready(lp, __LINE__); mutex_lock(&lp->bmux); buf[0] = CMD_SPI_PKT_WR; @@ -401,7 +407,7 @@ static int adf7242_read_fbuf(struct adf7242_local *lp, spi_message_add_tail(&xfer_head, &msg); spi_message_add_tail(&xfer_buf, &msg); - adf7242_wait_ready(lp, __LINE__); + adf7242_wait_spi_ready(lp, __LINE__); mutex_lock(&lp->bmux); if (packet_read) { @@ -432,7 +438,7 @@ static int adf7242_read_reg(struct adf7242_local *lp, u16 addr, u8 *data) .rx_buf = lp->buf_read_rx, }; - adf7242_wait_ready(lp, __LINE__); + adf7242_wait_spi_ready(lp, __LINE__); mutex_lock(&lp->bmux); lp->buf_read_tx[0] = CMD_SPI_MEM_RD(addr); @@ -462,7 +468,7 @@ static int adf7242_write_reg(struct adf7242_local *lp, u16 addr, u8 data) { int status; - adf7242_wait_ready(lp, __LINE__); + adf7242_wait_spi_ready(lp, __LINE__); mutex_lock(&lp->bmux); lp->buf_reg_tx[0] = CMD_SPI_MEM_WR(addr); @@ -484,7 +490,7 @@ static int adf7242_cmd(struct adf7242_local *lp, unsigned int cmd) dev_vdbg(&lp->spi->dev, "%s : CMD=0x%X\n", __func__, cmd); if (cmd != CMD_RC_PC_RESET_NO_WAIT) - adf7242_wait_ready(lp, __LINE__); + adf7242_wait_rc_ready(lp, __LINE__); mutex_lock(&lp->bmux); lp->buf_cmd = cmd; @@ -557,6 +563,22 @@ static int adf7242_verify_firmware(struct 
adf7242_local *lp, return 0; } +static void adf7242_clear_irqstat(struct adf7242_local *lp) +{ + adf7242_write_reg(lp, REG_IRQ1_SRC1, IRQ_CCA_COMPLETE | IRQ_SFD_RX | + IRQ_SFD_TX | IRQ_RX_PKT_RCVD | IRQ_TX_PKT_SENT | + IRQ_FRAME_VALID | IRQ_ADDRESS_VALID | IRQ_CSMA_CA); +} + +static int adf7242_cmd_rx(struct adf7242_local *lp) +{ + /* Wait until the ACK is sent */ + adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__); + adf7242_clear_irqstat(lp); + + return adf7242_cmd(lp, CMD_RC_RX); +} + static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm) { struct adf7242_local *lp = hw->priv; @@ -660,7 +682,7 @@ static int adf7242_start(struct ieee802154_hw *hw) struct adf7242_local *lp = hw->priv; adf7242_cmd(lp, CMD_RC_PHY_RDY); - adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF); + adf7242_clear_irqstat(lp); enable_irq(lp->spi->irq); set_bit(FLAG_START, &lp->flags); @@ -671,10 +693,10 @@ static void adf7242_stop(struct ieee802154_hw *hw) { struct adf7242_local *lp = hw->priv; + disable_irq(lp->spi->irq); adf7242_cmd(lp, CMD_RC_IDLE); clear_bit(FLAG_START, &lp->flags); - disable_irq(lp->spi->irq); - adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF); + adf7242_clear_irqstat(lp); } static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel) @@ -789,9 +811,12 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) struct adf7242_local *lp = hw->priv; int ret; + /* ensure existing instances of the IRQ handler have completed */ + disable_irq(lp->spi->irq); set_bit(FLAG_XMIT, &lp->flags); reinit_completion(&lp->tx_complete); adf7242_cmd(lp, CMD_RC_PHY_RDY); + adf7242_clear_irqstat(lp); ret = adf7242_write_fbuf(lp, skb->data, skb->len); if (ret) @@ -800,6 +825,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) ret = adf7242_cmd(lp, CMD_RC_CSMACA); if (ret) goto err; + enable_irq(lp->spi->irq); ret = wait_for_completion_interruptible_timeout(&lp->tx_complete, HZ / 10); @@ -822,7 +848,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) err: clear_bit(FLAG_XMIT, &lp->flags); - adf7242_cmd(lp, CMD_RC_RX); + adf7242_cmd_rx(lp); return ret; } @@ -846,7 +872,7 @@ static int adf7242_rx(struct adf7242_local *lp) skb = dev_alloc_skb(len); if (!skb) { - adf7242_cmd(lp, CMD_RC_RX); + adf7242_cmd_rx(lp); return -ENOMEM; } @@ -854,14 +880,14 @@ static int adf7242_rx(struct adf7242_local *lp) ret = adf7242_read_fbuf(lp, data, len, true); if (ret < 0) { kfree_skb(skb); - adf7242_cmd(lp, CMD_RC_RX); + adf7242_cmd_rx(lp); return ret; } lqi = data[len - 2]; lp->rssi = data[len - 1]; - adf7242_cmd(lp, CMD_RC_RX); + ret = adf7242_cmd_rx(lp); skb_trim(skb, len - 2); /* Don't put RSSI/LQI or CRC into the frame */ @@ -870,7 +896,7 @@ static int adf7242_rx(struct adf7242_local *lp) dev_dbg(&lp->spi->dev, "%s: ret=%d len=%d lqi=%d rssi=%d\n", __func__, ret, (int)len, (int)lqi, lp->rssi); - return 0; + return ret; } static const struct ieee802154_ops adf7242_ops = { @@ -888,7 +914,7 @@ static const struct ieee802154_ops adf7242_ops = { .set_cca_ed_level = adf7242_set_cca_ed_level, }; -static void adf7242_debug(u8 irq1) +static void adf7242_debug(struct adf7242_local *lp, u8 irq1) { #ifdef DEBUG u8 stat; @@ -906,9 +932,12 @@ static void adf7242_debug(u8 irq1) irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "", irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : ""); - dev_dbg(&lp->spi->dev, "%s STATUS = %X:\n%s\n%s%s%s%s%s\n", + dev_dbg(&lp->spi->dev, "%s STATUS = %X:\n%s\n%s\n%s\n%s\n%s%s%s%s%s\n", __func__, stat, + stat & STAT_SPI_READY ? 
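adf7242_xmit() above now brackets the TX programming with disable_irq()/enable_irq(), so any handler instance still running finishes before the frame buffer and IRQ status are touched. The bracket in isolation, as a hedged sketch; program_tx_hw() stands in for the fbuf write and the CSMA-CA command:

#include <linux/interrupt.h>

/* Illustrative sketch only; program_tx_hw is a hypothetical callback. */
static int xmit_quiesced(unsigned int irq, int (*program_tx_hw)(void *),
			 void *priv)
{
	int ret;

	disable_irq(irq);		/* also waits for running handlers */
	ret = program_tx_hw(priv);	/* ISR cannot see half-set TX state */
	enable_irq(irq);		/* re-arm before waiting for the TX IRQ */

	return ret;
}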
"SPI_READY" : "SPI_BUSY", + stat & STAT_IRQ_STATUS ? "IRQ_PENDING" : "IRQ_CLEAR", stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY", + stat & STAT_CCA_RESULT ? "CHAN_IDLE" : "CHAN_BUSY", (stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "", (stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "", (stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "", @@ -923,20 +952,20 @@ static irqreturn_t adf7242_isr(int irq, void *data) unsigned int xmit; u8 irq1; - adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__); - adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1); - adf7242_write_reg(lp, REG_IRQ1_SRC1, irq1); if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA))) dev_err(&lp->spi->dev, "%s :ERROR IRQ1 = 0x%X\n", __func__, irq1); - adf7242_debug(irq1); + adf7242_debug(lp, irq1); xmit = test_bit(FLAG_XMIT, &lp->flags); if (xmit && (irq1 & IRQ_CSMA_CA)) { + adf7242_wait_status(lp, RC_STATUS_PHY_RDY, + RC_STATUS_MASK, __LINE__); + if (ADF7242_REPORT_CSMA_CA_STAT) { u8 astat; @@ -957,6 +986,7 @@ static irqreturn_t adf7242_isr(int irq, void *data) lp->tx_stat = SUCCESS; } complete(&lp->tx_complete); + adf7242_clear_irqstat(lp); } else if (!xmit && (irq1 & IRQ_RX_PKT_RCVD) && (irq1 & IRQ_FRAME_VALID)) { adf7242_rx(lp); @@ -965,16 +995,19 @@ static irqreturn_t adf7242_isr(int irq, void *data) dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X\n", __func__, __LINE__, irq1); adf7242_cmd(lp, CMD_RC_PHY_RDY); - adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF); - adf7242_cmd(lp, CMD_RC_RX); + adf7242_cmd_rx(lp); } else { /* This can only be xmit without IRQ, likely a RX packet. * we get an TX IRQ shortly - do nothing or let the xmit * timeout handle this */ + dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X, xmit %d\n", __func__, __LINE__, irq1, xmit); + adf7242_wait_status(lp, RC_STATUS_PHY_RDY, + RC_STATUS_MASK, __LINE__); complete(&lp->tx_complete); + adf7242_clear_irqstat(lp); } return IRQ_HANDLED; @@ -994,7 +1027,7 @@ static int adf7242_soft_reset(struct adf7242_local *lp, int line) adf7242_set_promiscuous_mode(lp->hw, lp->promiscuous); adf7242_set_csma_params(lp->hw, lp->min_be, lp->max_be, lp->max_cca_retries); - adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF); + adf7242_clear_irqstat(lp); if (test_bit(FLAG_START, &lp->flags)) { enable_irq(lp->spi->irq); @@ -1060,7 +1093,7 @@ static int adf7242_hw_init(struct adf7242_local *lp) adf7242_write_reg(lp, REG_IRQ1_EN0, 0); adf7242_write_reg(lp, REG_IRQ1_EN1, IRQ_RX_PKT_RCVD | IRQ_CSMA_CA); - adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF); + adf7242_clear_irqstat(lp); adf7242_write_reg(lp, REG_IRQ1_SRC0, 0xFF); adf7242_cmd(lp, CMD_RC_IDLE); @@ -1086,8 +1119,11 @@ static int adf7242_stats_show(struct seq_file *file, void *offset) irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "", irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : ""); - seq_printf(file, "STATUS = %X:\n%s\n%s%s%s%s%s\n", stat, + seq_printf(file, "STATUS = %X:\n%s\n%s\n%s\n%s\n%s%s%s%s%s\n", stat, + stat & STAT_SPI_READY ? "SPI_READY" : "SPI_BUSY", + stat & STAT_IRQ_STATUS ? "IRQ_PENDING" : "IRQ_CLEAR", stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY", + stat & STAT_CCA_RESULT ? "CHAN_IDLE" : "CHAN_BUSY", (stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "", (stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "", (stat & 0xf) == RC_STATUS_PHY_RDY ? 
"RC_STATUS_PHY_RDY" : "", @@ -1257,12 +1293,14 @@ static int adf7242_remove(struct spi_device *spi) static const struct of_device_id adf7242_of_match[] = { { .compatible = "adi,adf7242", }, + { .compatible = "adi,adf7241", }, { }, }; MODULE_DEVICE_TABLE(of, adf7242_of_match); static const struct spi_device_id adf7242_device_id[] = { { .name = "adf7242", }, + { .name = "adf7241", }, { }, }; MODULE_DEVICE_TABLE(spi, adf7242_device_id); diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 77cc4fbaeace..c1f008fe4e1d 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -315,13 +315,13 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb, *pskb = skb; } - ipvlan_skb_crossing_ns(skb, dev); if (local) { skb->pkt_type = PACKET_HOST; if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS) success = true; } else { + skb->dev = dev; ret = RX_HANDLER_ANOTHER; success = true; } @@ -586,7 +586,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) return NET_XMIT_SUCCESS; } - ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev); + skb->dev = ipvlan->phy_dev; return dev_queue_xmit(skb); } @@ -664,8 +664,6 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb, struct sk_buff *skb = *pskb; struct ethhdr *eth = eth_hdr(skb); rx_handler_result_t ret = RX_HANDLER_PASS; - void *lyr3h; - int addr_type; if (is_multicast_ether_addr(eth->h_dest)) { if (ipvlan_external_frame(skb, port)) { @@ -683,15 +681,8 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb, } } } else { - struct ipvl_addr *addr; - - lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type); - if (!lyr3h) - return ret; - - addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); - if (addr) - ret = ipvlan_rcv_frame(addr, pskb, false); + /* Perform like l3 mode for non-multicast packet */ + ret = ipvlan_handle_mode_l3(pskb, port); } return ret; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 30cb803e2fe5..2469df118fbf 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -850,6 +850,19 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) return ipvlan_del_addr(ipvlan, ip6_addr, true); } +static bool ipvlan_is_valid_dev(const struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + + if (!netif_is_ipvlan(dev)) + return false; + + if (!ipvlan || !ipvlan->port) + return false; + + return true; +} + static int ipvlan_addr6_event(struct notifier_block *unused, unsigned long event, void *ptr) { @@ -857,10 +870,7 @@ static int ipvlan_addr6_event(struct notifier_block *unused, struct net_device *dev = (struct net_device *)if6->idev->dev; struct ipvl_dev *ipvlan = netdev_priv(dev); - if (!netif_is_ipvlan(dev)) - return NOTIFY_DONE; - - if (!ipvlan || !ipvlan->port) + if (!ipvlan_is_valid_dev(dev)) return NOTIFY_DONE; switch (event) { @@ -888,10 +898,7 @@ static int ipvlan_addr6_validator_event(struct notifier_block *unused, if (in_softirq()) return NOTIFY_DONE; - if (!netif_is_ipvlan(dev)) - return NOTIFY_DONE; - - if (!ipvlan || !ipvlan->port) + if (!ipvlan_is_valid_dev(dev)) return NOTIFY_DONE; switch (event) { @@ -932,10 +939,7 @@ static int ipvlan_addr4_event(struct notifier_block *unused, struct ipvl_dev *ipvlan = netdev_priv(dev); struct in_addr ip4_addr; - if (!netif_is_ipvlan(dev)) - return NOTIFY_DONE; - - if (!ipvlan || !ipvlan->port) + if (!ipvlan_is_valid_dev(dev)) return NOTIFY_DONE; 
switch (event) { @@ -961,10 +965,7 @@ static int ipvlan_addr4_validator_event(struct notifier_block *unused, struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev; struct ipvl_dev *ipvlan = netdev_priv(dev); - if (!netif_is_ipvlan(dev)) - return NOTIFY_DONE; - - if (!ipvlan || !ipvlan->port) + if (!ipvlan_is_valid_dev(dev)) return NOTIFY_DONE; switch (event) { diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 1d025ab9568f..7de88b33d5b9 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -393,7 +393,10 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb) #define MACSEC_PORT_SCB (0x0000) #define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL) -#define DEFAULT_SAK_LEN 16 +#define MACSEC_GCM_AES_128_SAK_LEN 16 +#define MACSEC_GCM_AES_256_SAK_LEN 32 + +#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN #define DEFAULT_SEND_SCI true #define DEFAULT_ENCRYPT false #define DEFAULT_ENCODING_SA 0 @@ -2362,15 +2365,26 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) { struct macsec_tx_sc *tx_sc = &secy->tx_sc; struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY); + u64 csid; if (!secy_nest) return 1; + switch (secy->key_len) { + case MACSEC_GCM_AES_128_SAK_LEN: + csid = MACSEC_DEFAULT_CIPHER_ID; + break; + case MACSEC_GCM_AES_256_SAK_LEN: + csid = MACSEC_CIPHER_ID_GCM_AES_256; + break; + default: + goto cancel; + } + if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci, MACSEC_SECY_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, - MACSEC_DEFAULT_CIPHER_ID, - MACSEC_SECY_ATTR_PAD) || + csid, MACSEC_SECY_ATTR_PAD) || nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || @@ -3015,8 +3029,8 @@ static void macsec_setup(struct net_device *dev) eth_zero_addr(dev->broadcast); } -static void macsec_changelink_common(struct net_device *dev, - struct nlattr *data[]) +static int macsec_changelink_common(struct net_device *dev, + struct nlattr *data[]) { struct macsec_secy *secy; struct macsec_tx_sc *tx_sc; @@ -3056,6 +3070,22 @@ static void macsec_changelink_common(struct net_device *dev, if (data[IFLA_MACSEC_VALIDATION]) secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]); + + if (data[IFLA_MACSEC_CIPHER_SUITE]) { + switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) { + case MACSEC_CIPHER_ID_GCM_AES_128: + case MACSEC_DEFAULT_CIPHER_ID: + secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; + break; + case MACSEC_CIPHER_ID_GCM_AES_256: + secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; + break; + default: + return -EINVAL; + } + } + + return 0; } static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], @@ -3071,9 +3101,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], data[IFLA_MACSEC_PORT]) return -EINVAL; - macsec_changelink_common(dev, data); - - return 0; + return macsec_changelink_common(dev, data); } static void macsec_del_dev(struct macsec_dev *macsec) @@ -3270,8 +3298,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev, if (err) goto unlink; - if (data) - macsec_changelink_common(dev, data); + if (data) { + err = macsec_changelink_common(dev, data); + if (err) + goto del_dev; + } err = register_macsec_dev(real_dev, dev); if (err < 0) @@ -3320,8 +3351,9 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[], } switch (csid) { + case MACSEC_CIPHER_ID_GCM_AES_128: + case 
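The macsec hunks map in both directions between the configured SAK length and the cipher suite ID carried over netlink: dumps derive the ID from key_len, changelink derives key_len from the ID. The two mappings pulled out as a sketch, assuming the MACSEC_* constants from the uapi header; the helper names are invented:

#include <linux/errno.h>
#include <linux/types.h>
#include <uapi/linux/if_macsec.h>

/* Illustrative sketch only. */
static int sak_len_to_csid(u8 key_len, u64 *csid)
{
	switch (key_len) {
	case 16:				/* GCM-AES-128 */
		*csid = MACSEC_DEFAULT_CIPHER_ID;
		return 0;
	case 32:				/* GCM-AES-256 */
		*csid = MACSEC_CIPHER_ID_GCM_AES_256;
		return 0;
	default:
		return -EINVAL;
	}
}

static int csid_to_sak_len(u64 csid, u8 *key_len)
{
	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_DEFAULT_CIPHER_ID:		/* historic ID for 128 */
		*key_len = 16;
		return 0;
	case MACSEC_CIPHER_ID_GCM_AES_256:
		*key_len = 32;
		return 0;
	default:
		return -EINVAL;
	}
}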
MACSEC_CIPHER_ID_GCM_AES_256: case MACSEC_DEFAULT_CIPHER_ID: - case MACSEC_DEFAULT_CIPHER_ALT: if (icv_len < MACSEC_MIN_ICV_LEN || icv_len > MACSEC_STD_ICV_LEN) return -EINVAL; @@ -3390,12 +3422,24 @@ static int macsec_fill_info(struct sk_buff *skb, { struct macsec_secy *secy = &macsec_priv(dev)->secy; struct macsec_tx_sc *tx_sc = &secy->tx_sc; + u64 csid; + + switch (secy->key_len) { + case MACSEC_GCM_AES_128_SAK_LEN: + csid = MACSEC_DEFAULT_CIPHER_ID; + break; + case MACSEC_GCM_AES_256_SAK_LEN: + csid = MACSEC_CIPHER_ID_GCM_AES_256; + break; + default: + goto nla_put_failure; + } if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci, IFLA_MACSEC_PAD) || nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE, - MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) || + csid, IFLA_MACSEC_PAD) || nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) || diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile new file mode 100644 index 000000000000..09388c06171d --- /dev/null +++ b/drivers/net/netdevsim/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_NETDEVSIM) += netdevsim.o + +netdevsim-objs := \ + netdev.o \ + +ifeq ($(CONFIG_BPF_SYSCALL),y) +netdevsim-objs += \ + bpf.o +endif diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c new file mode 100644 index 000000000000..de73c1ff0939 --- /dev/null +++ b/drivers/net/netdevsim/bpf.c @@ -0,0 +1,643 @@ +/* + * Copyright (C) 2017 Netronome Systems, Inc. + * + * This software is licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree. + * + * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" + * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE + * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME + * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + */ + +#include <linux/bpf.h> +#include <linux/bpf_verifier.h> +#include <linux/debugfs.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/rtnetlink.h> +#include <net/pkt_cls.h> + +#include "netdevsim.h" + +#define pr_vlog(env, fmt, ...) 
\ + bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__) + +struct nsim_bpf_bound_prog { + struct netdevsim *ns; + struct bpf_prog *prog; + struct dentry *ddir; + const char *state; + bool is_loaded; + struct list_head l; +}; + +#define NSIM_BPF_MAX_KEYS 2 + +struct nsim_bpf_bound_map { + struct netdevsim *ns; + struct bpf_offloaded_map *map; + struct mutex mutex; + struct nsim_map_entry { + void *key; + void *value; + } entry[NSIM_BPF_MAX_KEYS]; + struct list_head l; +}; + +static int nsim_debugfs_bpf_string_read(struct seq_file *file, void *data) +{ + const char **str = file->private; + + if (*str) + seq_printf(file, "%s\n", *str); + + return 0; +} + +static int nsim_debugfs_bpf_string_open(struct inode *inode, struct file *f) +{ + return single_open(f, nsim_debugfs_bpf_string_read, inode->i_private); +} + +static const struct file_operations nsim_bpf_string_fops = { + .owner = THIS_MODULE, + .open = nsim_debugfs_bpf_string_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek +}; + +static int +nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn) +{ + struct nsim_bpf_bound_prog *state; + + state = env->prog->aux->offload->dev_priv; + if (state->ns->bpf_bind_verifier_delay && !insn_idx) + msleep(state->ns->bpf_bind_verifier_delay); + + if (insn_idx == env->prog->len - 1) + pr_vlog(env, "Hello from netdevsim!\n"); + + return 0; +} + +static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = { + .insn_hook = nsim_bpf_verify_insn, +}; + +static bool nsim_xdp_offload_active(struct netdevsim *ns) +{ + return ns->xdp_prog_mode == XDP_ATTACHED_HW; +} + +static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded) +{ + struct nsim_bpf_bound_prog *state; + + if (!prog || !prog->aux->offload) + return; + + state = prog->aux->offload->dev_priv; + state->is_loaded = loaded; +} + +static int +nsim_bpf_offload(struct netdevsim *ns, struct bpf_prog *prog, bool oldprog) +{ + nsim_prog_set_loaded(ns->bpf_offloaded, false); + + WARN(!!ns->bpf_offloaded != oldprog, + "bad offload state, expected offload %sto be active", + oldprog ? "" : "not "); + ns->bpf_offloaded = prog; + ns->bpf_offloaded_id = prog ? 
prog->aux->id : 0; + nsim_prog_set_loaded(prog, true); + + return 0; +} + +int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct tc_cls_bpf_offload *cls_bpf = type_data; + struct bpf_prog *prog = cls_bpf->prog; + struct netdevsim *ns = cb_priv; + struct bpf_prog *oldprog; + + if (type != TC_SETUP_CLSBPF) { + NSIM_EA(cls_bpf->common.extack, + "only offload of BPF classifiers supported"); + return -EOPNOTSUPP; + } + + if (!tc_cls_can_offload_and_chain0(ns->netdev, &cls_bpf->common)) + return -EOPNOTSUPP; + + if (cls_bpf->common.protocol != htons(ETH_P_ALL)) { + NSIM_EA(cls_bpf->common.extack, + "only ETH_P_ALL supported as filter protocol"); + return -EOPNOTSUPP; + } + + if (!ns->bpf_tc_accept) { + NSIM_EA(cls_bpf->common.extack, + "netdevsim configured to reject BPF TC offload"); + return -EOPNOTSUPP; + } + /* Note: progs without skip_sw will probably not be dev bound */ + if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept) { + NSIM_EA(cls_bpf->common.extack, + "netdevsim configured to reject unbound programs"); + return -EOPNOTSUPP; + } + + if (cls_bpf->command != TC_CLSBPF_OFFLOAD) + return -EOPNOTSUPP; + + oldprog = cls_bpf->oldprog; + + /* Don't remove if oldprog doesn't match driver's state */ + if (ns->bpf_offloaded != oldprog) { + oldprog = NULL; + if (!cls_bpf->prog) + return 0; + if (ns->bpf_offloaded) { + NSIM_EA(cls_bpf->common.extack, + "driver and netdev offload states mismatch"); + return -EBUSY; + } + } + + return nsim_bpf_offload(ns, cls_bpf->prog, oldprog); +} + +int nsim_bpf_disable_tc(struct netdevsim *ns) +{ + if (ns->bpf_offloaded && !nsim_xdp_offload_active(ns)) + return -EBUSY; + return 0; +} + +static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf) +{ + if (!nsim_xdp_offload_active(ns) && !bpf->prog) + return 0; + if (!nsim_xdp_offload_active(ns) && bpf->prog && ns->bpf_offloaded) { + NSIM_EA(bpf->extack, "TC program is already loaded"); + return -EBUSY; + } + + return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns)); +} + +static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf) +{ + int err; + + if (ns->xdp_prog && (bpf->flags ^ ns->xdp_flags) & XDP_FLAGS_MODES) { + NSIM_EA(bpf->extack, "program loaded with different flags"); + return -EBUSY; + } + + if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) { + NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS"); + return -EOPNOTSUPP; + } + if (bpf->command == XDP_SETUP_PROG_HW && !ns->bpf_xdpoffload_accept) { + NSIM_EA(bpf->extack, "XDP offload disabled in DebugFS"); + return -EOPNOTSUPP; + } + + if (bpf->command == XDP_SETUP_PROG_HW) { + err = nsim_xdp_offload_prog(ns, bpf); + if (err) + return err; + } + + if (ns->xdp_prog) + bpf_prog_put(ns->xdp_prog); + + ns->xdp_prog = bpf->prog; + ns->xdp_flags = bpf->flags; + + if (!bpf->prog) + ns->xdp_prog_mode = XDP_ATTACHED_NONE; + else if (bpf->command == XDP_SETUP_PROG) + ns->xdp_prog_mode = XDP_ATTACHED_DRV; + else + ns->xdp_prog_mode = XDP_ATTACHED_HW; + + return 0; +} + +static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog) +{ + struct nsim_bpf_bound_prog *state; + char name[16]; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + state->ns = ns; + state->prog = prog; + state->state = "verify"; + + /* Program id is not populated yet when we create the state. 
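nsim_xdp_set_prog() above is the minimal attach-time bookkeeping an XDP driver does: refuse a mode-flags change while a program is attached, drop the reference on the old program, then record the new program and its flags. The core of it as a sketch; struct mydrv is hypothetical:

#include <linux/bpf.h>
#include <linux/if_link.h>	/* XDP_FLAGS_MODES */
#include <linux/netdevice.h>

/* Illustrative sketch only. */
struct mydrv {
	struct bpf_prog *xdp_prog;
	u32 xdp_flags;
};

static int mydrv_xdp_set(struct mydrv *d, struct netdev_bpf *bpf)
{
	/* Switching drv/hw/generic mode requires detaching first. */
	if (d->xdp_prog && (bpf->flags ^ d->xdp_flags) & XDP_FLAGS_MODES)
		return -EBUSY;

	if (d->xdp_prog)
		bpf_prog_put(d->xdp_prog);	/* drop ref on the old program */

	d->xdp_prog = bpf->prog;		/* NULL means detach */
	d->xdp_flags = bpf->flags;

	return 0;
}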
*/ + sprintf(name, "%u", ns->prog_id_gen++); + state->ddir = debugfs_create_dir(name, ns->ddir_bpf_bound_progs); + if (IS_ERR_OR_NULL(state->ddir)) { + kfree(state); + return -ENOMEM; + } + + debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id); + debugfs_create_file("state", 0400, state->ddir, + &state->state, &nsim_bpf_string_fops); + debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded); + + list_add_tail(&state->l, &ns->bpf_bound_progs); + + prog->aux->offload->dev_priv = state; + + return 0; +} + +static void nsim_bpf_destroy_prog(struct bpf_prog *prog) +{ + struct nsim_bpf_bound_prog *state; + + state = prog->aux->offload->dev_priv; + WARN(state->is_loaded, + "offload state destroyed while program still bound"); + debugfs_remove_recursive(state->ddir); + list_del(&state->l); + kfree(state); +} + +static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf) +{ + if (bpf->prog && bpf->prog->aux->offload) { + NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv"); + return -EINVAL; + } + if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) { + NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled"); + return -EINVAL; + } + if (nsim_xdp_offload_active(ns)) { + NSIM_EA(bpf->extack, "xdp offload active, can't load drv prog"); + return -EBUSY; + } + return 0; +} + +static int +nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf) +{ + struct nsim_bpf_bound_prog *state; + + if (!bpf->prog) + return 0; + + if (!bpf->prog->aux->offload) { + NSIM_EA(bpf->extack, "xdpoffload of non-bound program"); + return -EINVAL; + } + if (bpf->prog->aux->offload->netdev != ns->netdev) { + NSIM_EA(bpf->extack, "program bound to different dev"); + return -EINVAL; + } + + state = bpf->prog->aux->offload->dev_priv; + if (WARN_ON(strcmp(state->state, "xlated"))) { + NSIM_EA(bpf->extack, "offloading program in bad state"); + return -EINVAL; + } + return 0; +} + +static bool +nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key) +{ + return e->key && !memcmp(key, e->key, map->key_size); +} + +static int nsim_map_key_find(struct bpf_offloaded_map *offmap, void *key) +{ + struct nsim_bpf_bound_map *nmap = offmap->dev_priv; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) + if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key)) + return i; + + return -ENOENT; +} + +static int +nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx) +{ + struct nsim_bpf_bound_map *nmap = offmap->dev_priv; + + nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER); + if (!nmap->entry[idx].key) + return -ENOMEM; + nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER); + if (!nmap->entry[idx].value) { + kfree(nmap->entry[idx].key); + nmap->entry[idx].key = NULL; + return -ENOMEM; + } + + return 0; +} + +static int +nsim_map_get_next_key(struct bpf_offloaded_map *offmap, + void *key, void *next_key) +{ + struct nsim_bpf_bound_map *nmap = offmap->dev_priv; + int idx = -ENOENT; + + mutex_lock(&nmap->mutex); + + if (key) + idx = nsim_map_key_find(offmap, key); + if (idx == -ENOENT) + idx = 0; + else + idx++; + + for (; idx < ARRAY_SIZE(nmap->entry); idx++) { + if (nmap->entry[idx].key) { + memcpy(next_key, nmap->entry[idx].key, + offmap->map.key_size); + break; + } + } + + mutex_unlock(&nmap->mutex); + + if (idx == ARRAY_SIZE(nmap->entry)) + return -ENOENT; + return 0; +} + +static int +nsim_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value) +{ + struct nsim_bpf_bound_map *nmap = 
offmap->dev_priv; + int idx; + + mutex_lock(&nmap->mutex); + + idx = nsim_map_key_find(offmap, key); + if (idx >= 0) + memcpy(value, nmap->entry[idx].value, offmap->map.value_size); + + mutex_unlock(&nmap->mutex); + + return idx < 0 ? idx : 0; +} + +static int +nsim_map_update_elem(struct bpf_offloaded_map *offmap, + void *key, void *value, u64 flags) +{ + struct nsim_bpf_bound_map *nmap = offmap->dev_priv; + int idx, err = 0; + + mutex_lock(&nmap->mutex); + + idx = nsim_map_key_find(offmap, key); + if (idx < 0 && flags == BPF_EXIST) { + err = idx; + goto exit_unlock; + } + if (idx >= 0 && flags == BPF_NOEXIST) { + err = -EEXIST; + goto exit_unlock; + } + + if (idx < 0) { + for (idx = 0; idx < ARRAY_SIZE(nmap->entry); idx++) + if (!nmap->entry[idx].key) + break; + if (idx == ARRAY_SIZE(nmap->entry)) { + err = -E2BIG; + goto exit_unlock; + } + + err = nsim_map_alloc_elem(offmap, idx); + if (err) + goto exit_unlock; + } + + memcpy(nmap->entry[idx].key, key, offmap->map.key_size); + memcpy(nmap->entry[idx].value, value, offmap->map.value_size); +exit_unlock: + mutex_unlock(&nmap->mutex); + + return err; +} + +static int nsim_map_delete_elem(struct bpf_offloaded_map *offmap, void *key) +{ + struct nsim_bpf_bound_map *nmap = offmap->dev_priv; + int idx; + + if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) + return -EINVAL; + + mutex_lock(&nmap->mutex); + + idx = nsim_map_key_find(offmap, key); + if (idx >= 0) { + kfree(nmap->entry[idx].key); + kfree(nmap->entry[idx].value); + memset(&nmap->entry[idx], 0, sizeof(nmap->entry[idx])); + } + + mutex_unlock(&nmap->mutex); + + return idx < 0 ? idx : 0; +} + +static const struct bpf_map_dev_ops nsim_bpf_map_ops = { + .map_get_next_key = nsim_map_get_next_key, + .map_lookup_elem = nsim_map_lookup_elem, + .map_update_elem = nsim_map_update_elem, + .map_delete_elem = nsim_map_delete_elem, +}; + +static int +nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap) +{ + struct nsim_bpf_bound_map *nmap; + unsigned int i; + int err; + + if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY && + offmap->map.map_type != BPF_MAP_TYPE_HASH)) + return -EINVAL; + if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS) + return -ENOMEM; + if (offmap->map.map_flags) + return -EINVAL; + + nmap = kzalloc(sizeof(*nmap), GFP_USER); + if (!nmap) + return -ENOMEM; + + offmap->dev_priv = nmap; + nmap->ns = ns; + nmap->map = offmap; + mutex_init(&nmap->mutex); + + if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) { + for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) { + u32 *key; + + err = nsim_map_alloc_elem(offmap, i); + if (err) + goto err_free; + key = nmap->entry[i].key; + *key = i; + } + } + + offmap->dev_ops = &nsim_bpf_map_ops; + list_add_tail(&nmap->l, &ns->bpf_bound_maps); + + return 0; + +err_free: + while (i--) { + kfree(nmap->entry[i].key); + kfree(nmap->entry[i].value); + } + kfree(nmap); + return err; +} + +static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap) +{ + struct nsim_bpf_bound_map *nmap = offmap->dev_priv; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) { + kfree(nmap->entry[i].key); + kfree(nmap->entry[i].value); + } + list_del_init(&nmap->l); + mutex_destroy(&nmap->mutex); + kfree(nmap); +} + +int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct netdevsim *ns = netdev_priv(dev); + struct nsim_bpf_bound_prog *state; + int err; + + ASSERT_RTNL(); + + switch (bpf->command) { + case BPF_OFFLOAD_VERIFIER_PREP: + if (!ns->bpf_bind_accept) + return -EOPNOTSUPP; + + err = nsim_bpf_create_prog(ns,
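nsim_map_get_next_key() above implements the standard BPF iteration contract: a NULL or unknown key restarts the walk at the first occupied slot, a known key resumes just after its slot, and -ENOENT terminates the walk. Seen from the other side of the syscall, iterating such a map could look like this sketch, assuming libbpf's wrappers and a u32/u32 map fd obtained elsewhere:

#include <bpf/bpf.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch only; map_fd comes from bpf_create_map() or an
 * object load elsewhere.
 */
static void dump_map(int map_fd)
{
	uint32_t key, next, value;
	void *prev = NULL;	/* NULL asks the kernel for the first key */

	while (!bpf_map_get_next_key(map_fd, prev, &next)) {
		if (!bpf_map_lookup_elem(map_fd, &next, &value))
			printf("%u -> %u\n", next, value);
		key = next;
		prev = &key;	/* resume after the key we just visited */
	}
}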
bpf->verifier.prog); + if (err) + return err; + + bpf->verifier.ops = &nsim_bpf_analyzer_ops; + return 0; + case BPF_OFFLOAD_TRANSLATE: + state = bpf->offload.prog->aux->offload->dev_priv; + + state->state = "xlated"; + return 0; + case BPF_OFFLOAD_DESTROY: + nsim_bpf_destroy_prog(bpf->offload.prog); + return 0; + case XDP_QUERY_PROG: + bpf->prog_attached = ns->xdp_prog_mode; + bpf->prog_id = ns->xdp_prog ? ns->xdp_prog->aux->id : 0; + bpf->prog_flags = ns->xdp_prog ? ns->xdp_flags : 0; + return 0; + case XDP_SETUP_PROG: + err = nsim_setup_prog_checks(ns, bpf); + if (err) + return err; + + return nsim_xdp_set_prog(ns, bpf); + case XDP_SETUP_PROG_HW: + err = nsim_setup_prog_hw_checks(ns, bpf); + if (err) + return err; + + return nsim_xdp_set_prog(ns, bpf); + case BPF_OFFLOAD_MAP_ALLOC: + if (!ns->bpf_map_accept) + return -EOPNOTSUPP; + + return nsim_bpf_map_alloc(ns, bpf->offmap); + case BPF_OFFLOAD_MAP_FREE: + nsim_bpf_map_free(bpf->offmap); + return 0; + default: + return -EINVAL; + } +} + +int nsim_bpf_init(struct netdevsim *ns) +{ + INIT_LIST_HEAD(&ns->bpf_bound_progs); + INIT_LIST_HEAD(&ns->bpf_bound_maps); + + debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir, + &ns->bpf_offloaded_id); + + ns->bpf_bind_accept = true; + debugfs_create_bool("bpf_bind_accept", 0600, ns->ddir, + &ns->bpf_bind_accept); + debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir, + &ns->bpf_bind_verifier_delay); + ns->ddir_bpf_bound_progs = + debugfs_create_dir("bpf_bound_progs", ns->ddir); + if (IS_ERR_OR_NULL(ns->ddir_bpf_bound_progs)) + return -ENOMEM; + + ns->bpf_tc_accept = true; + debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir, + &ns->bpf_tc_accept); + debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ns->ddir, + &ns->bpf_tc_non_bound_accept); + ns->bpf_xdpdrv_accept = true; + debugfs_create_bool("bpf_xdpdrv_accept", 0600, ns->ddir, + &ns->bpf_xdpdrv_accept); + ns->bpf_xdpoffload_accept = true; + debugfs_create_bool("bpf_xdpoffload_accept", 0600, ns->ddir, + &ns->bpf_xdpoffload_accept); + + ns->bpf_map_accept = true; + debugfs_create_bool("bpf_map_accept", 0600, ns->ddir, + &ns->bpf_map_accept); + + return 0; +} + +void nsim_bpf_uninit(struct netdevsim *ns) +{ + WARN_ON(!list_empty(&ns->bpf_bound_progs)); + WARN_ON(!list_empty(&ns->bpf_bound_maps)); + WARN_ON(ns->xdp_prog); + WARN_ON(ns->bpf_offloaded); +} diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c new file mode 100644 index 000000000000..3fd567928f3d --- /dev/null +++ b/drivers/net/netdevsim/netdev.c @@ -0,0 +1,504 @@ +/* + * Copyright (C) 2017 Netronome Systems, Inc. + * + * This software is licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree. + * + * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" + * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE + * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME + * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 
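nsim_bpf_init() above publishes the simulator's accept/reject policy as plain debugfs booleans and u32s, which is the entire control plane this driver needs. The knob pattern reduced to a sketch; mydrv_knobs and its fields are invented:

#include <linux/debugfs.h>
#include <linux/err.h>

/* Illustrative sketch only. */
struct mydrv_knobs {
	struct dentry *ddir;
	bool tc_accept;
	u32 verifier_delay;
};

static int mydrv_debugfs_init(struct mydrv_knobs *k)
{
	k->ddir = debugfs_create_dir("mydrv", NULL);
	if (IS_ERR_OR_NULL(k->ddir))
		return -ENOMEM;

	k->tc_accept = true;	/* default: accept TC offload requests */
	debugfs_create_bool("tc_accept", 0600, k->ddir, &k->tc_accept);
	debugfs_create_u32("verifier_delay", 0600, k->ddir,
			   &k->verifier_delay);

	return 0;
}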
+ */ + +#include <linux/debugfs.h> +#include <linux/etherdevice.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/slab.h> +#include <net/netlink.h> +#include <net/pkt_cls.h> +#include <net/rtnetlink.h> + +#include "netdevsim.h" + +struct nsim_vf_config { + int link_state; + u16 min_tx_rate; + u16 max_tx_rate; + u16 vlan; + __be16 vlan_proto; + u16 qos; + u8 vf_mac[ETH_ALEN]; + bool spoofchk_enabled; + bool trusted; + bool rss_query_enabled; +}; + +static u32 nsim_dev_id; + +static int nsim_num_vf(struct device *dev) +{ + struct netdevsim *ns = to_nsim(dev); + + return ns->num_vfs; +} + +static struct bus_type nsim_bus = { + .name = DRV_NAME, + .dev_name = DRV_NAME, + .num_vf = nsim_num_vf, +}; + +static int nsim_vfs_enable(struct netdevsim *ns, unsigned int num_vfs) +{ + ns->vfconfigs = kcalloc(num_vfs, sizeof(struct nsim_vf_config), + GFP_KERNEL); + if (!ns->vfconfigs) + return -ENOMEM; + ns->num_vfs = num_vfs; + + return 0; +} + +static void nsim_vfs_disable(struct netdevsim *ns) +{ + kfree(ns->vfconfigs); + ns->vfconfigs = NULL; + ns->num_vfs = 0; +} + +static ssize_t +nsim_numvfs_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netdevsim *ns = to_nsim(dev); + unsigned int num_vfs; + int ret; + + ret = kstrtouint(buf, 0, &num_vfs); + if (ret) + return ret; + + rtnl_lock(); + if (ns->num_vfs == num_vfs) + goto exit_good; + if (ns->num_vfs && num_vfs) { + ret = -EBUSY; + goto exit_unlock; + } + + if (num_vfs) { + ret = nsim_vfs_enable(ns, num_vfs); + if (ret) + goto exit_unlock; + } else { + nsim_vfs_disable(ns); + } +exit_good: + ret = count; +exit_unlock: + rtnl_unlock(); + + return ret; +} + +static ssize_t +nsim_numvfs_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct netdevsim *ns = to_nsim(dev); + + return sprintf(buf, "%u\n", ns->num_vfs); +} + +static struct device_attribute nsim_numvfs_attr = + __ATTR(sriov_numvfs, 0664, nsim_numvfs_show, nsim_numvfs_store); + +static struct attribute *nsim_dev_attrs[] = { + &nsim_numvfs_attr.attr, + NULL, +}; + +static const struct attribute_group nsim_dev_attr_group = { + .attrs = nsim_dev_attrs, +}; + +static const struct attribute_group *nsim_dev_attr_groups[] = { + &nsim_dev_attr_group, + NULL, +}; + +static void nsim_dev_release(struct device *dev) +{ + struct netdevsim *ns = to_nsim(dev); + + nsim_vfs_disable(ns); + free_netdev(ns->netdev); +} + +static struct device_type nsim_dev_type = { + .groups = nsim_dev_attr_groups, + .release = nsim_dev_release, +}; + +static int nsim_init(struct net_device *dev) +{ + struct netdevsim *ns = netdev_priv(dev); + int err; + + ns->netdev = dev; + ns->ddir = debugfs_create_dir(netdev_name(dev), nsim_ddir); + if (IS_ERR_OR_NULL(ns->ddir)) + return -ENOMEM; + + err = nsim_bpf_init(ns); + if (err) + goto err_debugfs_destroy; + + ns->dev.id = nsim_dev_id++; + ns->dev.bus = &nsim_bus; + ns->dev.type = &nsim_dev_type; + err = device_register(&ns->dev); + if (err) + goto err_bpf_uninit; + + SET_NETDEV_DEV(dev, &ns->dev); + + return 0; + +err_bpf_uninit: + nsim_bpf_uninit(ns); +err_debugfs_destroy: + debugfs_remove_recursive(ns->ddir); + return err; +} + +static void nsim_uninit(struct net_device *dev) +{ + struct netdevsim *ns = netdev_priv(dev); + + debugfs_remove_recursive(ns->ddir); + nsim_bpf_uninit(ns); +} + +static void nsim_free(struct net_device *dev) +{ + struct netdevsim *ns = netdev_priv(dev); + + device_unregister(&ns->dev); + /* netdev and vf state will be freed 
out of device_release() */ +} + +static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct netdevsim *ns = netdev_priv(dev); + + u64_stats_update_begin(&ns->syncp); + ns->tx_packets++; + ns->tx_bytes += skb->len; + u64_stats_update_end(&ns->syncp); + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static void nsim_set_rx_mode(struct net_device *dev) +{ +} + +static int nsim_change_mtu(struct net_device *dev, int new_mtu) +{ + struct netdevsim *ns = netdev_priv(dev); + + if (ns->xdp_prog_mode == XDP_ATTACHED_DRV && + new_mtu > NSIM_XDP_MAX_MTU) + return -EBUSY; + + dev->mtu = new_mtu; + + return 0; +} + +static void +nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct netdevsim *ns = netdev_priv(dev); + unsigned int start; + + do { + start = u64_stats_fetch_begin(&ns->syncp); + stats->tx_bytes = ns->tx_bytes; + stats->tx_packets = ns->tx_packets; + } while (u64_stats_fetch_retry(&ns->syncp, start)); +} + +static int +nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ + return nsim_bpf_setup_tc_block_cb(type, type_data, cb_priv); +} + +static int +nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f) +{ + struct netdevsim *ns = netdev_priv(dev); + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, nsim_setup_tc_block_cb, + ns, ns); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, nsim_setup_tc_block_cb, ns); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac) +{ + struct netdevsim *ns = netdev_priv(dev); + + /* Only refuse multicast addresses, zero address can mean unset/any. 
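nsim_start_xmit() and nsim_get_stats64() above pair u64_stats_update_begin/end with the fetch/retry loop, which is what makes the 64-bit counters read back consistently on 32-bit hosts. The pairing reduced to a sketch with a hypothetical counter struct:

#include <linux/u64_stats_sync.h>

/* Illustrative sketch only. */
struct tx_counters {
	u64 packets;
	struct u64_stats_sync syncp;
};

static void count_packet(struct tx_counters *c)
{
	u64_stats_update_begin(&c->syncp);
	c->packets++;
	u64_stats_update_end(&c->syncp);
}

static u64 read_packets(struct tx_counters *c)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&c->syncp);
		val = c->packets;	/* retried if an update raced us */
	} while (u64_stats_fetch_retry(&c->syncp, start));

	return val;
}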
*/ + if (vf >= ns->num_vfs || is_multicast_ether_addr(mac)) + return -EINVAL; + memcpy(ns->vfconfigs[vf].vf_mac, mac, ETH_ALEN); + + return 0; +} + +static int nsim_set_vf_vlan(struct net_device *dev, int vf, + u16 vlan, u8 qos, __be16 vlan_proto) +{ + struct netdevsim *ns = netdev_priv(dev); + + if (vf >= ns->num_vfs || vlan > 4095 || qos > 7) + return -EINVAL; + + ns->vfconfigs[vf].vlan = vlan; + ns->vfconfigs[vf].qos = qos; + ns->vfconfigs[vf].vlan_proto = vlan_proto; + + return 0; +} + +static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max) +{ + struct netdevsim *ns = netdev_priv(dev); + + if (vf >= ns->num_vfs) + return -EINVAL; + + ns->vfconfigs[vf].min_tx_rate = min; + ns->vfconfigs[vf].max_tx_rate = max; + + return 0; +} + +static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val) +{ + struct netdevsim *ns = netdev_priv(dev); + + if (vf >= ns->num_vfs) + return -EINVAL; + ns->vfconfigs[vf].spoofchk_enabled = val; + + return 0; +} + +static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val) +{ + struct netdevsim *ns = netdev_priv(dev); + + if (vf >= ns->num_vfs) + return -EINVAL; + ns->vfconfigs[vf].rss_query_enabled = val; + + return 0; +} + +static int nsim_set_vf_trust(struct net_device *dev, int vf, bool val) +{ + struct netdevsim *ns = netdev_priv(dev); + + if (vf >= ns->num_vfs) + return -EINVAL; + ns->vfconfigs[vf].trusted = val; + + return 0; +} + +static int +nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi) +{ + struct netdevsim *ns = netdev_priv(dev); + + if (vf >= ns->num_vfs) + return -EINVAL; + + ivi->vf = vf; + ivi->linkstate = ns->vfconfigs[vf].link_state; + ivi->min_tx_rate = ns->vfconfigs[vf].min_tx_rate; + ivi->max_tx_rate = ns->vfconfigs[vf].max_tx_rate; + ivi->vlan = ns->vfconfigs[vf].vlan; + ivi->vlan_proto = ns->vfconfigs[vf].vlan_proto; + ivi->qos = ns->vfconfigs[vf].qos; + memcpy(&ivi->mac, ns->vfconfigs[vf].vf_mac, ETH_ALEN); + ivi->spoofchk = ns->vfconfigs[vf].spoofchk_enabled; + ivi->trusted = ns->vfconfigs[vf].trusted; + ivi->rss_query_en = ns->vfconfigs[vf].rss_query_enabled; + + return 0; +} + +static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state) +{ + struct netdevsim *ns = netdev_priv(dev); + + if (vf >= ns->num_vfs) + return -EINVAL; + + switch (state) { + case IFLA_VF_LINK_STATE_AUTO: + case IFLA_VF_LINK_STATE_ENABLE: + case IFLA_VF_LINK_STATE_DISABLE: + break; + default: + return -EINVAL; + } + + ns->vfconfigs[vf].link_state = state; + + return 0; +} + +static int +nsim_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: + return nsim_setup_tc_block(dev, type_data); + default: + return -EOPNOTSUPP; + } +} + +static int +nsim_set_features(struct net_device *dev, netdev_features_t features) +{ + struct netdevsim *ns = netdev_priv(dev); + + if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC)) + return nsim_bpf_disable_tc(ns); + + return 0; +} + +static const struct net_device_ops nsim_netdev_ops = { + .ndo_init = nsim_init, + .ndo_uninit = nsim_uninit, + .ndo_start_xmit = nsim_start_xmit, + .ndo_set_rx_mode = nsim_set_rx_mode, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = nsim_change_mtu, + .ndo_get_stats64 = nsim_get_stats64, + .ndo_set_vf_mac = nsim_set_vf_mac, + .ndo_set_vf_vlan = nsim_set_vf_vlan, + .ndo_set_vf_rate = nsim_set_vf_rate, + .ndo_set_vf_spoofchk = nsim_set_vf_spoofchk, + .ndo_set_vf_trust = 
nsim_set_vf_trust, + .ndo_get_vf_config = nsim_get_vf_config, + .ndo_set_vf_link_state = nsim_set_vf_link_state, + .ndo_set_vf_rss_query_en = nsim_set_vf_rss_query_en, + .ndo_setup_tc = nsim_setup_tc, + .ndo_set_features = nsim_set_features, + .ndo_bpf = nsim_bpf, +}; + +static void nsim_setup(struct net_device *dev) +{ + ether_setup(dev); + eth_hw_addr_random(dev); + + dev->netdev_ops = &nsim_netdev_ops; + dev->priv_destructor = nsim_free; + + dev->tx_queue_len = 0; + dev->flags |= IFF_NOARP; + dev->flags &= ~IFF_MULTICAST; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | + IFF_NO_QUEUE; + dev->features |= NETIF_F_HIGHDMA | + NETIF_F_SG | + NETIF_F_FRAGLIST | + NETIF_F_HW_CSUM | + NETIF_F_TSO; + dev->hw_features |= NETIF_F_HW_TC; + dev->max_mtu = ETH_MAX_MTU; +} + +static int nsim_validate(struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + if (tb[IFLA_ADDRESS]) { + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) + return -EINVAL; + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) + return -EADDRNOTAVAIL; + } + return 0; +} + +static struct rtnl_link_ops nsim_link_ops __read_mostly = { + .kind = DRV_NAME, + .priv_size = sizeof(struct netdevsim), + .setup = nsim_setup, + .validate = nsim_validate, +}; + +struct dentry *nsim_ddir; + +static int __init nsim_module_init(void) +{ + int err; + + nsim_ddir = debugfs_create_dir(DRV_NAME, NULL); + if (IS_ERR_OR_NULL(nsim_ddir)) + return -ENOMEM; + + err = bus_register(&nsim_bus); + if (err) + goto err_debugfs_destroy; + + err = rtnl_link_register(&nsim_link_ops); + if (err) + goto err_unreg_bus; + + return 0; + +err_unreg_bus: + bus_unregister(&nsim_bus); +err_debugfs_destroy: + debugfs_remove_recursive(nsim_ddir); + return err; +} + +static void __exit nsim_module_exit(void) +{ + rtnl_link_unregister(&nsim_link_ops); + bus_unregister(&nsim_bus); + debugfs_remove_recursive(nsim_ddir); +} + +module_init(nsim_module_init); +module_exit(nsim_module_exit); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_RTNL_LINK(DRV_NAME); diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h new file mode 100644 index 000000000000..ea081c10efb8 --- /dev/null +++ b/drivers/net/netdevsim/netdevsim.h @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2017 Netronome Systems, Inc. + * + * This software is licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree. + * + * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" + * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE + * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME + * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 
+ */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/u64_stats_sync.h> + +#define DRV_NAME "netdevsim" + +#define NSIM_XDP_MAX_MTU 4000 + +#define NSIM_EA(extack, msg) NL_SET_ERR_MSG_MOD((extack), msg) + +struct bpf_prog; +struct dentry; +struct nsim_vf_config; + +struct netdevsim { + struct net_device *netdev; + + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; + + struct device dev; + + struct dentry *ddir; + + unsigned int num_vfs; + struct nsim_vf_config *vfconfigs; + + struct bpf_prog *bpf_offloaded; + u32 bpf_offloaded_id; + + u32 xdp_flags; + int xdp_prog_mode; + struct bpf_prog *xdp_prog; + + u32 prog_id_gen; + + bool bpf_bind_accept; + u32 bpf_bind_verifier_delay; + struct dentry *ddir_bpf_bound_progs; + struct list_head bpf_bound_progs; + + bool bpf_tc_accept; + bool bpf_tc_non_bound_accept; + bool bpf_xdpdrv_accept; + bool bpf_xdpoffload_accept; + + bool bpf_map_accept; + struct list_head bpf_bound_maps; +}; + +extern struct dentry *nsim_ddir; + +#ifdef CONFIG_BPF_SYSCALL +int nsim_bpf_init(struct netdevsim *ns); +void nsim_bpf_uninit(struct netdevsim *ns); +int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf); +int nsim_bpf_disable_tc(struct netdevsim *ns); +int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type, + void *type_data, void *cb_priv); +#else +static inline int nsim_bpf_init(struct netdevsim *ns) +{ + return 0; +} + +static inline void nsim_bpf_uninit(struct netdevsim *ns) +{ +} + +static inline int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + return bpf->command == XDP_QUERY_PROG ? 0 : -EOPNOTSUPP; +} + +static inline int nsim_bpf_disable_tc(struct netdevsim *ns) +{ + return 0; +} + +static inline int +nsim_bpf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + return -EOPNOTSUPP; +} +#endif + +static inline struct netdevsim *to_nsim(struct device *ptr) +{ + return container_of(ptr, struct netdevsim, dev); +} diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c index 18141c022b13..6fe5dc9201d0 100644 --- a/drivers/net/phy/amd.c +++ b/drivers/net/phy/amd.c @@ -68,8 +68,6 @@ static struct phy_driver am79c_driver[] = { { .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = am79c_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = am79c_ack_interrupt, .config_intr = am79c_config_intr, } }; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index e911e4990b20..411cf1072bae 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -71,7 +71,6 @@ MODULE_LICENSE("GPL"); struct at803x_priv { bool phy_reset:1; - struct gpio_desc *gpiod_reset; }; struct at803x_context { @@ -216,56 +215,33 @@ static int at803x_suspend(struct phy_device *phydev) int value; int wol_enabled; - mutex_lock(&phydev->lock); - value = phy_read(phydev, AT803X_INTR_ENABLE); wol_enabled = value & AT803X_INTR_ENABLE_WOL; - value = phy_read(phydev, MII_BMCR); - if (wol_enabled) - value |= BMCR_ISOLATE; + value = BMCR_ISOLATE; else - value |= BMCR_PDOWN; - - phy_write(phydev, MII_BMCR, value); + value = BMCR_PDOWN; - mutex_unlock(&phydev->lock); + phy_modify(phydev, MII_BMCR, 0, value); return 0; } static int at803x_resume(struct phy_device *phydev) { - int value; - - value = phy_read(phydev, MII_BMCR); - value &= ~(BMCR_PDOWN | BMCR_ISOLATE); - phy_write(phydev, MII_BMCR, value); - - return 0; + return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | 
BMCR_ISOLATE, 0); } static int at803x_probe(struct phy_device *phydev) { struct device *dev = &phydev->mdio.dev; struct at803x_priv *priv; - struct gpio_desc *gpiod_reset; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - if (phydev->drv->phy_id != ATH8030_PHY_ID) - goto does_not_require_reset_workaround; - - gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(gpiod_reset)) - return PTR_ERR(gpiod_reset); - - priv->gpiod_reset = gpiod_reset; - -does_not_require_reset_workaround: phydev->priv = priv; return 0; @@ -339,14 +315,14 @@ static void at803x_link_change_notify(struct phy_device *phydev) * cannot recover from by software. */ if (phydev->state == PHY_NOLINK) { - if (priv->gpiod_reset && !priv->phy_reset) { + if (phydev->mdio.reset && !priv->phy_reset) { struct at803x_context context; at803x_context_save(phydev, &context); - gpiod_set_value(priv->gpiod_reset, 1); + phy_device_reset(phydev, 1); msleep(1); - gpiod_set_value(priv->gpiod_reset, 0); + phy_device_reset(phydev, 0); msleep(1); at803x_context_restore(phydev, &context); @@ -404,8 +380,6 @@ static struct phy_driver at803x_driver[] = { .resume = at803x_resume, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, }, { @@ -422,8 +396,6 @@ static struct phy_driver at803x_driver[] = { .resume = at803x_resume, .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, }, { @@ -439,8 +411,6 @@ static struct phy_driver at803x_driver[] = { .resume = at803x_resume, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .aneg_done = at803x_aneg_done, .ack_interrupt = &at803x_ack_interrupt, .config_intr = &at803x_config_intr, diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c index 3fe8cc5c177e..6838129839ca 100644 --- a/drivers/net/phy/bcm-cygnus.c +++ b/drivers/net/phy/bcm-cygnus.c @@ -136,8 +136,6 @@ static struct phy_driver bcm_cygnus_phy_driver[] = { .name = "Broadcom Cygnus PHY", .features = PHY_GBIT_FEATURES, .config_init = bcm_cygnus_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, .suspend = genphy_suspend, diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index b0492ef2cdaa..cf14613745c9 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c @@ -69,8 +69,6 @@ static struct phy_driver bcm63xx_driver[] = { .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL, .config_init = bcm63xx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm63xx_config_intr, }, { @@ -81,8 +79,6 @@ static struct phy_driver bcm63xx_driver[] = { .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL, .config_init = bcm63xx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm63xx_config_intr, } }; diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 8b33f688ac8a..421feb8f92fe 100644 --- 
a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -611,8 +611,6 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) .features = PHY_GBIT_FEATURES, \ .flags = PHY_IS_INTERNAL, \ .config_init = bcm7xxx_28nm_config_init, \ - .config_aneg = genphy_config_aneg, \ - .read_status = genphy_read_status, \ .resume = bcm7xxx_28nm_resume, \ .get_tunable = bcm7xxx_28nm_get_tunable, \ .set_tunable = bcm7xxx_28nm_set_tunable, \ @@ -630,8 +628,6 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) .features = PHY_BASIC_FEATURES, \ .flags = PHY_IS_INTERNAL, \ .config_init = bcm7xxx_28nm_ephy_config_init, \ - .config_aneg = genphy_config_aneg, \ - .read_status = genphy_read_status, \ .resume = bcm7xxx_28nm_ephy_resume, \ .get_sset_count = bcm_phy_get_sset_count, \ .get_strings = bcm_phy_get_strings, \ @@ -647,8 +643,6 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) .features = PHY_BASIC_FEATURES, \ .flags = PHY_IS_INTERNAL, \ .config_init = bcm7xxx_config_init, \ - .config_aneg = genphy_config_aneg, \ - .read_status = genphy_read_status, \ .suspend = bcm7xxx_suspend, \ .resume = bcm7xxx_config_init, \ } diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index d7ed69deabfb..3bb6b66dc7bf 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -540,6 +540,37 @@ static int brcm_fet_config_intr(struct phy_device *phydev) return err; } +struct bcm53xx_phy_priv { + u64 *stats; +}; + +static int bcm53xx_phy_probe(struct phy_device *phydev) +{ + struct bcm53xx_phy_priv *priv; + + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + priv->stats = devm_kcalloc(&phydev->mdio.dev, + bcm_phy_get_sset_count(phydev), sizeof(u64), + GFP_KERNEL); + if (!priv->stats) + return -ENOMEM; + + return 0; +} + +static void bcm53xx_phy_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + struct bcm53xx_phy_priv *priv = phydev->priv; + + bcm_phy_get_stats(phydev, priv->stats, stats, data); +} + static struct phy_driver broadcom_drivers[] = { { .phy_id = PHY_ID_BCM5411, @@ -548,8 +579,6 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, }, { @@ -559,8 +588,6 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, }, { @@ -570,8 +597,6 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, }, { @@ -581,8 +606,6 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, }, { @@ -592,8 +615,6 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = 
bcm54xx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, }, { @@ -603,8 +624,6 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, }, { @@ -614,8 +633,6 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, }, { @@ -626,7 +643,6 @@ static struct phy_driver broadcom_drivers[] = { .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, }, { @@ -637,7 +653,6 @@ static struct phy_driver broadcom_drivers[] = { .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, }, { @@ -647,7 +662,6 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = bcm5482_config_init, - .config_aneg = genphy_config_aneg, .read_status = bcm5482_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -658,8 +672,6 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, }, { @@ -669,8 +681,6 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, }, { @@ -680,8 +690,6 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, }, { @@ -691,8 +699,6 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = brcm_fet_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, }, { @@ -702,10 +708,18 @@ static struct phy_driver broadcom_drivers[] = { .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = brcm_fet_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, +}, { + .phy_id = PHY_ID_BCM5395, + .phy_id_mask = 0xfffffff0, + .name = "Broadcom BCM5395", + .flags = PHY_IS_INTERNAL, + .features = PHY_GBIT_FEATURES, + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + .get_stats = bcm53xx_phy_get_stats, + .probe = bcm53xx_phy_probe, } }; 
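The BCM5395 entry added above plugs the shared bcm_phy statistics helpers into ethtool; bcm53xx_phy_probe() only has to allocate one u64 slot per counter reported by bcm_phy_get_sset_count(). A rough sketch of the accumulation idiom those helpers (and marvell_get_stat() later in this patch) rely on, using hypothetical my_phy_* names that are not part of the patch:

struct my_phy_hw_stat {
	int reg;	/* counter register */
	u16 mask;	/* valid bits; many hardware counters are 8/16 bit wide */
};

struct my_phy_priv {
	u64 *stats;	/* allocated at probe, one slot per counter */
};

static u64 my_phy_get_stat(struct phy_device *phydev, int i)
{
	struct my_phy_priv *priv = phydev->priv;
	int val;

	val = phy_read(phydev, my_phy_hw_stats[i].reg);
	if (val < 0)
		return UINT64_MAX;	/* convention for "counter unavailable" */

	/* Accumulate into the 64-bit software counter so that narrow,
	 * clear-on-read hardware counters survive wraparound.
	 */
	priv->stats[i] += val & my_phy_hw_stats[i].mask;

	return priv->stats[i];
}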
module_phy_driver(broadcom_drivers); @@ -726,6 +740,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = { { PHY_ID_BCM57780, 0xfffffff0 }, { PHY_ID_BCMAC131, 0xfffffff0 }, { PHY_ID_BCM5241, 0xfffffff0 }, + { PHY_ID_BCM5395, 0xfffffff0 }, { } }; diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c index d339c1afea77..c05af00bf4b6 100644 --- a/drivers/net/phy/cicada.c +++ b/drivers/net/phy/cicada.c @@ -110,8 +110,6 @@ static struct phy_driver cis820x_driver[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = &cis820x_config_init, - .config_aneg = &genphy_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, }, { @@ -121,8 +119,6 @@ static struct phy_driver cis820x_driver[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = &cis820x_config_init, - .config_aneg = &genphy_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, } }; diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c index e28913d9ea7e..5ee99b3b428c 100644 --- a/drivers/net/phy/davicom.c +++ b/drivers/net/phy/davicom.c @@ -153,7 +153,6 @@ static struct phy_driver dm91xx_driver[] = { .flags = PHY_HAS_INTERRUPT, .config_init = dm9161_config_init, .config_aneg = dm9161_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, }, { @@ -164,7 +163,6 @@ static struct phy_driver dm91xx_driver[] = { .flags = PHY_HAS_INTERRUPT, .config_init = dm9161_config_init, .config_aneg = dm9161_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, }, { @@ -175,7 +173,6 @@ static struct phy_driver dm91xx_driver[] = { .flags = PHY_HAS_INTERRUPT, .config_init = dm9161_config_init, .config_aneg = dm9161_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, }, { @@ -184,8 +181,6 @@ static struct phy_driver dm91xx_driver[] = { .phy_id_mask = 0x0ffffff0, .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, } }; diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index cbd629822f04..654f42d00092 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1502,8 +1502,6 @@ static struct phy_driver dp83640_driver = { .probe = dp83640_probe, .remove = dp83640_remove, .config_init = dp83640_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = dp83640_ack_interrupt, .config_intr = dp83640_config_intr, .ts_info = dp83640_ts_info, diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 14335d14e9e4..6e8a2a4f3a6e 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -325,8 +325,6 @@ static struct phy_driver dp83822_driver[] = { .set_wol = dp83822_set_wol, .ack_interrupt = dp83822_ack_interrupt, .config_intr = dp83822_config_intr, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .suspend = dp83822_suspend, .resume = dp83822_resume, }, diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 3966d43c5146..cd09c3af2117 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -95,8 +95,6 @@ 
MODULE_DEVICE_TABLE(mdio, dp83848_tbl); .config_init = genphy_config_init, \ .suspend = genphy_suspend, \ .resume = genphy_resume, \ - .config_aneg = genphy_config_aneg, \ - .read_status = genphy_read_status, \ \ /* IRQ related */ \ .ack_interrupt = dp83848_ack_interrupt, \ diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index c1ab976cc800..ab58224f897f 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -324,8 +324,6 @@ static struct phy_driver dp83867_driver[] = { .ack_interrupt = dp83867_ack_interrupt, .config_intr = dp83867_config_intr, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, }, diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index eb5167210681..001fe1df7557 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -115,37 +115,6 @@ int fixed_phy_set_link_update(struct phy_device *phydev, } EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); -int fixed_phy_update_state(struct phy_device *phydev, - const struct fixed_phy_status *status, - const struct fixed_phy_status *changed) -{ - struct fixed_mdio_bus *fmb = &platform_fmb; - struct fixed_phy *fp; - - if (!phydev || phydev->mdio.bus != fmb->mii_bus) - return -EINVAL; - - list_for_each_entry(fp, &fmb->phys, node) { - if (fp->addr == phydev->mdio.addr) { - write_seqcount_begin(&fp->seqcount); -#define _UPD(x) if (changed->x) \ - fp->status.x = status->x - _UPD(link); - _UPD(speed); - _UPD(duplex); - _UPD(pause); - _UPD(asym_pause); -#undef _UPD - fixed_phy_update(fp); - write_seqcount_end(&fp->seqcount); - return 0; - } - } - - return -ENOENT; -} -EXPORT_SYMBOL(fixed_phy_update_state); - int fixed_phy_add(unsigned int irq, int phy_addr, struct fixed_phy_status *status, int link_gpio) diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index 567280a72241..791587a49215 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -227,8 +227,6 @@ static struct phy_driver icplus_driver[] = { .phy_id_mask = 0x0ffffff0, .features = PHY_GBIT_FEATURES, .config_init = &ip1001_config_init, - .config_aneg = &genphy_config_aneg, - .read_status = &genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, }, { @@ -239,8 +237,6 @@ static struct phy_driver icplus_driver[] = { .flags = PHY_HAS_INTERRUPT, .ack_interrupt = ip101a_g_ack_interrupt, .config_init = &ip101a_g_config_init, - .config_aneg = &genphy_config_aneg, - .read_status = &genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, } }; diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c index 55f8c52dd2f1..a11f80cb5388 100644 --- a/drivers/net/phy/intel-xway.c +++ b/drivers/net/phy/intel-xway.c @@ -243,7 +243,6 @@ static struct phy_driver xway_gphy[] = { .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, .config_intr = xway_gphy_config_intr, @@ -257,7 +256,6 @@ static struct phy_driver xway_gphy[] = { .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, .config_intr = xway_gphy_config_intr, @@ -271,7 +269,6 @@ static struct phy_driver xway_gphy[] = { .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, 
.config_aneg = xway_gphy14_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, .config_intr = xway_gphy_config_intr, @@ -285,7 +282,6 @@ static struct phy_driver xway_gphy[] = { .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, .config_intr = xway_gphy_config_intr, @@ -298,8 +294,6 @@ static struct phy_driver xway_gphy[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, .config_intr = xway_gphy_config_intr, @@ -312,8 +306,6 @@ static struct phy_driver xway_gphy[] = { .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, .config_intr = xway_gphy_config_intr, @@ -326,8 +318,6 @@ static struct phy_driver xway_gphy[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, .config_intr = xway_gphy_config_intr, @@ -340,8 +330,6 @@ static struct phy_driver xway_gphy[] = { .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, .config_intr = xway_gphy_config_intr, diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c index 09d215177fff..c14b254b2879 100644 --- a/drivers/net/phy/lxt.c +++ b/drivers/net/phy/lxt.c @@ -259,8 +259,6 @@ static struct phy_driver lxt97x_driver[] = { .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = lxt970_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = lxt970_ack_interrupt, .config_intr = lxt970_config_intr, }, { @@ -269,8 +267,6 @@ static struct phy_driver lxt97x_driver[] = { .phy_id_mask = 0xfffffff0, .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = lxt971_ack_interrupt, .config_intr = lxt971_config_intr, }, { @@ -290,7 +286,6 @@ static struct phy_driver lxt97x_driver[] = { .flags = 0, .probe = lxt973_probe, .config_aneg = lxt973_config_aneg, - .read_status = genphy_read_status, } }; module_phy_driver(lxt97x_driver); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 82104edca393..22d9bc9c33a4 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -83,7 +83,7 @@ #define MII_88E1121_PHY_MSCR_REG 21 #define MII_88E1121_PHY_MSCR_RX_DELAY BIT(5) #define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4) -#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(BIT(5) | BIT(4))) +#define MII_88E1121_PHY_MSCR_DELAY_MASK (BIT(5) | BIT(4)) #define MII_88E1121_MISC_TEST 0x1a #define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK 0x1f00 @@ -96,6 +96,17 @@ #define MII_88E1510_TEMP_SENSOR 0x1b #define MII_88E1510_TEMP_SENSOR_MASK 0xff 
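Much of the PHY driver churn in this patch comes from converting open-coded read/modify/write sequences to the new phy_modify() helper, which is also why MII_88E1121_PHY_MSCR_DELAY_MASK above changes from a complemented mask (bits to keep across an &=) to the plain (BIT(5) | BIT(4)) that phy_modify() expects as "bits to clear". In rough terms, leaving out the MDIO bus locking the real helper performs via __phy_read()/__phy_write():

/* Sketch of phy_modify() semantics as used throughout this patch:
 * clear the bits in 'mask', then set the bits in 'set', as a single
 * logical read-modify-write of one PHY register.
 */
static int phy_modify_sketch(struct phy_device *phydev, u32 regnum,
			     u16 mask, u16 set)
{
	int old;

	old = phy_read(phydev, regnum);
	if (old < 0)
		return old;

	return phy_write(phydev, regnum, (old & ~mask) | set);
}

phy_modify_paged(), used for the m88e1121 RGMII delays below, wraps the same operation in a select-page/restore-page pair, which is what lets the Marvell conversions drop their explicit page bookkeeping.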
+#define MII_88E6390_MISC_TEST 0x1b +#define MII_88E6390_MISC_TEST_SAMPLE_1S 0 +#define MII_88E6390_MISC_TEST_SAMPLE_10MS BIT(14) +#define MII_88E6390_MISC_TEST_SAMPLE_DISABLE BIT(15) +#define MII_88E6390_MISC_TEST_SAMPLE_ENABLE 0 +#define MII_88E6390_MISC_TEST_SAMPLE_MASK (0x3 << 14) + +#define MII_88E6390_TEMP_SENSOR 0x1c +#define MII_88E6390_TEMP_SENSOR_MASK 0xff +#define MII_88E6390_TEMP_SENSOR_SAMPLES 10 + #define MII_88E1318S_PHY_MSCR1_REG 16 #define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6) @@ -177,27 +188,19 @@ struct marvell_priv { struct device *hwmon_dev; }; -static int marvell_get_page(struct phy_device *phydev) +static int marvell_read_page(struct phy_device *phydev) { - return phy_read(phydev, MII_MARVELL_PHY_PAGE); + return __phy_read(phydev, MII_MARVELL_PHY_PAGE); } -static int marvell_set_page(struct phy_device *phydev, int page) +static int marvell_write_page(struct phy_device *phydev, int page) { - return phy_write(phydev, MII_MARVELL_PHY_PAGE, page); + return __phy_write(phydev, MII_MARVELL_PHY_PAGE, page); } -static int marvell_get_set_page(struct phy_device *phydev, int page) +static int marvell_set_page(struct phy_device *phydev, int page) { - int oldpage = marvell_get_page(phydev); - - if (oldpage < 0) - return oldpage; - - if (page != oldpage) - return marvell_set_page(phydev, page); - - return 0; + return phy_write(phydev, MII_MARVELL_PHY_PAGE, page); } static int marvell_ack_interrupt(struct phy_device *phydev) @@ -399,7 +402,7 @@ static int m88e1111_config_aneg(struct phy_device *phydev) static int marvell_of_reg_init(struct phy_device *phydev) { const __be32 *paddr; - int len, i, saved_page, current_page, ret; + int len, i, saved_page, current_page, ret = 0; if (!phydev->mdio.dev.of_node) return 0; @@ -409,12 +412,11 @@ static int marvell_of_reg_init(struct phy_device *phydev) if (!paddr || len < (4 * sizeof(*paddr))) return 0; - saved_page = marvell_get_page(phydev); + saved_page = phy_save_page(phydev); if (saved_page < 0) - return saved_page; + goto err; current_page = saved_page; - ret = 0; len /= sizeof(*paddr); for (i = 0; i < len - 3; i += 4) { u16 page = be32_to_cpup(paddr + i); @@ -425,14 +427,14 @@ static int marvell_of_reg_init(struct phy_device *phydev) if (page != current_page) { current_page = page; - ret = marvell_set_page(phydev, page); + ret = marvell_write_page(phydev, page); if (ret < 0) goto err; } val = 0; if (mask) { - val = phy_read(phydev, reg); + val = __phy_read(phydev, reg); if (val < 0) { ret = val; goto err; @@ -441,17 +443,12 @@ static int marvell_of_reg_init(struct phy_device *phydev) } val |= val_bits; - ret = phy_write(phydev, reg, val); + ret = __phy_write(phydev, reg, val); if (ret < 0) goto err; } err: - if (current_page != saved_page) { - i = marvell_set_page(phydev, saved_page); - if (ret == 0) - ret = i; - } - return ret; + return phy_restore_page(phydev, saved_page, ret); } #else static int marvell_of_reg_init(struct phy_device *phydev) @@ -462,34 +459,21 @@ static int marvell_of_reg_init(struct phy_device *phydev) static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev) { - int err, oldpage, mscr; - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE); - if (oldpage < 0) - return oldpage; - - mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG); - if (mscr < 0) { - err = mscr; - goto out; - } - - mscr &= MII_88E1121_PHY_MSCR_DELAY_MASK; + int mscr; if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) - mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY | - MII_88E1121_PHY_MSCR_TX_DELAY); + mscr = 
MII_88E1121_PHY_MSCR_RX_DELAY | + MII_88E1121_PHY_MSCR_TX_DELAY; else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) - mscr |= MII_88E1121_PHY_MSCR_RX_DELAY; + mscr = MII_88E1121_PHY_MSCR_RX_DELAY; else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) - mscr |= MII_88E1121_PHY_MSCR_TX_DELAY; - - err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr); - -out: - marvell_set_page(phydev, oldpage); + mscr = MII_88E1121_PHY_MSCR_TX_DELAY; + else + mscr = 0; - return err; + return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE, + MII_88E1121_PHY_MSCR_REG, + MII_88E1121_PHY_MSCR_DELAY_MASK, mscr); } static int m88e1121_config_aneg(struct phy_device *phydev) @@ -498,7 +482,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev) if (phy_interface_is_rgmii(phydev)) { err = m88e1121_config_aneg_rgmii_delays(phydev); - if (err) + if (err < 0) return err; } @@ -515,20 +499,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev) static int m88e1318_config_aneg(struct phy_device *phydev) { - int err, oldpage, mscr; - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE); - if (oldpage < 0) - return oldpage; - - mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG); - mscr |= MII_88E1318S_PHY_MSCR1_PAD_ODD; - - err = phy_write(phydev, MII_88E1318S_PHY_MSCR1_REG, mscr); - if (err < 0) - return err; + int err; - err = marvell_set_page(phydev, oldpage); + err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE, + MII_88E1318S_PHY_MSCR1_REG, + 0, MII_88E1318S_PHY_MSCR1_PAD_ODD); if (err < 0) return err; @@ -700,19 +675,14 @@ static int m88e1116r_config_init(struct phy_device *phydev) static int m88e3016_config_init(struct phy_device *phydev) { - int reg; + int ret; /* Enable Scrambler and Auto-Crossover */ - reg = phy_read(phydev, MII_88E3016_PHY_SPEC_CTRL); - if (reg < 0) - return reg; - - reg &= ~MII_88E3016_DISABLE_SCRAMBLER; - reg |= MII_88E3016_AUTO_MDIX_CROSSOVER; - - reg = phy_write(phydev, MII_88E3016_PHY_SPEC_CTRL, reg); - if (reg < 0) - return reg; + ret = phy_modify(phydev, MII_88E3016_PHY_SPEC_CTRL, + MII_88E3016_DISABLE_SCRAMBLER, + MII_88E3016_AUTO_MDIX_CROSSOVER); + if (ret < 0) + return ret; return marvell_config_init(phydev); } @@ -721,42 +691,33 @@ static int m88e1111_config_init_hwcfg_mode(struct phy_device *phydev, u16 mode, int fibre_copper_auto) { - int temp; - - temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); - if (temp < 0) - return temp; - - temp &= ~(MII_M1111_HWCFG_MODE_MASK | - MII_M1111_HWCFG_FIBER_COPPER_AUTO | - MII_M1111_HWCFG_FIBER_COPPER_RES); - temp |= mode; - if (fibre_copper_auto) - temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO; + mode |= MII_M1111_HWCFG_FIBER_COPPER_AUTO; - return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); + return phy_modify(phydev, MII_M1111_PHY_EXT_SR, + MII_M1111_HWCFG_MODE_MASK | + MII_M1111_HWCFG_FIBER_COPPER_AUTO | + MII_M1111_HWCFG_FIBER_COPPER_RES, + mode); } static int m88e1111_config_init_rgmii_delays(struct phy_device *phydev) { - int temp; - - temp = phy_read(phydev, MII_M1111_PHY_EXT_CR); - if (temp < 0) - return temp; + int delay; if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) { - temp |= (MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY); + delay = MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY; } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { - temp &= ~MII_M1111_RGMII_TX_DELAY; - temp |= MII_M1111_RGMII_RX_DELAY; + delay = MII_M1111_RGMII_RX_DELAY; } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { - temp &= ~MII_M1111_RGMII_RX_DELAY; - temp |= 
MII_M1111_RGMII_TX_DELAY; + delay = MII_M1111_RGMII_TX_DELAY; + } else { + delay = 0; } - return phy_write(phydev, MII_M1111_PHY_EXT_CR, temp); + return phy_modify(phydev, MII_M1111_PHY_EXT_CR, + MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY, + delay); } static int m88e1111_config_init_rgmii(struct phy_device *phydev) @@ -802,7 +763,7 @@ static int m88e1111_config_init_rtbi(struct phy_device *phydev) int err; err = m88e1111_config_init_rgmii_delays(phydev); - if (err) + if (err < 0) return err; err = m88e1111_config_init_hwcfg_mode( @@ -829,7 +790,7 @@ static int m88e1111_config_init(struct phy_device *phydev) if (phy_interface_is_rgmii(phydev)) { err = m88e1111_config_init_rgmii(phydev); - if (err) + if (err < 0) return err; } @@ -854,20 +815,15 @@ static int m88e1111_config_init(struct phy_device *phydev) static int m88e1121_config_init(struct phy_device *phydev) { - int err, oldpage; - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_LED_PAGE); - if (oldpage < 0) - return oldpage; + int err; /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */ - err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL, - MII_88E1121_PHY_LED_DEF); + err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, + MII_88E1121_PHY_LED_CTRL, + MII_88E1121_PHY_LED_DEF); if (err < 0) return err; - marvell_set_page(phydev, oldpage); - /* Set marvell,reg-init configuration from device tree */ return marvell_config_init(phydev); } @@ -875,7 +831,6 @@ static int m88e1121_config_init(struct phy_device *phydev) static int m88e1510_config_init(struct phy_device *phydev) { int err; - int temp; /* SGMII-to-Copper mode initialization */ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { @@ -887,16 +842,15 @@ static int m88e1510_config_init(struct phy_device *phydev) return err; /* In reg 20, write MODE[2:0] = 0x1 (SGMII to Copper) */ - temp = phy_read(phydev, MII_88E1510_GEN_CTRL_REG_1); - temp &= ~MII_88E1510_GEN_CTRL_REG_1_MODE_MASK; - temp |= MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII; - err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp); + err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1, + MII_88E1510_GEN_CTRL_REG_1_MODE_MASK, + MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII); if (err < 0) return err; /* PHY reset is necessary after changing MODE[2:0] */ - temp |= MII_88E1510_GEN_CTRL_REG_1_RESET; - err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp); + err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1, 0, + MII_88E1510_GEN_CTRL_REG_1_RESET); if (err < 0) return err; @@ -1002,7 +956,6 @@ static int m88e1149_config_init(struct phy_device *phydev) static int m88e1145_config_init_rgmii(struct phy_device *phydev) { - int temp; int err; err = m88e1111_config_init_rgmii_delays(phydev); @@ -1014,15 +967,9 @@ static int m88e1145_config_init_rgmii(struct phy_device *phydev) if (err < 0) return err; - temp = phy_read(phydev, 0x1e); - if (temp < 0) - return temp; - - temp &= 0xf03f; - temp |= 2 << 9; /* 36 ohm */ - temp |= 2 << 6; /* 39 ohm */ - - err = phy_write(phydev, 0x1e, temp); + err = phy_modify(phydev, 0x1e, 0x0fc0, + 2 << 9 | /* 36 ohm */ + 2 << 6); /* 39 ohm */ if (err < 0) return err; @@ -1398,100 +1345,98 @@ static int m88e1121_did_interrupt(struct phy_device *phydev) static void m88e1318_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { + int oldpage, ret = 0; + wol->supported = WAKE_MAGIC; wol->wolopts = 0; - if (marvell_set_page(phydev, MII_MARVELL_WOL_PAGE) < 0) - return; + oldpage = phy_select_page(phydev, MII_MARVELL_WOL_PAGE); + if (oldpage < 0) + goto error; - if (phy_read(phydev, 
MII_88E1318S_PHY_WOL_CTRL) & - MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE) + ret = __phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL); + if (ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE) wol->wolopts |= WAKE_MAGIC; - if (marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE) < 0) - return; +error: + phy_restore_page(phydev, oldpage, ret); } static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { - int err, oldpage, temp; + int err = 0, oldpage; - oldpage = marvell_get_page(phydev); + oldpage = phy_save_page(phydev); + if (oldpage < 0) + goto error; if (wol->wolopts & WAKE_MAGIC) { /* Explicitly switch to page 0x00, just to be sure */ - err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); + err = marvell_write_page(phydev, MII_MARVELL_COPPER_PAGE); if (err < 0) - return err; + goto error; /* Enable the WOL interrupt */ - temp = phy_read(phydev, MII_88E1318S_PHY_CSIER); - temp |= MII_88E1318S_PHY_CSIER_WOL_EIE; - err = phy_write(phydev, MII_88E1318S_PHY_CSIER, temp); + err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, + MII_88E1318S_PHY_CSIER_WOL_EIE); if (err < 0) - return err; + goto error; - err = marvell_set_page(phydev, MII_MARVELL_LED_PAGE); + err = marvell_write_page(phydev, MII_MARVELL_LED_PAGE); if (err < 0) - return err; + goto error; /* Setup LED[2] as interrupt pin (active low) */ - temp = phy_read(phydev, MII_88E1318S_PHY_LED_TCR); - temp &= ~MII_88E1318S_PHY_LED_TCR_FORCE_INT; - temp |= MII_88E1318S_PHY_LED_TCR_INTn_ENABLE; - temp |= MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW; - err = phy_write(phydev, MII_88E1318S_PHY_LED_TCR, temp); + err = __phy_modify(phydev, MII_88E1318S_PHY_LED_TCR, + MII_88E1318S_PHY_LED_TCR_FORCE_INT, + MII_88E1318S_PHY_LED_TCR_INTn_ENABLE | + MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW); if (err < 0) - return err; + goto error; - err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE); + err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE); if (err < 0) - return err; + goto error; /* Store the device address for the magic packet */ - err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2, + err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2, ((phydev->attached_dev->dev_addr[5] << 8) | phydev->attached_dev->dev_addr[4])); if (err < 0) - return err; - err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1, + goto error; + err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1, ((phydev->attached_dev->dev_addr[3] << 8) | phydev->attached_dev->dev_addr[2])); if (err < 0) - return err; - err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0, + goto error; + err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0, ((phydev->attached_dev->dev_addr[1] << 8) | phydev->attached_dev->dev_addr[0])); if (err < 0) - return err; + goto error; /* Clear WOL status and enable magic packet matching */ - temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL); - temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS; - temp |= MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE; - err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp); + err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL, 0, + MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS | + MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE); if (err < 0) - return err; + goto error; } else { - err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE); + err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE); if (err < 0) - return err; + goto error; /* Clear WOL status and disable magic packet matching */ - temp = phy_read(phydev, 
MII_88E1318S_PHY_WOL_CTRL); - temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS; - temp &= ~MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE; - err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp); + err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL, + MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE, + MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS); if (err < 0) - return err; + goto error; } - err = marvell_set_page(phydev, oldpage); - if (err < 0) - return err; - - return 0; +error: + return phy_restore_page(phydev, oldpage, err); } static int marvell_get_sset_count(struct phy_device *phydev) @@ -1519,14 +1464,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i) { struct marvell_hw_stat stat = marvell_hw_stats[i]; struct marvell_priv *priv = phydev->priv; - int oldpage, val; + int val; u64 ret; - oldpage = marvell_get_set_page(phydev, stat.page); - if (oldpage < 0) - return UINT64_MAX; - - val = phy_read(phydev, stat.reg); + val = phy_read_paged(phydev, stat.page, stat.reg); if (val < 0) { ret = UINT64_MAX; } else { @@ -1535,8 +1476,6 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i) ret = priv->stats[i]; } - marvell_set_page(phydev, oldpage); - return ret; } @@ -1553,51 +1492,44 @@ static void marvell_get_stats(struct phy_device *phydev, static int m88e1121_get_temp(struct phy_device *phydev, long *temp) { int oldpage; - int ret; + int ret = 0; int val; *temp = 0; - mutex_lock(&phydev->lock); - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE); - if (oldpage < 0) { - mutex_unlock(&phydev->lock); - return oldpage; - } + oldpage = phy_select_page(phydev, MII_MARVELL_MISC_TEST_PAGE); + if (oldpage < 0) + goto error; /* Enable temperature sensor */ - ret = phy_read(phydev, MII_88E1121_MISC_TEST); + ret = __phy_read(phydev, MII_88E1121_MISC_TEST); if (ret < 0) goto error; - ret = phy_write(phydev, MII_88E1121_MISC_TEST, - ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN); + ret = __phy_write(phydev, MII_88E1121_MISC_TEST, + ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN); if (ret < 0) goto error; /* Wait for temperature to stabilize */ usleep_range(10000, 12000); - val = phy_read(phydev, MII_88E1121_MISC_TEST); + val = __phy_read(phydev, MII_88E1121_MISC_TEST); if (val < 0) { ret = val; goto error; } /* Disable temperature sensor */ - ret = phy_write(phydev, MII_88E1121_MISC_TEST, - ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN); + ret = __phy_write(phydev, MII_88E1121_MISC_TEST, + ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN); if (ret < 0) goto error; *temp = ((val & MII_88E1121_MISC_TEST_TEMP_MASK) - 5) * 5000; error: - marvell_set_page(phydev, oldpage); - mutex_unlock(&phydev->lock); - - return ret; + return phy_restore_page(phydev, oldpage, ret); } static int m88e1121_hwmon_read(struct device *dev, @@ -1671,118 +1603,64 @@ static const struct hwmon_chip_info m88e1121_hwmon_chip_info = { static int m88e1510_get_temp(struct phy_device *phydev, long *temp) { - int oldpage; int ret; *temp = 0; - mutex_lock(&phydev->lock); - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE); - if (oldpage < 0) { - mutex_unlock(&phydev->lock); - return oldpage; - } - - ret = phy_read(phydev, MII_88E1510_TEMP_SENSOR); + ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE, + MII_88E1510_TEMP_SENSOR); if (ret < 0) - goto error; + return ret; *temp = ((ret & MII_88E1510_TEMP_SENSOR_MASK) - 25) * 1000; -error: - marvell_set_page(phydev, oldpage); - mutex_unlock(&phydev->lock); - - return ret; + return 0; } static int 
m88e1510_get_temp_critical(struct phy_device *phydev, long *temp) { - int oldpage; int ret; *temp = 0; - mutex_lock(&phydev->lock); - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE); - if (oldpage < 0) { - mutex_unlock(&phydev->lock); - return oldpage; - } - - ret = phy_read(phydev, MII_88E1121_MISC_TEST); + ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE, + MII_88E1121_MISC_TEST); if (ret < 0) - goto error; + return ret; *temp = (((ret & MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) >> MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT) * 5) - 25; /* convert to mC */ *temp *= 1000; -error: - marvell_set_page(phydev, oldpage); - mutex_unlock(&phydev->lock); - - return ret; + return 0; } static int m88e1510_set_temp_critical(struct phy_device *phydev, long temp) { - int oldpage; - int ret; - - mutex_lock(&phydev->lock); - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE); - if (oldpage < 0) { - mutex_unlock(&phydev->lock); - return oldpage; - } - - ret = phy_read(phydev, MII_88E1121_MISC_TEST); - if (ret < 0) - goto error; - temp = temp / 1000; temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); - ret = phy_write(phydev, MII_88E1121_MISC_TEST, - (ret & ~MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) | - (temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT)); - -error: - marvell_set_page(phydev, oldpage); - mutex_unlock(&phydev->lock); - return ret; + return phy_modify_paged(phydev, MII_MARVELL_MISC_TEST_PAGE, + MII_88E1121_MISC_TEST, + MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK, + temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT); } static int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm) { - int oldpage; int ret; *alarm = false; - mutex_lock(&phydev->lock); - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE); - if (oldpage < 0) { - mutex_unlock(&phydev->lock); - return oldpage; - } - - ret = phy_read(phydev, MII_88E1121_MISC_TEST); + ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE, + MII_88E1121_MISC_TEST); if (ret < 0) - goto error; - *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ); + return ret; -error: - marvell_set_page(phydev, oldpage); - mutex_unlock(&phydev->lock); + *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ); - return ret; + return 0; } static int m88e1510_hwmon_read(struct device *dev, @@ -1871,6 +1749,123 @@ static const struct hwmon_chip_info m88e1510_hwmon_chip_info = { .info = m88e1510_hwmon_info, }; +static int m88e6390_get_temp(struct phy_device *phydev, long *temp) +{ + int sum = 0; + int oldpage; + int ret = 0; + int i; + + *temp = 0; + + oldpage = phy_select_page(phydev, MII_MARVELL_MISC_TEST_PAGE); + if (oldpage < 0) + goto error; + + /* Enable temperature sensor */ + ret = __phy_read(phydev, MII_88E6390_MISC_TEST); + if (ret < 0) + goto error; + + ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK; + ret |= MII_88E6390_MISC_TEST_SAMPLE_ENABLE | + MII_88E6390_MISC_TEST_SAMPLE_1S; + + ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret); + if (ret < 0) + goto error; + + /* Wait for temperature to stabilize */ + usleep_range(10000, 12000); + + /* Reading the temperature sense has an errata. You need to read + * a number of times and take an average. 
+ */ + for (i = 0; i < MII_88E6390_TEMP_SENSOR_SAMPLES; i++) { + ret = __phy_read(phydev, MII_88E6390_TEMP_SENSOR); + if (ret < 0) + goto error; + sum += ret & MII_88E6390_TEMP_SENSOR_MASK; + } + + sum /= MII_88E6390_TEMP_SENSOR_SAMPLES; + *temp = (sum - 75) * 1000; + + /* Disable temperature sensor */ + ret = __phy_read(phydev, MII_88E6390_MISC_TEST); + if (ret < 0) + goto error; + + ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK; + ret |= MII_88E6390_MISC_TEST_SAMPLE_DISABLE; + + ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret); + +error: + phy_restore_page(phydev, oldpage, ret); + + return ret; +} + +static int m88e6390_hwmon_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *temp) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + int err; + + switch (attr) { + case hwmon_temp_input: + err = m88e6390_get_temp(phydev, temp); + break; + default: + return -EOPNOTSUPP; + } + + return err; +} + +static umode_t m88e6390_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + return 0444; + default: + return 0; + } +} + +static u32 m88e6390_hwmon_temp_config[] = { + HWMON_T_INPUT, + 0 +}; + +static const struct hwmon_channel_info m88e6390_hwmon_temp = { + .type = hwmon_temp, + .config = m88e6390_hwmon_temp_config, +}; + +static const struct hwmon_channel_info *m88e6390_hwmon_info[] = { + &m88e1121_hwmon_chip, + &m88e6390_hwmon_temp, + NULL +}; + +static const struct hwmon_ops m88e6390_hwmon_hwmon_ops = { + .is_visible = m88e6390_hwmon_is_visible, + .read = m88e6390_hwmon_read, +}; + +static const struct hwmon_chip_info m88e6390_hwmon_chip_info = { + .ops = &m88e6390_hwmon_hwmon_ops, + .info = m88e6390_hwmon_info, +}; + static int marvell_hwmon_name(struct phy_device *phydev) { struct marvell_priv *priv = phydev->priv; @@ -1917,6 +1912,11 @@ static int m88e1510_hwmon_probe(struct phy_device *phydev) { return marvell_hwmon_probe(phydev, &m88e1510_hwmon_chip_info); } + +static int m88e6390_hwmon_probe(struct phy_device *phydev) +{ + return marvell_hwmon_probe(phydev, &m88e6390_hwmon_chip_info); +} #else static int m88e1121_hwmon_probe(struct phy_device *phydev) { @@ -1927,6 +1927,11 @@ static int m88e1510_hwmon_probe(struct phy_device *phydev) { return 0; } + +static int m88e6390_hwmon_probe(struct phy_device *phydev) +{ + return 0; +} #endif static int marvell_probe(struct phy_device *phydev) @@ -1964,6 +1969,17 @@ static int m88e1510_probe(struct phy_device *phydev) return m88e1510_hwmon_probe(phydev); } +static int m88e6390_probe(struct phy_device *phydev) +{ + int err; + + err = marvell_probe(phydev); + if (err) + return err; + + return m88e6390_hwmon_probe(phydev); +} + static struct phy_driver marvell_drivers[] = { { .phy_id = MARVELL_PHY_ID_88E1101, @@ -1974,11 +1990,12 @@ static struct phy_driver marvell_drivers[] = { .probe = marvell_probe, .config_init = &marvell_config_init, .config_aneg = &m88e1101_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -1992,11 +2009,12 @@ static struct phy_driver marvell_drivers[] = { .probe = marvell_probe, .config_init = &m88e1111_config_init, .config_aneg = 
&marvell_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2015,6 +2033,8 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2028,11 +2048,12 @@ static struct phy_driver marvell_drivers[] = { .probe = marvell_probe, .config_init = &m88e1118_config_init, .config_aneg = &m88e1118_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2052,6 +2073,8 @@ static struct phy_driver marvell_drivers[] = { .did_interrupt = &m88e1121_did_interrupt, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2073,6 +2096,8 @@ static struct phy_driver marvell_drivers[] = { .set_wol = &m88e1318_set_wol, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2091,6 +2116,8 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2104,11 +2131,12 @@ static struct phy_driver marvell_drivers[] = { .probe = marvell_probe, .config_init = &m88e1149_config_init, .config_aneg = &m88e1118_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2122,11 +2150,12 @@ static struct phy_driver marvell_drivers[] = { .probe = marvell_probe, .config_init = &m88e1111_config_init, .config_aneg = &marvell_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2139,12 +2168,12 @@ static struct phy_driver marvell_drivers[] = { .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1116r_config_init, - .config_aneg = &genphy_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = 
&marvell_ack_interrupt, .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2166,6 +2195,8 @@ static struct phy_driver marvell_drivers[] = { .set_wol = &m88e1318_set_wol, .resume = &marvell_resume, .suspend = &marvell_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2186,6 +2217,8 @@ static struct phy_driver marvell_drivers[] = { .did_interrupt = &m88e1121_did_interrupt, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2205,6 +2238,8 @@ static struct phy_driver marvell_drivers[] = { .did_interrupt = &m88e1121_did_interrupt, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2216,7 +2251,6 @@ static struct phy_driver marvell_drivers[] = { .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, - .config_aneg = &genphy_config_aneg, .config_init = &m88e3016_config_init, .aneg_done = &marvell_aneg_done, .read_status = &marvell_read_status, @@ -2225,6 +2259,8 @@ static struct phy_driver marvell_drivers[] = { .did_interrupt = &m88e1121_did_interrupt, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2235,7 +2271,7 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E6390", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .probe = m88e1510_probe, + .probe = m88e6390_probe, .config_init = &marvell_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, @@ -2244,6 +2280,8 @@ static struct phy_driver marvell_drivers[] = { .did_interrupt = &m88e1121_did_interrupt, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 21b3f36e023a..8a0bd98fdec7 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -6,12 +6,18 @@ * * There appears to be several different data paths through the PHY which * are automatically managed by the PHY. The following has been determined - * via observation and experimentation: + * via observation and experimentation for a setup using single-lane Serdes: * * SGMII PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for <= 1G) * 10GBASE-KR PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for 10G) * 10GBASE-KR PHYXS -- BASE-R PCS -- Fiber * + * With XAUI, observation shows: + * + * XAUI PHYXS -- <appropriate PCS as above> + * + * and no switching of the host interface mode occurs. 
+ * * If both the fiber and copper ports are connected, the first to gain * link takes priority and the other port is completely locked out. */ @@ -23,19 +29,17 @@ enum { MV_PCS_BASE_R = 0x1000, MV_PCS_1000BASEX = 0x2000, + MV_PCS_PAIRSWAP = 0x8182, + MV_PCS_PAIRSWAP_MASK = 0x0003, + MV_PCS_PAIRSWAP_AB = 0x0002, + MV_PCS_PAIRSWAP_NONE = 0x0003, + /* These registers appear at 0x800X and 0xa00X - the 0xa00X control * registers appear to set themselves to the 0x800X when AN is * restarted, but status registers appear readable from either. */ MV_AN_CTRL1000 = 0x8000, /* 1000base-T control register */ MV_AN_STAT1000 = 0x8001, /* 1000base-T status register */ - - /* This register appears to reflect the copper status */ - MV_AN_RESULT = 0xa016, - MV_AN_RESULT_SPD_10 = BIT(12), - MV_AN_RESULT_SPD_100 = BIT(13), - MV_AN_RESULT_SPD_1000 = BIT(14), - MV_AN_RESULT_SPD_10000 = BIT(15), }; static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg, @@ -84,7 +88,6 @@ static int mv3310_config_init(struct phy_device *phydev) /* Check that the PHY interface type is compatible */ if (phydev->interface != PHY_INTERFACE_MODE_SGMII && - phydev->interface != PHY_INTERFACE_MODE_XGMII && phydev->interface != PHY_INTERFACE_MODE_XAUI && phydev->interface != PHY_INTERFACE_MODE_RXAUI && phydev->interface != PHY_INTERFACE_MODE_10GKR) @@ -150,12 +153,18 @@ static int mv3310_config_init(struct phy_device *phydev) if (val & MDIO_PMA_EXTABLE_1000BKX) __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, supported); - if (val & MDIO_PMA_EXTABLE_100BTX) + if (val & MDIO_PMA_EXTABLE_100BTX) { __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported); - if (val & MDIO_PMA_EXTABLE_10BT) + __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + supported); + } + if (val & MDIO_PMA_EXTABLE_10BT) { __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); + __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + supported); + } } if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported)) @@ -175,6 +184,9 @@ static int mv3310_config_aneg(struct phy_device *phydev) u32 advertising; int ret; + /* We don't support manual MDI control */ + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + if (phydev->autoneg == AUTONEG_DISABLE) { ret = genphy_c45_pma_setup_forced(phydev); if (ret < 0) @@ -233,6 +245,24 @@ static int mv3310_aneg_done(struct phy_device *phydev) return genphy_c45_aneg_done(phydev); } +static void mv3310_update_interface(struct phy_device *phydev) +{ + if ((phydev->interface == PHY_INTERFACE_MODE_SGMII || + phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) { + /* The PHY automatically switches its serdes interface (and + * active PHYXS instance) between Cisco SGMII and 10GBase-KR + * modes according to the speed. Florian suggests setting + * phydev->interface to communicate this to the MAC. Only do + * this if we are already in either SGMII or 10GBase-KR mode. + */ + if (phydev->speed == SPEED_10000) + phydev->interface = PHY_INTERFACE_MODE_10GKR; + else if (phydev->speed >= SPEED_10 && + phydev->speed < SPEED_10000) + phydev->interface = PHY_INTERFACE_MODE_SGMII; + } +} + /* 10GBASE-ER,LR,LRM,SR do not support autonegotiation. 
*/ static int mv3310_read_10gbr_status(struct phy_device *phydev) { @@ -240,8 +270,7 @@ static int mv3310_read_10gbr_status(struct phy_device *phydev) phydev->speed = SPEED_10000; phydev->duplex = DUPLEX_FULL; - if (phydev->interface == PHY_INTERFACE_MODE_SGMII) - phydev->interface = PHY_INTERFACE_MODE_10GKR; + mv3310_update_interface(phydev); return 0; } @@ -264,6 +293,7 @@ static int mv3310_read_status(struct phy_device *phydev) phydev->link = 0; phydev->pause = 0; phydev->asym_pause = 0; + phydev->mdix = 0; val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_BASE_R + MDIO_STAT1); if (val < 0) @@ -294,22 +324,8 @@ static int mv3310_read_status(struct phy_device *phydev) phydev->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(val); - if (phydev->autoneg == AUTONEG_ENABLE) { - val = phy_read_mmd(phydev, MDIO_MMD_AN, MV_AN_RESULT); - if (val < 0) - return val; - - if (val & MV_AN_RESULT_SPD_10000) - phydev->speed = SPEED_10000; - else if (val & MV_AN_RESULT_SPD_1000) - phydev->speed = SPEED_1000; - else if (val & MV_AN_RESULT_SPD_100) - phydev->speed = SPEED_100; - else if (val & MV_AN_RESULT_SPD_10) - phydev->speed = SPEED_10; - - phydev->duplex = DUPLEX_FULL; - } + if (phydev->autoneg == AUTONEG_ENABLE) + phy_resolve_aneg_linkmode(phydev); } if (phydev->autoneg != AUTONEG_ENABLE) { @@ -318,21 +334,30 @@ static int mv3310_read_status(struct phy_device *phydev) return val; } - if ((phydev->interface == PHY_INTERFACE_MODE_SGMII || - phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) { - /* The PHY automatically switches its serdes interface (and - * active PHYXS instance) between Cisco SGMII and 10GBase-KR - * modes according to the speed. Florian suggests setting - * phydev->interface to communicate this to the MAC. Only do - * this if we are already in either SGMII or 10GBase-KR mode. 
- */ - if (phydev->speed == SPEED_10000) - phydev->interface = PHY_INTERFACE_MODE_10GKR; - else if (phydev->speed >= SPEED_10 && - phydev->speed < SPEED_10000) - phydev->interface = PHY_INTERFACE_MODE_SGMII; + if (phydev->speed == SPEED_10000) { + val = genphy_c45_read_mdix(phydev); + if (val < 0) + return val; + } else { + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PAIRSWAP); + if (val < 0) + return val; + + switch (val & MV_PCS_PAIRSWAP_MASK) { + case MV_PCS_PAIRSWAP_AB: + phydev->mdix = ETH_TP_MDI_X; + break; + case MV_PCS_PAIRSWAP_NONE: + phydev->mdix = ETH_TP_MDI; + break; + default: + phydev->mdix = ETH_TP_MDI_INVALID; + break; + } } + mv3310_update_interface(phydev); + return 0; } @@ -342,7 +367,9 @@ static struct phy_driver mv3310_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "mv88x3310", .features = SUPPORTED_10baseT_Full | + SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 08e0647b85e2..8d370667fa1b 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -205,6 +205,8 @@ static int unimac_mdio_probe(struct platform_device *pdev) return -ENOMEM; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) + return -EINVAL; /* Just ioremap, as this MDIO block is usually integrated into an * Ethernet MAC controller register range diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 54d00a1d2bef..88272b3ac2e2 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -38,6 +38,7 @@ #include <linux/phy.h> #include <linux/io.h> #include <linux/uaccess.h> +#include <linux/gpio/consumer.h> #include <asm/irq.h> @@ -46,11 +47,41 @@ #include "mdio-boardinfo.h" +static int mdiobus_register_gpiod(struct mdio_device *mdiodev) +{ + struct gpio_desc *gpiod = NULL; + + /* Deassert the optional reset signal */ + if (mdiodev->dev.of_node) + gpiod = fwnode_get_named_gpiod(&mdiodev->dev.of_node->fwnode, + "reset-gpios", 0, GPIOD_OUT_LOW, + "PHY reset"); + if (PTR_ERR(gpiod) == -ENOENT) + gpiod = NULL; + else if (IS_ERR(gpiod)) + return PTR_ERR(gpiod); + + mdiodev->reset = gpiod; + + /* Assert the reset signal again */ + mdio_device_reset(mdiodev, 1); + + return 0; +} + int mdiobus_register_device(struct mdio_device *mdiodev) { + int err; + if (mdiodev->bus->mdio_map[mdiodev->addr]) return -EBUSY; + if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) { + err = mdiobus_register_gpiod(mdiodev); + if (err) + return err; + } + mdiodev->bus->mdio_map[mdiodev->addr] = mdiodev; return 0; @@ -421,6 +452,9 @@ void mdiobus_unregister(struct mii_bus *bus) if (!mdiodev) continue; + if (mdiodev->reset) + gpiod_put(mdiodev->reset); + mdiodev->device_remove(mdiodev); mdiodev->device_free(mdiodev); } @@ -494,6 +528,55 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) EXPORT_SYMBOL(mdiobus_scan); /** + * __mdiobus_read - Unlocked version of the mdiobus_read function + * @bus: the mii_bus struct + * @addr: the phy address + * @regnum: register number to read + * + * Read a MDIO bus register. Caller must hold the mdio bus lock. + * + * NOTE: MUST NOT be called from interrupt context. 
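The point of the unlocked accessors is to let a caller batch several bus transactions under a single acquisition of the mdio lock. A minimal sketch of the expected calling pattern, mirroring what the locked mdiobus_read() wrapper below does (bus and addr assumed in scope):

int bmsr, bmcr;

mutex_lock(&bus->mdio_lock);
bmsr = __mdiobus_read(bus, addr, MII_BMSR);
bmcr = __mdiobus_read(bus, addr, MII_BMCR);
mutex_unlock(&bus->mdio_lock);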
+ */ +int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) +{ + int retval; + + WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock)); + + retval = bus->read(bus, addr, regnum); + + trace_mdio_access(bus, 1, addr, regnum, retval, retval); + + return retval; +} +EXPORT_SYMBOL(__mdiobus_read); + +/** + * __mdiobus_write - Unlocked version of the mdiobus_write function + * @bus: the mii_bus struct + * @addr: the phy address + * @regnum: register number to write + * @val: value to write to @regnum + * + * Write a MDIO bus register. Caller must hold the mdio bus lock. + * + * NOTE: MUST NOT be called from interrupt context. + */ +int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) +{ + int err; + + WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock)); + + err = bus->write(bus, addr, regnum, val); + + trace_mdio_access(bus, 0, addr, regnum, val, err); + + return err; +} +EXPORT_SYMBOL(__mdiobus_write); + +/** * mdiobus_read_nested - Nested version of the mdiobus_read function * @bus: the mii_bus struct * @addr: the phy address @@ -513,11 +596,9 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum) BUG_ON(in_interrupt()); mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); - retval = bus->read(bus, addr, regnum); + retval = __mdiobus_read(bus, addr, regnum); mutex_unlock(&bus->mdio_lock); - trace_mdio_access(bus, 1, addr, regnum, retval, retval); - return retval; } EXPORT_SYMBOL(mdiobus_read_nested); @@ -539,11 +620,9 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) BUG_ON(in_interrupt()); mutex_lock(&bus->mdio_lock); - retval = bus->read(bus, addr, regnum); + retval = __mdiobus_read(bus, addr, regnum); mutex_unlock(&bus->mdio_lock); - trace_mdio_access(bus, 1, addr, regnum, retval, retval); - return retval; } EXPORT_SYMBOL(mdiobus_read); @@ -569,11 +648,9 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val) BUG_ON(in_interrupt()); mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); - err = bus->write(bus, addr, regnum, val); + err = __mdiobus_write(bus, addr, regnum, val); mutex_unlock(&bus->mdio_lock); - trace_mdio_access(bus, 0, addr, regnum, val, err); - return err; } EXPORT_SYMBOL(mdiobus_write_nested); @@ -596,11 +673,9 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) BUG_ON(in_interrupt()); mutex_lock(&bus->mdio_lock); - err = bus->write(bus, addr, regnum, val); + err = __mdiobus_write(bus, addr, regnum, val); mutex_unlock(&bus->mdio_lock); - trace_mdio_access(bus, 0, addr, regnum, val, err); - return err; } EXPORT_SYMBOL(mdiobus_write); diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c index e24f28924af8..c924700cf37b 100644 --- a/drivers/net/phy/mdio_device.c +++ b/drivers/net/phy/mdio_device.c @@ -12,6 +12,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/errno.h> +#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> @@ -22,6 +24,7 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/unistd.h> +#include <linux/delay.h> void mdio_device_free(struct mdio_device *mdiodev) { @@ -114,6 +117,21 @@ void mdio_device_remove(struct mdio_device *mdiodev) } EXPORT_SYMBOL(mdio_device_remove); +void mdio_device_reset(struct mdio_device *mdiodev, int value) +{ + unsigned int d; + + if (!mdiodev->reset) + return; + + gpiod_set_value(mdiodev->reset, value); + + d = value ? 
mdiodev->reset_assert_delay : mdiodev->reset_deassert_delay; + if (d) + usleep_range(d, d + max_t(unsigned int, d / 10, 100)); +} +EXPORT_SYMBOL(mdio_device_reset); + /** * mdio_probe - probe an MDIO device * @dev: device to probe @@ -128,8 +146,16 @@ static int mdio_probe(struct device *dev) struct mdio_driver *mdiodrv = to_mdio_driver(drv); int err = 0; - if (mdiodrv->probe) + if (mdiodrv->probe) { + /* Deassert the reset signal */ + mdio_device_reset(mdiodev, 0); + err = mdiodrv->probe(mdiodev); + if (err) { + /* Assert the reset signal */ + mdio_device_reset(mdiodev, 1); + } + } return err; } @@ -140,9 +166,13 @@ static int mdio_remove(struct device *dev) struct device_driver *drv = mdiodev->dev.driver; struct mdio_driver *mdiodrv = to_mdio_driver(drv); - if (mdiodrv->remove) + if (mdiodrv->remove) { mdiodrv->remove(mdiodev); + /* Assert the reset signal */ + mdio_device_reset(mdiodev, 1); + } + return 0; } diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index 842eb871a6e3..ddc2c5ea3787 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -24,31 +24,129 @@ #include <linux/netdevice.h> #include <linux/bitfield.h> -static int meson_gxl_config_init(struct phy_device *phydev) +#define TSTCNTL 20 +#define TSTCNTL_READ BIT(15) +#define TSTCNTL_WRITE BIT(14) +#define TSTCNTL_REG_BANK_SEL GENMASK(12, 11) +#define TSTCNTL_TEST_MODE BIT(10) +#define TSTCNTL_READ_ADDRESS GENMASK(9, 5) +#define TSTCNTL_WRITE_ADDRESS GENMASK(4, 0) +#define TSTREAD1 21 +#define TSTWRITE 23 +#define INTSRC_FLAG 29 +#define INTSRC_ANEG_PR BIT(1) +#define INTSRC_PARALLEL_FAULT BIT(2) +#define INTSRC_ANEG_LP_ACK BIT(3) +#define INTSRC_LINK_DOWN BIT(4) +#define INTSRC_REMOTE_FAULT BIT(5) +#define INTSRC_ANEG_COMPLETE BIT(6) +#define INTSRC_MASK 30 + +#define BANK_ANALOG_DSP 0 +#define BANK_WOL 1 +#define BANK_BIST 3 + +/* WOL Registers */ +#define LPI_STATUS 0xc +#define LPI_STATUS_RSV12 BIT(12) + +/* BIST Registers */ +#define FR_PLL_CONTROL 0x1b +#define FR_PLL_DIV0 0x1c +#define FR_PLL_DIV1 0x1d + +static int meson_gxl_open_banks(struct phy_device *phydev) { - /* Enable Analog and DSP register Bank access by */ - phy_write(phydev, 0x14, 0x0000); - phy_write(phydev, 0x14, 0x0400); - phy_write(phydev, 0x14, 0x0000); - phy_write(phydev, 0x14, 0x0400); + int ret; + + /* Enable Analog and DSP register Bank access by + * toggling TSTCNTL_TEST_MODE bit in the TSTCNTL register + */ + ret = phy_write(phydev, TSTCNTL, 0); + if (ret) + return ret; + ret = phy_write(phydev, TSTCNTL, TSTCNTL_TEST_MODE); + if (ret) + return ret; + ret = phy_write(phydev, TSTCNTL, 0); + if (ret) + return ret; + return phy_write(phydev, TSTCNTL, TSTCNTL_TEST_MODE); +} + +static void meson_gxl_close_banks(struct phy_device *phydev) +{ + phy_write(phydev, TSTCNTL, 0); +} + +static int meson_gxl_read_reg(struct phy_device *phydev, + unsigned int bank, unsigned int reg) +{ + int ret; + + ret = meson_gxl_open_banks(phydev); + if (ret) + goto out; + + ret = phy_write(phydev, TSTCNTL, TSTCNTL_READ | + FIELD_PREP(TSTCNTL_REG_BANK_SEL, bank) | + TSTCNTL_TEST_MODE | + FIELD_PREP(TSTCNTL_READ_ADDRESS, reg)); + if (ret) + goto out; + + ret = phy_read(phydev, TSTREAD1); +out: + /* Close the bank access on our way out */ + meson_gxl_close_banks(phydev); + return ret; +} + +static int meson_gxl_write_reg(struct phy_device *phydev, + unsigned int bank, unsigned int reg, + uint16_t value) +{ + int ret; + + ret = meson_gxl_open_banks(phydev); + if (ret) + goto out; + + ret = phy_write(phydev, TSTWRITE, value); + if (ret) 
+ goto out; + + ret = phy_write(phydev, TSTCNTL, TSTCNTL_WRITE | + FIELD_PREP(TSTCNTL_REG_BANK_SEL, bank) | + TSTCNTL_TEST_MODE | + FIELD_PREP(TSTCNTL_WRITE_ADDRESS, reg)); - /* Write Analog register 23 */ - phy_write(phydev, 0x17, 0x8E0D); - phy_write(phydev, 0x14, 0x4417); +out: + /* Close the bank access on our way out */ + meson_gxl_close_banks(phydev); + return ret; +} + +static int meson_gxl_config_init(struct phy_device *phydev) +{ + int ret; /* Enable fractional PLL */ - phy_write(phydev, 0x17, 0x0005); - phy_write(phydev, 0x14, 0x5C1B); + ret = meson_gxl_write_reg(phydev, BANK_BIST, FR_PLL_CONTROL, 0x5); + if (ret) + return ret; /* Program fraction FR_PLL_DIV1 */ - phy_write(phydev, 0x17, 0x029A); - phy_write(phydev, 0x14, 0x5C1D); + ret = meson_gxl_write_reg(phydev, BANK_BIST, FR_PLL_DIV1, 0x029a); + if (ret) + return ret; /* Program fraction FR_PLL_DIV1 */ - phy_write(phydev, 0x17, 0xAAAA); - phy_write(phydev, 0x14, 0x5C1C); + ret = meson_gxl_write_reg(phydev, BANK_BIST, FR_PLL_DIV0, 0xaaaa); + if (ret) + return ret; - return 0; + return genphy_config_init(phydev); } /* This function is provided to cope with the possible failures of this phy @@ -78,27 +176,8 @@ static int meson_gxl_read_status(struct phy_device *phydev) else if (!ret) goto read_status_continue; - /* Need to access WOL bank, make sure the access is open */ - ret = phy_write(phydev, 0x14, 0x0000); - if (ret) - return ret; - ret = phy_write(phydev, 0x14, 0x0400); - if (ret) - return ret; - ret = phy_write(phydev, 0x14, 0x0000); - if (ret) - return ret; - ret = phy_write(phydev, 0x14, 0x0400); - if (ret) - return ret; - - /* Request LPI_STATUS WOL register */ - ret = phy_write(phydev, 0x14, 0x8D80); - if (ret) - return ret; - - /* Read LPI_STATUS value */ - wol = phy_read(phydev, 0x15); + /* Aneg is done, let's check everything is fine */ + wol = meson_gxl_read_reg(phydev, BANK_WOL, LPI_STATUS); if (wol < 0) return wol; @@ -110,7 +189,7 @@ static int meson_gxl_read_status(struct phy_device *phydev) if (exp < 0) return exp; - if (!(wol & BIT(12)) || + if (!(wol & LPI_STATUS_RSV12) || ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) { /* Looks like aneg failed after all */ phydev_dbg(phydev, "LPA corruption - aneg restart\n"); @@ -122,17 +201,43 @@ read_status_continue: return genphy_read_status(phydev); } +static int meson_gxl_ack_interrupt(struct phy_device *phydev) +{ + int ret = phy_read(phydev, INTSRC_FLAG); + + return ret < 0 ? 
ret : 0; +} + +static int meson_gxl_config_intr(struct phy_device *phydev) +{ + u16 val; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + val = INTSRC_ANEG_PR + | INTSRC_PARALLEL_FAULT + | INTSRC_ANEG_LP_ACK + | INTSRC_LINK_DOWN + | INTSRC_REMOTE_FAULT + | INTSRC_ANEG_COMPLETE; + } else { + val = 0; + } + + return phy_write(phydev, INTSRC_MASK, val); +} + static struct phy_driver meson_gxl_phy[] = { { .phy_id = 0x01814400, .phy_id_mask = 0xfffffff0, .name = "Meson GXL Internal PHY", .features = PHY_BASIC_FEATURES, - .flags = PHY_IS_INTERNAL, + .flags = PHY_IS_INTERNAL | PHY_HAS_INTERRUPT, .config_init = meson_gxl_config_init, - .config_aneg = genphy_config_aneg, .aneg_done = genphy_aneg_done, .read_status = meson_gxl_read_status, + .ack_interrupt = meson_gxl_ack_interrupt, + .config_intr = meson_gxl_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, }, @@ -150,4 +255,5 @@ MODULE_DEVICE_TABLE(mdio, meson_gxl_tbl); MODULE_DESCRIPTION("Amlogic Meson GXL Internal PHY driver"); MODULE_AUTHOR("Baoqi wang"); MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); +MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 422ff6333c52..0f45310300f6 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -802,8 +802,6 @@ static struct phy_driver ksphy_driver[] = { .flags = PHY_HAS_INTERRUPT, .driver_data = &ks8737_type, .config_init = kszphy_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .suspend = genphy_suspend, @@ -817,8 +815,6 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz8021_type, .probe = kszphy_probe, .config_init = kszphy_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .get_sset_count = kszphy_get_sset_count, @@ -835,8 +831,6 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz8021_type, .probe = kszphy_probe, .config_init = kszphy_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .get_sset_count = kszphy_get_sset_count, @@ -854,7 +848,6 @@ static struct phy_driver ksphy_driver[] = { .probe = kszphy_probe, .config_init = ksz8041_config_init, .config_aneg = ksz8041_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .get_sset_count = kszphy_get_sset_count, @@ -871,8 +864,6 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz8041_type, .probe = kszphy_probe, .config_init = kszphy_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .get_sset_count = kszphy_get_sset_count, @@ -889,8 +880,6 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz8051_type, .probe = kszphy_probe, .config_init = kszphy_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .get_sset_count = kszphy_get_sset_count, @@ -907,8 +896,6 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz8041_type, .probe = kszphy_probe, .config_init = kszphy_config_init, - .config_aneg = genphy_config_aneg, - .read_status 
= genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .get_sset_count = kszphy_get_sset_count, @@ -925,8 +912,6 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz8081_type, .probe = kszphy_probe, .config_init = kszphy_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .get_sset_count = kszphy_get_sset_count, @@ -941,8 +926,6 @@ static struct phy_driver ksphy_driver[] = { .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = kszphy_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .suspend = genphy_suspend, @@ -956,8 +939,6 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9021_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .get_sset_count = kszphy_get_sset_count, @@ -976,7 +957,6 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9031_config_init, - .config_aneg = genphy_config_aneg, .read_status = ksz9031_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, @@ -1001,8 +981,6 @@ static struct phy_driver ksphy_driver[] = { .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = kszphy_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, }, { @@ -1022,8 +1000,6 @@ static struct phy_driver ksphy_driver[] = { .name = "Microchip KSZ9477", .features = PHY_GBIT_FEATURES, .config_init = kszphy_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, } }; diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 37ee856c7680..0f293ef28935 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -153,7 +153,6 @@ static struct phy_driver microchip_phy_driver[] = { .config_init = genphy_config_init, .config_aneg = lan88xx_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = lan88xx_phy_ack_interrupt, .config_intr = lan88xx_phy_config_intr, diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index 2addf1d3f619..2b1e336961f9 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c @@ -136,8 +136,6 @@ static struct phy_driver dp83865_driver[] = { { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = ns_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = ns_ack_interrupt, .config_intr = ns_config_intr, } }; diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index dada819c6b78..a4576859afae 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -233,6 +233,39 @@ int genphy_c45_read_pma(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(genphy_c45_read_pma); +/** + * genphy_c45_read_mdix - read mdix status from PMA + * @phydev: target phy_device struct + */ +int genphy_c45_read_mdix(struct phy_device *phydev) +{ + int val; + + if (phydev->speed == SPEED_10000) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, + MDIO_PMA_10GBT_SWAPPOL); + if (val < 0) + return val; + 
+ switch (val) { + case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX: + phydev->mdix = ETH_TP_MDI; + break; + + case 0: + phydev->mdix = ETH_TP_MDI_X; + break; + + default: + phydev->mdix = ETH_TP_MDI_INVALID; + break; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(genphy_c45_read_mdix); + /* The gen10g_* functions are the old Clause 45 stub */ static int gen10g_config_aneg(struct phy_device *phydev) diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 21f75ae244b3..4083f00c97a5 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -189,17 +189,61 @@ size_t phy_speeds(unsigned int *speeds, size_t size, return count; } +/** + * phy_resolve_aneg_linkmode - resolve the advertisements into phy settings + * @phydev: The phy_device struct + * + * Resolve our and the link partner advertisements into their corresponding + * speed and duplex. If full duplex was negotiated, extract the pause mode + * from the link partner mask. + */ +void phy_resolve_aneg_linkmode(struct phy_device *phydev) +{ + u32 common = phydev->lp_advertising & phydev->advertising; + + if (common & ADVERTISED_10000baseT_Full) { + phydev->speed = SPEED_10000; + phydev->duplex = DUPLEX_FULL; + } else if (common & ADVERTISED_1000baseT_Full) { + phydev->speed = SPEED_1000; + phydev->duplex = DUPLEX_FULL; + } else if (common & ADVERTISED_1000baseT_Half) { + phydev->speed = SPEED_1000; + phydev->duplex = DUPLEX_HALF; + } else if (common & ADVERTISED_100baseT_Full) { + phydev->speed = SPEED_100; + phydev->duplex = DUPLEX_FULL; + } else if (common & ADVERTISED_100baseT_Half) { + phydev->speed = SPEED_100; + phydev->duplex = DUPLEX_HALF; + } else if (common & ADVERTISED_10baseT_Full) { + phydev->speed = SPEED_10; + phydev->duplex = DUPLEX_FULL; + } else if (common & ADVERTISED_10baseT_Half) { + phydev->speed = SPEED_10; + phydev->duplex = DUPLEX_HALF; + } + + if (phydev->duplex == DUPLEX_FULL) { + phydev->pause = !!(phydev->lp_advertising & ADVERTISED_Pause); + phydev->asym_pause = !!(phydev->lp_advertising & + ADVERTISED_Asym_Pause); + } +} +EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode); + static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad, u16 regnum) { /* Write the desired MMD Devad */ - bus->write(bus, phy_addr, MII_MMD_CTRL, devad); + __mdiobus_write(bus, phy_addr, MII_MMD_CTRL, devad); /* Write the desired MMD register address */ - bus->write(bus, phy_addr, MII_MMD_DATA, regnum); + __mdiobus_write(bus, phy_addr, MII_MMD_DATA, regnum); /* Select the Function : DATA with no post increment */ - bus->write(bus, phy_addr, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR); + __mdiobus_write(bus, phy_addr, MII_MMD_CTRL, + devad | MII_MMD_CTRL_NOINCR); } /** @@ -232,7 +276,7 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) mmd_phy_indirect(bus, phy_addr, devad, regnum); /* Read the content of the MMD's selected register */ - val = bus->read(bus, phy_addr, MII_MMD_DATA); + val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA); mutex_unlock(&bus->mdio_lock); } return val; @@ -271,7 +315,7 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) mmd_phy_indirect(bus, phy_addr, devad, regnum); /* Write the data into MMD's selected register */ - bus->write(bus, phy_addr, MII_MMD_DATA, val); + __mdiobus_write(bus, phy_addr, MII_MMD_DATA, val); mutex_unlock(&bus->mdio_lock); ret = 0; @@ -279,3 +323,207 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) return ret; } EXPORT_SYMBOL(phy_write_mmd); + +/** + *
__phy_modify() - Convenience function for modifying a PHY register + * @phydev: a pointer to a &struct phy_device + * @regnum: register number + * @mask: bit mask of bits to clear + * @set: bit mask of bits to set + * + * Unlocked helper function which allows a PHY register to be modified as + * new register value = (old register value & ~mask) | set + */ +int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set) +{ + int ret; + + ret = __phy_read(phydev, regnum); + if (ret < 0) + return ret; + + ret = __phy_write(phydev, regnum, (ret & ~mask) | set); + + return ret < 0 ? ret : 0; +} +EXPORT_SYMBOL_GPL(__phy_modify); + +/** + * phy_modify - Convenience function for modifying a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @mask: bit mask of bits to clear + * @set: new value of bits set in mask to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set) +{ + int ret; + + mutex_lock(&phydev->mdio.bus->mdio_lock); + ret = __phy_modify(phydev, regnum, mask, set); + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(phy_modify); + +static int __phy_read_page(struct phy_device *phydev) +{ + return phydev->drv->read_page(phydev); +} + +static int __phy_write_page(struct phy_device *phydev, int page) +{ + return phydev->drv->write_page(phydev, page); +} + +/** + * phy_save_page() - take the bus lock and save the current page + * @phydev: a pointer to a &struct phy_device + * + * Take the MDIO bus lock, and return the current page number. On error, + * returns a negative errno. phy_restore_page() must always be called + * after this, irrespective of success or failure of this call. + */ +int phy_save_page(struct phy_device *phydev) +{ + mutex_lock(&phydev->mdio.bus->mdio_lock); + return __phy_read_page(phydev); +} +EXPORT_SYMBOL_GPL(phy_save_page); + +/** + * phy_select_page() - take the bus lock, save the current page, and set a page + * @phydev: a pointer to a &struct phy_device + * @page: desired page + * + * Take the MDIO bus lock to protect against concurrent access, save the + * current PHY page, and set the current page. On error, returns a + * negative errno, otherwise returns the previous page number. + * phy_restore_page() must always be called after this, irrespective + * of success or failure of this call. + */ +int phy_select_page(struct phy_device *phydev, int page) +{ + int ret, oldpage; + + oldpage = ret = phy_save_page(phydev); + if (ret < 0) + return ret; + + if (oldpage != page) { + ret = __phy_write_page(phydev, page); + if (ret < 0) + return ret; + } + + return oldpage; +} +EXPORT_SYMBOL_GPL(phy_select_page); + +/** + * phy_restore_page() - restore the page register and release the bus lock + * @phydev: a pointer to a &struct phy_device + * @oldpage: the old page, return value from phy_save_page() or phy_select_page() + * @ret: operation's return code + * + * Release the MDIO bus lock, restoring @oldpage if it is a valid page. + * This function propagates the earliest error code from the group of + * operations. + * + * Returns: + * @oldpage if it was a negative value, otherwise + * @ret if it was a negative errno value, otherwise + * phy_write_page()'s negative value if it were in error, otherwise + * @ret. 
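Together, phy_select_page() and phy_restore_page() form an error-safe bracket for paged access. A sketch of the intended driver-side pattern, with MY_PAGE and MY_REG as hypothetical constants (the phy_read_paged() helper below is exactly this pattern):

static int mydrv_read_special(struct phy_device *phydev)
{
	int oldpage, ret = 0;

	oldpage = phy_select_page(phydev, MY_PAGE);
	if (oldpage >= 0)
		ret = __phy_read(phydev, MY_REG);

	/* Must run even if phy_select_page() failed */
	return phy_restore_page(phydev, oldpage, ret);
}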
+ */ +int phy_restore_page(struct phy_device *phydev, int oldpage, int ret) +{ + int r; + + if (oldpage >= 0) { + r = __phy_write_page(phydev, oldpage); + + /* Propagate the operation return code if the page write + * was successful. + */ + if (ret >= 0 && r < 0) + ret = r; + } else { + /* Propagate the phy page selection error code */ + ret = oldpage; + } + + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(phy_restore_page); + +/** + * phy_read_paged() - Convenience function for reading a paged register + * @phydev: a pointer to a &struct phy_device + * @page: the page for the phy + * @regnum: register number + * + * Same rules as for phy_read(). + */ +int phy_read_paged(struct phy_device *phydev, int page, u32 regnum) +{ + int ret = 0, oldpage; + + oldpage = phy_select_page(phydev, page); + if (oldpage >= 0) + ret = __phy_read(phydev, regnum); + + return phy_restore_page(phydev, oldpage, ret); +} +EXPORT_SYMBOL(phy_read_paged); + +/** + * phy_write_paged() - Convenience function for writing a paged register + * @phydev: a pointer to a &struct phy_device + * @page: the page for the phy + * @regnum: register number + * @val: value to write + * + * Same rules as for phy_write(). + */ +int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val) +{ + int ret = 0, oldpage; + + oldpage = phy_select_page(phydev, page); + if (oldpage >= 0) + ret = __phy_write(phydev, regnum, val); + + return phy_restore_page(phydev, oldpage, ret); +} +EXPORT_SYMBOL(phy_write_paged); + +/** + * phy_modify_paged() - Convenience function for modifying a paged register + * @phydev: a pointer to a &struct phy_device + * @page: the page for the phy + * @regnum: register number + * @mask: bit mask of bits to clear + * @set: bit mask of bits to set + * + * Same rules as for phy_read() and phy_write(). + */ +int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, + u16 mask, u16 set) +{ + int ret = 0, oldpage; + + oldpage = phy_select_page(phydev, page); + if (oldpage >= 0) + ret = __phy_modify(phydev, regnum, mask, set); + + return phy_restore_page(phydev, oldpage, ret); +} +EXPORT_SYMBOL(phy_modify_paged); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index ed10d1fc8f59..f3313a129531 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -493,7 +493,10 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync) /* Invalidate LP advertising flags */ phydev->lp_advertising = 0; - err = phydev->drv->config_aneg(phydev); + if (phydev->drv->config_aneg) + err = phydev->drv->config_aneg(phydev); + else + err = genphy_config_aneg(phydev); if (err < 0) goto out_unlock; @@ -629,9 +632,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) if (PHY_HALTED == phydev->state) return IRQ_NONE; /* It can't be ours. */ - disable_irq_nosync(irq); - atomic_inc(&phydev->irq_disable); - phy_change(phydev); return IRQ_HANDLED; @@ -689,7 +689,6 @@ phy_err: */ int phy_start_interrupts(struct phy_device *phydev) { - atomic_set(&phydev->irq_disable, 0); if (request_threaded_irq(phydev->irq, NULL, phy_interrupt, IRQF_ONESHOT | IRQF_SHARED, phydev_name(phydev), phydev) < 0) { @@ -716,13 +715,6 @@ int phy_stop_interrupts(struct phy_device *phydev) free_irq(phydev->irq, phydev); - /* If work indeed has been cancelled, disable_irq() will have - * been left unbalanced from phy_interrupt() and enable_irq() - * has to be called so that other devices on the line work. 
- */ - while (atomic_dec_return(&phydev->irq_disable) >= 0) - enable_irq(phydev->irq); - return err; } EXPORT_SYMBOL(phy_stop_interrupts); @@ -736,10 +728,11 @@ void phy_change(struct phy_device *phydev) if (phy_interrupt_is_valid(phydev)) { if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) - goto ignore; + return; - if (phy_disable_interrupts(phydev)) - goto phy_err; + if (phydev->state == PHY_HALTED) + if (phy_disable_interrupts(phydev)) + goto phy_err; } mutex_lock(&phydev->lock); @@ -747,28 +740,13 @@ void phy_change(struct phy_device *phydev) phydev->state = PHY_CHANGELINK; mutex_unlock(&phydev->lock); - if (phy_interrupt_is_valid(phydev)) { - atomic_dec(&phydev->irq_disable); - enable_irq(phydev->irq); - - /* Reenable interrupts */ - if (PHY_HALTED != phydev->state && - phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED)) - goto irq_enable_err; - } - /* reschedule state queue work to run as soon as possible */ phy_trigger_machine(phydev, true); - return; -ignore: - atomic_dec(&phydev->irq_disable); - enable_irq(phydev->irq); + if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev)) + goto phy_err; return; -irq_enable_err: - disable_irq(phydev->irq); - atomic_inc(&phydev->irq_disable); phy_err: phy_error(phydev); } @@ -1003,10 +981,6 @@ void phy_state_machine(struct work_struct *work) phydev->state = PHY_NOLINK; phy_link_down(phydev, true); } - - if (phy_interrupt_is_valid(phydev)) - err = phy_config_interrupt(phydev, - PHY_INTERRUPT_ENABLED); break; case PHY_HALTED: if (phydev->link) { @@ -1083,16 +1057,12 @@ void phy_state_machine(struct work_struct *work) /** * phy_mac_interrupt - MAC says the link has changed * @phydev: phy_device struct with changed link - * @new_link: Link is Up/Down. * - * Description: The MAC layer is able indicate there has been a change - * in the PHY link status. Set the new link status, and trigger the - * state machine, work a work queue. + * The MAC layer is able to indicate there has been a change in the PHY link + * status. Trigger the state machine by scheduling its work queue.
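With the new_link argument gone, a MAC driver now only pokes the state machine and leaves reading back the link state to phylib. A sketch of a caller under the new API; mymac_priv and mymac_link_changed() are hypothetical:

static irqreturn_t mymac_isr(int irq, void *dev_id)
{
	struct mymac_priv *priv = dev_id;

	if (mymac_link_changed(priv))
		phy_mac_interrupt(priv->ndev->phydev);

	return IRQ_HANDLED;
}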
*/ -void phy_mac_interrupt(struct phy_device *phydev, int new_link) +void phy_mac_interrupt(struct phy_device *phydev) { - phydev->link = new_link; - /* Trigger a state machine change */ queue_work(system_power_efficient_wq, &phydev->phy_queue); } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index b15b31ca2618..b13eed21c87d 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -634,6 +634,9 @@ int phy_device_register(struct phy_device *phydev) if (err) return err; + /* Deassert the reset signal */ + phy_device_reset(phydev, 0); + /* Run all of the fixups for this PHY */ err = phy_scan_fixups(phydev); if (err) { @@ -652,6 +655,9 @@ int phy_device_register(struct phy_device *phydev) return 0; out: + /* Assert the reset signal */ + phy_device_reset(phydev, 1); + mdiobus_unregister_device(&phydev->mdio); return err; } @@ -668,6 +674,10 @@ EXPORT_SYMBOL(phy_device_register); void phy_device_remove(struct phy_device *phydev) { device_del(&phydev->mdio.dev); + + /* Assert the reset signal */ + phy_device_reset(phydev, 1); + mdiobus_unregister_device(&phydev->mdio); } EXPORT_SYMBOL(phy_device_remove); @@ -851,6 +861,9 @@ int phy_init_hw(struct phy_device *phydev) { int ret = 0; + /* Deassert the reset signal */ + phy_device_reset(phydev, 0); + if (!phydev->drv || !phydev->drv->config_init) return 0; @@ -1130,6 +1143,9 @@ void phy_detach(struct phy_device *phydev) put_device(&phydev->mdio.dev); if (ndev_owner != bus->owner) module_put(bus->owner); + + /* Assert the reset signal */ + phy_device_reset(phydev, 1); } EXPORT_SYMBOL(phy_detach); @@ -1208,6 +1224,30 @@ out: } EXPORT_SYMBOL(phy_loopback); +/** + * phy_reset_after_clk_enable - perform a PHY reset if needed + * @phydev: target phy_device struct + * + * Description: Some PHYs are known to need a reset after their refclk was + * enabled. This function evaluates the flags and performs the reset if + * needed. Returns < 0 on error, 0 if the phy wasn't reset and 1 if the phy + * was reset.
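A MAC driver that gates the PHY's reference clock would call this right after re-enabling the clock, for instance on resume. A sketch, where priv->refclk is a hypothetical clk handle:

ret = clk_prepare_enable(priv->refclk);
if (ret)
	return ret;

/* PHYs flagging PHY_RST_AFTER_CLK_EN get re-reset now that refclk is stable */
ret = phy_reset_after_clk_enable(ndev->phydev);
if (ret < 0)
	return ret;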
+ */ +int phy_reset_after_clk_enable(struct phy_device *phydev) +{ + if (!phydev || !phydev->drv) + return -ENODEV; + + if (phydev->drv->flags & PHY_RST_AFTER_CLK_EN) { + phy_device_reset(phydev, 1); + phy_device_reset(phydev, 0); + return 1; + } + + return 0; +} +EXPORT_SYMBOL(phy_reset_after_clk_enable); + /* Generic PHY support and helper functions */ /** @@ -1328,9 +1368,8 @@ static int genphy_config_eee_advert(struct phy_device *phydev) */ int genphy_setup_forced(struct phy_device *phydev) { - int ctl = phy_read(phydev, MII_BMCR); + u16 ctl = 0; - ctl &= BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN; phydev->pause = 0; phydev->asym_pause = 0; @@ -1342,7 +1381,8 @@ int genphy_setup_forced(struct phy_device *phydev) if (DUPLEX_FULL == phydev->duplex) ctl |= BMCR_FULLDPLX; - return phy_write(phydev, MII_BMCR, ctl); + return phy_modify(phydev, MII_BMCR, + BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl); } EXPORT_SYMBOL(genphy_setup_forced); @@ -1352,17 +1392,9 @@ EXPORT_SYMBOL(genphy_setup_forced); */ int genphy_restart_aneg(struct phy_device *phydev) { - int ctl = phy_read(phydev, MII_BMCR); - - if (ctl < 0) - return ctl; - - ctl |= BMCR_ANENABLE | BMCR_ANRESTART; - /* Don't isolate the PHY if we're negotiating */ - ctl &= ~BMCR_ISOLATE; - - return phy_write(phydev, MII_BMCR, ctl); + return phy_modify(phydev, MII_BMCR, BMCR_ISOLATE, + BMCR_ANENABLE | BMCR_ANRESTART); } EXPORT_SYMBOL(genphy_restart_aneg); @@ -1628,44 +1660,20 @@ EXPORT_SYMBOL(genphy_config_init); int genphy_suspend(struct phy_device *phydev) { - int value; - - mutex_lock(&phydev->lock); - - value = phy_read(phydev, MII_BMCR); - phy_write(phydev, MII_BMCR, value | BMCR_PDOWN); - - mutex_unlock(&phydev->lock); - - return 0; + return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN); } EXPORT_SYMBOL(genphy_suspend); int genphy_resume(struct phy_device *phydev) { - int value; - - value = phy_read(phydev, MII_BMCR); - phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); - - return 0; + return phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN); } EXPORT_SYMBOL(genphy_resume); int genphy_loopback(struct phy_device *phydev, bool enable) { - int value; - - value = phy_read(phydev, MII_BMCR); - if (value < 0) - return value; - - if (enable) - value |= BMCR_LOOPBACK; - else - value &= ~BMCR_LOOPBACK; - - return phy_write(phydev, MII_BMCR, value); + return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, + enable ? 
BMCR_LOOPBACK : 0); } EXPORT_SYMBOL(genphy_loopback); @@ -1813,8 +1821,16 @@ static int phy_probe(struct device *dev) /* Set the state to READY by default */ phydev->state = PHY_READY; - if (phydev->drv->probe) + if (phydev->drv->probe) { + /* Deassert the reset signal */ + phy_device_reset(phydev, 0); + err = phydev->drv->probe(phydev); + if (err) { + /* Assert the reset signal */ + phy_device_reset(phydev, 1); + } + } mutex_unlock(&phydev->lock); @@ -1831,8 +1847,12 @@ static int phy_remove(struct device *dev) phydev->state = PHY_DOWN; mutex_unlock(&phydev->lock); - if (phydev->drv && phydev->drv->remove) + if (phydev->drv && phydev->drv->remove) { phydev->drv->remove(phydev); + + /* Assert the reset signal */ + phy_device_reset(phydev, 1); + } phydev->drv = NULL; return 0; @@ -1909,9 +1929,7 @@ static struct phy_driver genphy_driver = { .features = PHY_GBIT_FEATURES | SUPPORTED_MII | SUPPORTED_AUI | SUPPORTED_FIBRE | SUPPORTED_BNC, - .config_aneg = genphy_config_aneg, .aneg_done = genphy_aneg_done, - .read_status = genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, .set_loopback = genphy_loopback, diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 249ce5cbea22..6ac8b29b2dc3 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -36,7 +36,11 @@ enum { PHYLINK_DISABLE_LINK, }; +/** + * struct phylink - internal data type for phylink + */ struct phylink { + /* private: */ struct net_device *netdev; const struct phylink_mac_ops *ops; @@ -50,6 +54,8 @@ struct phylink { /* The link configuration settings */ struct phylink_link_state link_config; struct gpio_desc *link_gpio; + void (*get_fixed_state)(struct net_device *dev, + struct phylink_link_state *s); struct mutex state_mutex; struct phylink_link_state phy_state; @@ -87,6 +93,13 @@ static inline bool linkmode_empty(const unsigned long *src) return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS); } +/** + * phylink_set_port_modes() - set the port type modes in the ethtool mask + * @mask: ethtool link mode mask + * + * Sets all the port type modes in the ethtool mask. MAC drivers should + * use this in their 'validate' callback. + */ void phylink_set_port_modes(unsigned long *mask) { phylink_set(mask, TP); @@ -117,8 +130,7 @@ static const char *phylink_an_mode_str(unsigned int mode) static const char *modestr[] = { [MLO_AN_PHY] = "phy", [MLO_AN_FIXED] = "fixed", - [MLO_AN_SGMII] = "SGMII", - [MLO_AN_8023Z] = "802.3z", + [MLO_AN_INBAND] = "inband", }; return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown"; @@ -132,59 +144,64 @@ static int phylink_validate(struct phylink *pl, unsigned long *supported, return phylink_is_empty_linkmode(supported) ? 
-EINVAL : 0; } -static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np) +static int phylink_parse_fixedlink(struct phylink *pl, + struct fwnode_handle *fwnode) { - struct device_node *fixed_node; + struct fwnode_handle *fixed_node; const struct phy_setting *s; struct gpio_desc *desc; - const __be32 *fixed_prop; u32 speed; - int ret, len; + int ret; - fixed_node = of_get_child_by_name(np, "fixed-link"); + fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link"); if (fixed_node) { - ret = of_property_read_u32(fixed_node, "speed", &speed); + ret = fwnode_property_read_u32(fixed_node, "speed", &speed); pl->link_config.speed = speed; pl->link_config.duplex = DUPLEX_HALF; - if (of_property_read_bool(fixed_node, "full-duplex")) + if (fwnode_property_read_bool(fixed_node, "full-duplex")) pl->link_config.duplex = DUPLEX_FULL; /* We treat the "pause" and "asym-pause" terminology as * defining the link partner's ability. */ - if (of_property_read_bool(fixed_node, "pause")) + if (fwnode_property_read_bool(fixed_node, "pause")) pl->link_config.pause |= MLO_PAUSE_SYM; - if (of_property_read_bool(fixed_node, "asym-pause")) + if (fwnode_property_read_bool(fixed_node, "asym-pause")) pl->link_config.pause |= MLO_PAUSE_ASYM; if (ret == 0) { - desc = fwnode_get_named_gpiod(&fixed_node->fwnode, - "link-gpios", 0, - GPIOD_IN, "?"); + desc = fwnode_get_named_gpiod(fixed_node, "link-gpios", + 0, GPIOD_IN, "?"); if (!IS_ERR(desc)) pl->link_gpio = desc; else if (desc == ERR_PTR(-EPROBE_DEFER)) ret = -EPROBE_DEFER; } - of_node_put(fixed_node); + fwnode_handle_put(fixed_node); if (ret) return ret; } else { - fixed_prop = of_get_property(np, "fixed-link", &len); - if (!fixed_prop) { + u32 prop[5]; + + ret = fwnode_property_read_u32_array(fwnode, "fixed-link", + NULL, 0); + if (ret != ARRAY_SIZE(prop)) { netdev_err(pl->netdev, "broken fixed-link?\n"); return -EINVAL; } - if (len == 5 * sizeof(*fixed_prop)) { - pl->link_config.duplex = be32_to_cpu(fixed_prop[1]) ? + + ret = fwnode_property_read_u32_array(fwnode, "fixed-link", + prop, ARRAY_SIZE(prop)); + if (!ret) { + pl->link_config.duplex = prop[1] ? 
DUPLEX_FULL : DUPLEX_HALF; - pl->link_config.speed = be32_to_cpu(fixed_prop[2]); - if (be32_to_cpu(fixed_prop[3])) + pl->link_config.speed = prop[2]; + if (prop[3]) pl->link_config.pause |= MLO_PAUSE_SYM; - if (be32_to_cpu(fixed_prop[4])) + if (prop[4]) pl->link_config.pause |= MLO_PAUSE_ASYM; } } @@ -220,17 +237,17 @@ static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np) return 0; } -static int phylink_parse_mode(struct phylink *pl, struct device_node *np) +static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode) { - struct device_node *dn; + struct fwnode_handle *dn; const char *managed; - dn = of_get_child_by_name(np, "fixed-link"); - if (dn || of_find_property(np, "fixed-link", NULL)) + dn = fwnode_get_named_child_node(fwnode, "fixed-link"); + if (dn || fwnode_property_present(fwnode, "fixed-link")) pl->link_an_mode = MLO_AN_FIXED; - of_node_put(dn); + fwnode_handle_put(dn); - if (of_property_read_string(np, "managed", &managed) == 0 && + if (fwnode_property_read_string(fwnode, "managed", &managed) == 0 && strcmp(managed, "in-band-status") == 0) { if (pl->link_an_mode == MLO_AN_FIXED) { netdev_err(pl->netdev, @@ -244,6 +261,7 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np) phylink_set(pl->supported, Asym_Pause); phylink_set(pl->supported, Pause); pl->link_config.an_enabled = true; + pl->link_an_mode = MLO_AN_INBAND; switch (pl->link_config.interface) { case PHY_INTERFACE_MODE_SGMII: @@ -253,17 +271,14 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np) phylink_set(pl->supported, 100baseT_Full); phylink_set(pl->supported, 1000baseT_Half); phylink_set(pl->supported, 1000baseT_Full); - pl->link_an_mode = MLO_AN_SGMII; break; case PHY_INTERFACE_MODE_1000BASEX: phylink_set(pl->supported, 1000baseX_Full); - pl->link_an_mode = MLO_AN_8023Z; break; case PHY_INTERFACE_MODE_2500BASEX: phylink_set(pl->supported, 2500baseX_Full); - pl->link_an_mode = MLO_AN_8023Z; break; case PHY_INTERFACE_MODE_10GKR: @@ -280,7 +295,6 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np) phylink_set(pl->supported, 10000baseLR_Full); phylink_set(pl->supported, 10000baseLRM_Full); phylink_set(pl->supported, 10000baseER_Full); - pl->link_an_mode = MLO_AN_SGMII; break; default: @@ -320,8 +334,7 @@ static void phylink_mac_config(struct phylink *pl, static void phylink_mac_an_restart(struct phylink *pl) { if (pl->link_config.an_enabled && - (pl->link_config.interface == PHY_INTERFACE_MODE_1000BASEX || - pl->link_config.interface == PHY_INTERFACE_MODE_2500BASEX)) + phy_interface_mode_is_8023z(pl->link_config.interface)) pl->ops->mac_an_restart(pl->netdev); } @@ -339,12 +352,14 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state * } /* The fixed state is... fixed except for the link state, - * which may be determined by a GPIO. + * which may be determined by a GPIO or a callback. 
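The callback variant lets a MAC derive the fixed-link state from, say, an out-of-band status register instead of a GPIO. A sketch of such a callback, registered through phylink_fixed_state_cb() further down; mymac_readl(), MYMAC_STATUS and LINK_UP are hypothetical:

static void mymac_fixed_state(struct net_device *dev,
			      struct phylink_link_state *state)
{
	struct mymac_priv *priv = netdev_priv(dev);

	state->link = !!(mymac_readl(priv, MYMAC_STATUS) & LINK_UP);
}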
*/ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_state *state) { *state = pl->link_config; - if (pl->link_gpio) + if (pl->get_fixed_state) + pl->get_fixed_state(pl->netdev, state); + else if (pl->link_gpio) state->link = !!gpiod_get_value(pl->link_gpio); } @@ -423,7 +438,7 @@ static void phylink_resolve(struct work_struct *w) phylink_mac_config(pl, &link_state); break; - case MLO_AN_SGMII: + case MLO_AN_INBAND: phylink_get_mac_state(pl, &link_state); if (pl->phydev) { bool changed = false; @@ -449,10 +464,6 @@ static void phylink_resolve(struct work_struct *w) } } break; - - case MLO_AN_8023Z: - phylink_get_mac_state(pl, &link_state); - break; } } @@ -489,15 +500,27 @@ static void phylink_run_resolve(struct phylink *pl) static const struct sfp_upstream_ops sfp_phylink_ops; -static int phylink_register_sfp(struct phylink *pl, struct device_node *np) +static int phylink_register_sfp(struct phylink *pl, + struct fwnode_handle *fwnode) { - struct device_node *sfp_np; + struct fwnode_reference_args ref; + int ret; - sfp_np = of_parse_phandle(np, "sfp", 0); - if (!sfp_np) + if (!fwnode) return 0; - pl->sfp_bus = sfp_register_upstream(sfp_np, pl->netdev, pl, + ret = fwnode_property_get_reference_args(fwnode, "sfp", NULL, + 0, 0, &ref); + if (ret < 0) { + if (ret == -ENOENT) + return 0; + + netdev_err(pl->netdev, "unable to parse \"sfp\" node: %d\n", + ret); + return ret; + } + + pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl->netdev, pl, &sfp_phylink_ops); if (!pl->sfp_bus) return -ENOMEM; @@ -505,7 +528,22 @@ static int phylink_register_sfp(struct phylink *pl, struct device_node *np) return 0; } -struct phylink *phylink_create(struct net_device *ndev, struct device_node *np, +/** + * phylink_create() - create a phylink instance + * @ndev: a pointer to the &struct net_device + * @fwnode: a pointer to a &struct fwnode_handle describing the network + * interface + * @iface: the desired link mode defined by &typedef phy_interface_t + * @ops: a pointer to a &struct phylink_mac_ops for the MAC. + * + * Create a new phylink instance, and parse the link parameters found in + * @fwnode. This will parse in-band modes, fixed-link or SFP configuration. + * + * Returns a pointer to a &struct phylink, or an error-pointer value. Users + * must use IS_ERR() to check for errors from this function.
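With the device_node parameter now a fwnode_handle, a DT-based MAC driver creates its instance roughly like this; mymac_phylink_ops is hypothetical:

struct phylink *pl;

pl = phylink_create(ndev, of_fwnode_handle(pdev->dev.of_node),
		    PHY_INTERFACE_MODE_SGMII, &mymac_phylink_ops);
if (IS_ERR(pl))
	return PTR_ERR(pl);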
+ */ +struct phylink *phylink_create(struct net_device *ndev, + struct fwnode_handle *fwnode, phy_interface_t iface, const struct phylink_mac_ops *ops) { @@ -521,7 +559,10 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np, pl->netdev = ndev; pl->phy_state.interface = iface; pl->link_interface = iface; - pl->link_port = PORT_MII; + if (iface == PHY_INTERFACE_MODE_MOCA) + pl->link_port = PORT_BNC; + else + pl->link_port = PORT_MII; pl->link_config.interface = iface; pl->link_config.pause = MLO_PAUSE_AN; pl->link_config.speed = SPEED_UNKNOWN; @@ -534,21 +575,21 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np, linkmode_copy(pl->link_config.advertising, pl->supported); phylink_validate(pl, pl->supported, &pl->link_config); - ret = phylink_parse_mode(pl, np); + ret = phylink_parse_mode(pl, fwnode); if (ret < 0) { kfree(pl); return ERR_PTR(ret); } if (pl->link_an_mode == MLO_AN_FIXED) { - ret = phylink_parse_fixedlink(pl, np); + ret = phylink_parse_fixedlink(pl, fwnode); if (ret < 0) { kfree(pl); return ERR_PTR(ret); } } - ret = phylink_register_sfp(pl, np); + ret = phylink_register_sfp(pl, fwnode); if (ret < 0) { kfree(pl); return ERR_PTR(ret); @@ -558,6 +599,13 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np, } EXPORT_SYMBOL_GPL(phylink_create); +/** + * phylink_destroy() - cleanup and destroy the phylink instance + * @pl: a pointer to a &struct phylink returned from phylink_create() + * + * Destroy a phylink instance. Any PHY that has been attached must have been + * cleaned up via phylink_disconnect_phy() prior to calling this function. + */ void phylink_destroy(struct phylink *pl) { if (pl->sfp_bus) @@ -654,10 +702,39 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy) return 0; } +/** + * phylink_connect_phy() - connect a PHY to the phylink instance + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @phy: a pointer to a &struct phy_device. + * + * Connect @phy to the phylink instance specified by @pl by calling + * phy_attach_direct(). Configure the @phy according to the MAC driver's + * capabilities, start the PHYLIB state machine and enable any interrupts + * that the PHY supports. + * + * This updates the phylink's ethtool supported and advertising link mode + * masks. + * + * Returns 0 on success or a negative errno. + */ int phylink_connect_phy(struct phylink *pl, struct phy_device *phy) { int ret; + if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED || + (pl->link_an_mode == MLO_AN_INBAND && + phy_interface_mode_is_8023z(pl->link_interface)))) + return -EINVAL; + + if (pl->phydev) + return -EBUSY; + + /* Use PHY device/driver interface */ + if (pl->link_interface == PHY_INTERFACE_MODE_NA) { + pl->link_interface = phy->interface; + pl->link_config.interface = pl->link_interface; + } + ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface); if (ret) return ret; @@ -670,14 +747,29 @@ int phylink_connect_phy(struct phylink *pl, struct phy_device *phy) } EXPORT_SYMBOL_GPL(phylink_connect_phy); -int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn) +/** + * phylink_of_phy_connect() - connect the PHY specified in the DT node. + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @dn: a pointer to a &struct device_node. + * @flags: PHY-specific flags to communicate to the PHY device driver + * + * Connect the phy specified in the device node @dn to the phylink instance + * specified by @pl. Actions specified in phylink_connect_phy() will be
Actions specified in phylink_connect_phy() will be + * performed. + * + * Returns 0 on success or a negative errno. + */ +int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn, + u32 flags) { struct device_node *phy_node; struct phy_device *phy_dev; int ret; - /* Fixed links are handled without needing a PHY */ - if (pl->link_an_mode == MLO_AN_FIXED) + /* Fixed links and 802.3z are handled without needing a PHY */ + if (pl->link_an_mode == MLO_AN_FIXED || + (pl->link_an_mode == MLO_AN_INBAND && + phy_interface_mode_is_8023z(pl->link_interface))) return 0; phy_node = of_parse_phandle(dn, "phy-handle", 0); @@ -687,14 +779,13 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn) phy_node = of_parse_phandle(dn, "phy-device", 0); if (!phy_node) { - if (pl->link_an_mode == MLO_AN_PHY) { - netdev_err(pl->netdev, "unable to find PHY node\n"); + if (pl->link_an_mode == MLO_AN_PHY) return -ENODEV; - } return 0; } - phy_dev = of_phy_attach(pl->netdev, phy_node, 0, pl->link_interface); + phy_dev = of_phy_attach(pl->netdev, phy_node, flags, + pl->link_interface); /* We're done with the phy_node handle */ of_node_put(phy_node); @@ -709,11 +800,18 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn) } EXPORT_SYMBOL_GPL(phylink_of_phy_connect); +/** + * phylink_disconnect_phy() - disconnect any PHY attached to the phylink + * instance. + * @pl: a pointer to a &struct phylink returned from phylink_create() + * + * Disconnect any current PHY from the phylink instance described by @pl. + */ void phylink_disconnect_phy(struct phylink *pl) { struct phy_device *phy; - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); phy = pl->phydev; if (phy) { @@ -730,6 +828,40 @@ void phylink_disconnect_phy(struct phylink *pl) } EXPORT_SYMBOL_GPL(phylink_disconnect_phy); +/** + * phylink_fixed_state_cb() - allow setting a fixed link callback + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @cb: callback to execute to determine the fixed link state. + * + * The MAC driver should call this driver when the state of its link + * can be determined through e.g: an out of band MMIO register. + */ +int phylink_fixed_state_cb(struct phylink *pl, + void (*cb)(struct net_device *dev, + struct phylink_link_state *state)) +{ + /* It does not make sense to let the link be overriden unless we use + * MLO_AN_FIXED + */ + if (pl->link_an_mode != MLO_AN_FIXED) + return -EINVAL; + + mutex_lock(&pl->state_mutex); + pl->get_fixed_state = cb; + mutex_unlock(&pl->state_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(phylink_fixed_state_cb); + +/** + * phylink_mac_change() - notify phylink of a change in MAC state + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @up: indicates whether the link is currently up. + * + * The MAC driver should call this driver when the state of its link + * changes (eg, link failure, new negotiation results, etc.) + */ void phylink_mac_change(struct phylink *pl, bool up) { if (!up) @@ -739,9 +871,17 @@ void phylink_mac_change(struct phylink *pl, bool up) } EXPORT_SYMBOL_GPL(phylink_mac_change); +/** + * phylink_start() - start a phylink instance + * @pl: a pointer to a &struct phylink returned from phylink_create() + * + * Start the phylink instance specified by @pl, configuring the MAC for the + * desired link mode(s) and negotiation style. This should be called from the + * network device driver's &struct net_device_ops ndo_open() method. 
+ */ void phylink_start(struct phylink *pl) { - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); netdev_info(pl->netdev, "configuring for %s/%s link mode\n", phylink_an_mode_str(pl->link_an_mode), @@ -754,6 +894,12 @@ void phylink_start(struct phylink *pl) phylink_resolve_flow(pl, &pl->link_config); phylink_mac_config(pl, &pl->link_config); + /* Restart autonegotiation if using 802.3z to ensure that the link + * parameters are properly negotiated. This is necessary for DSA + * switches using 802.3z negotiation to ensure they see our modes. + */ + phylink_mac_an_restart(pl); + clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); phylink_run_resolve(pl); @@ -764,9 +910,18 @@ void phylink_start(struct phylink *pl) } EXPORT_SYMBOL_GPL(phylink_start); +/** + * phylink_stop() - stop a phylink instance + * @pl: a pointer to a &struct phylink returned from phylink_create() + * + * Stop the phylink instance specified by @pl. This should be called from the + * network device driver's &struct net_device_ops ndo_stop() method. The + * network device's carrier state should not be changed prior to calling this + * function. + */ void phylink_stop(struct phylink *pl) { - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); if (pl->phydev) phy_stop(pl->phydev); @@ -779,9 +934,18 @@ void phylink_stop(struct phylink *pl) } EXPORT_SYMBOL_GPL(phylink_stop); +/** + * phylink_ethtool_get_wol() - get the wake on lan parameters for the PHY + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @wol: a pointer to &struct ethtool_wolinfo to hold the read parameters + * + * Read the wake on lan parameters from the PHY attached to the phylink + * instance specified by @pl. If no PHY is currently attached, report no + * support for wake on lan. + */ void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol) { - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); wol->supported = 0; wol->wolopts = 0; @@ -791,11 +955,22 @@ void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol) } EXPORT_SYMBOL_GPL(phylink_ethtool_get_wol); +/** + * phylink_ethtool_set_wol() - set wake on lan parameters + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @wol: a pointer to &struct ethtool_wolinfo for the desired parameters + * + * Set the wake on lan parameters for the PHY attached to the phylink + * instance specified by @pl. If no PHY is attached, returns %EOPNOTSUPP + * error. + * + * Returns zero on success or negative errno code. + */ int phylink_ethtool_set_wol(struct phylink *pl, struct ethtool_wolinfo *wol) { int ret = -EOPNOTSUPP; - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); if (pl->phydev) ret = phy_ethtool_set_wol(pl->phydev, wol); @@ -826,12 +1001,21 @@ static void phylink_get_ksettings(const struct phylink_link_state *state, AUTONEG_DISABLE; } +/** + * phylink_ethtool_ksettings_get() - get the current link settings + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @kset: a pointer to a &struct ethtool_link_ksettings to hold link settings + * + * Read the current link settings for the phylink instance specified by @pl. + * This will be the link settings read from the MAC, PHY or fixed link + * settings depending on the current negotiation mode. 
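+ *
+ * The ethtool hook is normally a one-line forward under the RTNL
+ * (sketch; the foo_* names are hypothetical, not from this patch):
+ *
+ *	static int foo_get_link_ksettings(struct net_device *ndev,
+ *					  struct ethtool_link_ksettings *kset)
+ *	{
+ *		struct foo_priv *priv = netdev_priv(ndev);
+ *
+ *		return phylink_ethtool_ksettings_get(priv->phylink, kset);
+ *	}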
+ */ int phylink_ethtool_ksettings_get(struct phylink *pl, struct ethtool_link_ksettings *kset) { struct phylink_link_state link_state; - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); if (pl->phydev) { phy_ethtool_ksettings_get(pl->phydev, kset); @@ -851,14 +1035,13 @@ int phylink_ethtool_ksettings_get(struct phylink *pl, phylink_get_ksettings(&link_state, kset); break; - case MLO_AN_SGMII: + case MLO_AN_INBAND: /* If there is a phy attached, then use the reported * settings from the phy with no modification. */ if (pl->phydev) break; - case MLO_AN_8023Z: phylink_get_mac_state(pl, &link_state); /* The MAC is reporting the link results from its own PCS @@ -873,6 +1056,11 @@ int phylink_ethtool_ksettings_get(struct phylink *pl, } EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get); +/** + * phylink_ethtool_ksettings_set() - set the link settings + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @kset: a pointer to a &struct ethtool_link_ksettings for the desired modes + */ int phylink_ethtool_ksettings_set(struct phylink *pl, const struct ethtool_link_ksettings *kset) { @@ -880,7 +1068,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, struct phylink_link_state config; int ret; - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); if (kset->base.autoneg != AUTONEG_DISABLE && kset->base.autoneg != AUTONEG_ENABLE) @@ -967,11 +1155,22 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, } EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_set); +/** + * phylink_ethtool_nway_reset() - restart negotiation + * @pl: a pointer to a &struct phylink returned from phylink_create() + * + * Restart negotiation for the phylink instance specified by @pl. This will + * cause any attached phy to restart negotiation with the link partner, and + * if the MAC is in a BaseX mode, the MAC will also be requested to restart + * negotiation. + * + * Returns zero on success, or negative error code. 
+ */ int phylink_ethtool_nway_reset(struct phylink *pl) { int ret = 0; - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); if (pl->phydev) ret = phy_restart_aneg(pl->phydev); @@ -981,10 +1180,15 @@ int phylink_ethtool_nway_reset(struct phylink *pl) } EXPORT_SYMBOL_GPL(phylink_ethtool_nway_reset); +/** + * phylink_ethtool_get_pauseparam() - get the current pause parameters + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @pause: a pointer to a &struct ethtool_pauseparam + */ void phylink_ethtool_get_pauseparam(struct phylink *pl, struct ethtool_pauseparam *pause) { - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); pause->autoneg = !!(pl->link_config.pause & MLO_PAUSE_AN); pause->rx_pause = !!(pl->link_config.pause & MLO_PAUSE_RX); @@ -992,12 +1196,17 @@ void phylink_ethtool_get_pauseparam(struct phylink *pl, } EXPORT_SYMBOL_GPL(phylink_ethtool_get_pauseparam); +/** + * phylink_ethtool_set_pauseparam() - set the current pause parameters + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @pause: a pointer to a &struct ethtool_pauseparam + */ int phylink_ethtool_set_pauseparam(struct phylink *pl, struct ethtool_pauseparam *pause) { struct phylink_link_state *config = &pl->link_config; - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); if (!phylink_test(pl->supported, Pause) && !phylink_test(pl->supported, Asym_Pause)) @@ -1030,8 +1239,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, phylink_mac_config(pl, config); break; - case MLO_AN_SGMII: - case MLO_AN_8023Z: + case MLO_AN_INBAND: phylink_mac_config(pl, config); phylink_mac_an_restart(pl); break; @@ -1070,24 +1278,21 @@ int phylink_ethtool_get_module_eeprom(struct phylink *pl, } EXPORT_SYMBOL_GPL(phylink_ethtool_get_module_eeprom); -int phylink_init_eee(struct phylink *pl, bool clk_stop_enable) -{ - int ret = -EPROTONOSUPPORT; - - WARN_ON(!lockdep_rtnl_is_held()); - - if (pl->phydev) - ret = phy_init_eee(pl->phydev, clk_stop_enable); - - return ret; -} -EXPORT_SYMBOL_GPL(phylink_init_eee); - +/** + * phylink_get_eee_err() - read the energy efficient ethernet error + * counter + * @pl: a pointer to a &struct phylink returned from phylink_create(). + * + * Read the Energy Efficient Ethernet error counter from the PHY associated + * with the phylink instance specified by @pl. + * + * Returns positive error counter value, or negative error code.
+ */ int phylink_get_eee_err(struct phylink *pl) { int ret = 0; - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); if (pl->phydev) ret = phy_get_eee_err(pl->phydev); @@ -1096,11 +1301,16 @@ int phylink_get_eee_err(struct phylink *pl) } EXPORT_SYMBOL_GPL(phylink_get_eee_err); +/** + * phylink_ethtool_get_eee() - read the energy efficient ethernet parameters + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @eee: a pointer to a &struct ethtool_eee for the read parameters + */ int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee) { int ret = -EOPNOTSUPP; - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); if (pl->phydev) ret = phy_ethtool_get_eee(pl->phydev, eee); @@ -1109,11 +1319,16 @@ int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee) } EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee); +/** + * phylink_ethtool_set_eee() - set the energy efficient ethernet parameters + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @eee: a pointer to a &struct ethtool_eee for the desired parameters + */ int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_eee *eee) { int ret = -EOPNOTSUPP; - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); if (pl->phydev) ret = phy_ethtool_set_eee(pl->phydev, eee); @@ -1248,9 +1463,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id, case MLO_AN_PHY: return -EOPNOTSUPP; - case MLO_AN_SGMII: - /* No phy, fall through to 8023z method */ - case MLO_AN_8023Z: + case MLO_AN_INBAND: if (phy_id == 0) { val = phylink_get_mac_state(pl, &state); if (val < 0) @@ -1275,24 +1488,40 @@ static int phylink_mii_write(struct phylink *pl, unsigned int phy_id, case MLO_AN_PHY: return -EOPNOTSUPP; - case MLO_AN_SGMII: - /* No phy, fall through to 8023z method */ - case MLO_AN_8023Z: + case MLO_AN_INBAND: break; } return 0; } +/** + * phylink_mii_ioctl() - generic mii ioctl interface + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @ifr: a pointer to a &struct ifreq for socket ioctls + * @cmd: ioctl cmd to execute + * + * Perform the specified MII ioctl on the PHY attached to the phylink instance + * specified by @pl. If no PHY is attached, emulate the presence of the PHY. + * + * Returns: zero on success or negative error code. + * + * %SIOCGMIIPHY: + * read register from the current PHY. + * %SIOCGMIIREG: + * read register from the specified PHY. + * %SIOCSMIIREG: + * set a register on the specified PHY. 
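+ *
+ * Sketch of the expected wiring from a driver's ndo_do_ioctl() method
+ * (illustrative; the foo_* names are hypothetical):
+ *
+ *	static int foo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+ *	{
+ *		struct foo_priv *priv = netdev_priv(ndev);
+ *
+ *		return phylink_mii_ioctl(priv->phylink, ifr, cmd);
+ *	}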
+ */ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) { struct mii_ioctl_data *mii = if_mii(ifr); int ret; - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); if (pl->phydev) { - /* PHYs only exist for MLO_AN_PHY and MLO_AN_SGMII */ + /* PHYs only exist for MLO_AN_PHY and SGMII */ switch (cmd) { case SIOCGMIIPHY: mii->phy_id = pl->phydev->mdio.addr; @@ -1351,7 +1580,7 @@ static int phylink_sfp_module_insert(void *upstream, __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, }; struct phylink_link_state config; phy_interface_t iface; - int mode, ret = 0; + int ret = 0; bool changed; u8 port; @@ -1359,14 +1588,13 @@ static int phylink_sfp_module_insert(void *upstream, port = sfp_parse_port(pl->sfp_bus, id, support); iface = sfp_parse_interface(pl->sfp_bus, id); - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); switch (iface) { case PHY_INTERFACE_MODE_SGMII: - mode = MLO_AN_SGMII; - break; case PHY_INTERFACE_MODE_1000BASEX: - mode = MLO_AN_8023Z; + case PHY_INTERFACE_MODE_2500BASEX: + case PHY_INTERFACE_MODE_10GKR: break; default: return -EINVAL; @@ -1384,16 +1612,18 @@ static int phylink_sfp_module_insert(void *upstream, ret = phylink_validate(pl, support, &config); if (ret) { netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n", - phylink_an_mode_str(mode), phy_modes(config.interface), + phylink_an_mode_str(MLO_AN_INBAND), + phy_modes(config.interface), __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret); return ret; } netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n", - phylink_an_mode_str(mode), phy_modes(config.interface), + phylink_an_mode_str(MLO_AN_INBAND), + phy_modes(config.interface), __ETHTOOL_LINK_MODE_MASK_NBITS, support); - if (mode == MLO_AN_8023Z && pl->phydev) + if (phy_interface_mode_is_8023z(iface) && pl->phydev) return -EINVAL; changed = !bitmap_equal(pl->supported, support, @@ -1403,15 +1633,15 @@ static int phylink_sfp_module_insert(void *upstream, linkmode_copy(pl->link_config.advertising, config.advertising); } - if (pl->link_an_mode != mode || + if (pl->link_an_mode != MLO_AN_INBAND || pl->link_config.interface != config.interface) { pl->link_config.interface = config.interface; - pl->link_an_mode = mode; + pl->link_an_mode = MLO_AN_INBAND; changed = true; netdev_info(pl->netdev, "switched to %s/%s link mode\n", - phylink_an_mode_str(mode), + phylink_an_mode_str(MLO_AN_INBAND), phy_modes(config.interface)); } @@ -1428,7 +1658,7 @@ static void phylink_sfp_link_down(void *upstream) { struct phylink *pl = upstream; - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); queue_work(system_power_efficient_wq, &pl->resolve); @@ -1439,7 +1669,7 @@ static void phylink_sfp_link_up(void *upstream) { struct phylink *pl = upstream; - WARN_ON(!lockdep_rtnl_is_held()); + ASSERT_RTNL(); clear_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); phylink_run_resolve(pl); diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c index dbef8002bc28..889a4dce1648 100644 --- a/drivers/net/phy/qsemi.c +++ b/drivers/net/phy/qsemi.c @@ -118,8 +118,6 @@ static struct phy_driver qs6612_driver[] = { { .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = qs6612_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = qs6612_ack_interrupt, .config_intr = qs6612_config_intr, } }; diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index eda0a6e86918..ee3ca4a2f12b 100644 --- 
a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -13,29 +13,44 @@ * option) any later version. * */ +#include <linux/bitops.h> #include <linux/phy.h> #include <linux/module.h> -#define RTL821x_PHYSR 0x11 -#define RTL821x_PHYSR_DUPLEX 0x2000 -#define RTL821x_PHYSR_SPEED 0xc000 -#define RTL821x_INER 0x12 -#define RTL821x_INER_INIT 0x6400 -#define RTL821x_INSR 0x13 -#define RTL821x_PAGE_SELECT 0x1f -#define RTL8211E_INER_LINK_STATUS 0x400 +#define RTL821x_PHYSR 0x11 +#define RTL821x_PHYSR_DUPLEX BIT(13) +#define RTL821x_PHYSR_SPEED GENMASK(15, 14) -#define RTL8211F_INER_LINK_STATUS 0x0010 -#define RTL8211F_INSR 0x1d -#define RTL8211F_TX_DELAY 0x100 +#define RTL821x_INER 0x12 +#define RTL8211B_INER_INIT 0x6400 +#define RTL8211E_INER_LINK_STATUS BIT(10) +#define RTL8211F_INER_LINK_STATUS BIT(4) -#define RTL8201F_ISR 0x1e -#define RTL8201F_IER 0x13 +#define RTL821x_INSR 0x13 + +#define RTL821x_PAGE_SELECT 0x1f + +#define RTL8211F_INSR 0x1d + +#define RTL8211F_TX_DELAY BIT(8) + +#define RTL8201F_ISR 0x1e +#define RTL8201F_IER 0x13 MODULE_DESCRIPTION("Realtek PHY driver"); MODULE_AUTHOR("Johnson Leung"); MODULE_LICENSE("GPL"); +static int rtl821x_read_page(struct phy_device *phydev) +{ + return __phy_read(phydev, RTL821x_PAGE_SELECT); +} + +static int rtl821x_write_page(struct phy_device *phydev, int page) +{ + return __phy_write(phydev, RTL821x_PAGE_SELECT, page); +} + static int rtl8201_ack_interrupt(struct phy_device *phydev) { int err; @@ -58,31 +73,21 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev) { int err; - phy_write(phydev, RTL821x_PAGE_SELECT, 0xa43); - err = phy_read(phydev, RTL8211F_INSR); - /* restore to default page 0 */ - phy_write(phydev, RTL821x_PAGE_SELECT, 0x0); + err = phy_read_paged(phydev, 0xa43, RTL8211F_INSR); return (err < 0) ? 
err : 0; } static int rtl8201_config_intr(struct phy_device *phydev) { - int err; - - /* switch to page 7 */ - phy_write(phydev, RTL821x_PAGE_SELECT, 0x7); + u16 val; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) - err = phy_write(phydev, RTL8201F_IER, - BIT(13) | BIT(12) | BIT(11)); + val = BIT(13) | BIT(12) | BIT(11); else - err = phy_write(phydev, RTL8201F_IER, 0); - - /* restore to default page 0 */ - phy_write(phydev, RTL821x_PAGE_SELECT, 0x0); + val = 0; - return err; + return phy_write_paged(phydev, 0x7, RTL8201F_IER, val); } static int rtl8211b_config_intr(struct phy_device *phydev) @@ -91,7 +96,7 @@ static int rtl8211b_config_intr(struct phy_device *phydev) if (phydev->interrupts == PHY_INTERRUPT_ENABLED) err = phy_write(phydev, RTL821x_INER, - RTL821x_INER_INIT); + RTL8211B_INER_INIT); else err = phy_write(phydev, RTL821x_INER, 0); @@ -113,43 +118,31 @@ static int rtl8211e_config_intr(struct phy_device *phydev) static int rtl8211f_config_intr(struct phy_device *phydev) { - int err; + u16 val; - phy_write(phydev, RTL821x_PAGE_SELECT, 0xa42); if (phydev->interrupts == PHY_INTERRUPT_ENABLED) - err = phy_write(phydev, RTL821x_INER, - RTL8211F_INER_LINK_STATUS); + val = RTL8211F_INER_LINK_STATUS; else - err = phy_write(phydev, RTL821x_INER, 0); - phy_write(phydev, RTL821x_PAGE_SELECT, 0); + val = 0; - return err; + return phy_write_paged(phydev, 0xa42, RTL821x_INER, val); } static int rtl8211f_config_init(struct phy_device *phydev) { int ret; - u16 reg; + u16 val = 0; ret = genphy_config_init(phydev); if (ret < 0) return ret; - phy_write(phydev, RTL821x_PAGE_SELECT, 0xd08); - reg = phy_read(phydev, 0x11); - /* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) - reg |= RTL8211F_TX_DELAY; - else - reg &= ~RTL8211F_TX_DELAY; - - phy_write(phydev, 0x11, reg); - /* restore to default page 0 */ - phy_write(phydev, RTL821x_PAGE_SELECT, 0x0); + val = RTL8211F_TX_DELAY; - return 0; + return phy_modify_paged(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY, val); } static struct phy_driver realtek_drvs[] = { @@ -159,28 +152,24 @@ static struct phy_driver realtek_drvs[] = { .phy_id_mask = 0x0000ffff, .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, - .config_aneg = &genphy_config_aneg, - .read_status = &genphy_read_status, }, { .phy_id = 0x001cc816, .name = "RTL8201F 10/100Mbps Ethernet", .phy_id_mask = 0x001fffff, .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, - .config_aneg = &genphy_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &rtl8201_ack_interrupt, .config_intr = &rtl8201_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, }, { .phy_id = 0x001cc912, .name = "RTL8211B Gigabit Ethernet", .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .config_aneg = &genphy_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &rtl821x_ack_interrupt, .config_intr = &rtl8211b_config_intr, }, { @@ -189,8 +178,6 @@ static struct phy_driver realtek_drvs[] = { .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = rtl821x_ack_interrupt, .config_intr = rtl8211e_config_intr, .suspend = genphy_suspend, @@ -201,8 +188,6 @@ static struct phy_driver realtek_drvs[] = { .phy_id_mask = 0x001fffff, 
.features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .config_aneg = &genphy_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &rtl821x_ack_interrupt, .config_intr = &rtl8211e_config_intr, .suspend = genphy_suspend, @@ -213,13 +198,13 @@ static struct phy_driver realtek_drvs[] = { .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .config_aneg = &genphy_config_aneg, .config_init = &rtl8211f_config_init, - .read_status = &genphy_read_status, .ack_interrupt = &rtl8211f_ack_interrupt, .config_intr = &rtl8211f_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, }, }; diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c index c092af137056..f1da70b9b55f 100644 --- a/drivers/net/phy/rockchip.c +++ b/drivers/net/phy/rockchip.c @@ -213,7 +213,6 @@ static struct phy_driver rockchip_phy_driver[] = { .soft_reset = genphy_soft_reset, .config_init = rockchip_integrated_phy_config_init, .config_aneg = rockchip_config_aneg, - .read_status = genphy_read_status, .suspend = genphy_suspend, .resume = rockchip_phy_resume, }, diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index ab64a142b832..8961209ee949 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -8,10 +8,14 @@ #include "sfp.h" +/** + * struct sfp_bus - internal representation of a sfp bus + */ struct sfp_bus { + /* private: */ struct kref kref; struct list_head node; - struct device_node *device_node; + struct fwnode_handle *fwnode; const struct sfp_socket_ops *socket_ops; struct device *sfp_dev; @@ -26,6 +30,20 @@ struct sfp_bus { bool started; }; +/** + * sfp_parse_port() - Parse the EEPROM base ID, setting the port type + * @bus: a pointer to the &struct sfp_bus structure for the sfp module + * @id: a pointer to the module's &struct sfp_eeprom_id + * @support: optional pointer to an array of unsigned long for the + * ethtool support mask + * + * Parse the EEPROM identification given in @id, and return one of + * %PORT_TP, %PORT_FIBRE or %PORT_OTHER. If @support is non-%NULL, + * also set the ethtool %ETHTOOL_LINK_MODE_xxx_BIT corresponding with + * the connector type. + * + * If the port type is not known, returns %PORT_OTHER. 
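+ *
+ * Usage sketch, mirroring the caller in the phylink changes earlier in
+ * this patch (phylink_sfp_module_insert()):
+ *
+ *	__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ *	u8 port;
+ *
+ *	port = sfp_parse_port(bus, id, support);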
+ */ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, unsigned long *support) { @@ -39,21 +57,19 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, case SFP_CONNECTOR_MT_RJ: case SFP_CONNECTOR_MU: case SFP_CONNECTOR_OPTICAL_PIGTAIL: - if (support) - phylink_set(support, FIBRE); port = PORT_FIBRE; break; case SFP_CONNECTOR_RJ45: - if (support) - phylink_set(support, TP); port = PORT_TP; break; + case SFP_CONNECTOR_COPPER_PIGTAIL: + port = PORT_DA; + break; + case SFP_CONNECTOR_UNSPEC: if (id->base.e1000_base_t) { - if (support) - phylink_set(support, TP); port = PORT_TP; break; } @@ -62,7 +78,6 @@ case SFP_CONNECTOR_MPO_1X12: case SFP_CONNECTOR_MPO_2X16: case SFP_CONNECTOR_HSSDC_II: - case SFP_CONNECTOR_COPPER_PIGTAIL: case SFP_CONNECTOR_NOSEPARATE: case SFP_CONNECTOR_MXC_2X16: port = PORT_OTHER; @@ -74,10 +89,40 @@ break; } + if (support) { + switch (port) { + case PORT_FIBRE: + phylink_set(support, FIBRE); + break; + + case PORT_TP: + phylink_set(support, TP); + break; + } + } + return port; } EXPORT_SYMBOL_GPL(sfp_parse_port); +/** + * sfp_parse_interface() - Parse the phy_interface_t + * @bus: a pointer to the &struct sfp_bus structure for the sfp module + * @id: a pointer to the module's &struct sfp_eeprom_id + * + * Derive the phy_interface_t mode from the information found in the + * module's identifying EEPROM. There is no standard or defined way + * to derive this information, so we use some heuristics. + * + * If the encoding is 64b66b, then the module must be >= 10G, so + * return %PHY_INTERFACE_MODE_10GKR. + * + * If it's 8b10b, then it's 1G or slower. If it's definitely a fibre + * module, return %PHY_INTERFACE_MODE_1000BASEX mode, otherwise return + * %PHY_INTERFACE_MODE_SGMII mode. + * + * If the encoding is not known, return %PHY_INTERFACE_MODE_NA. + */ phy_interface_t sfp_parse_interface(struct sfp_bus *bus, const struct sfp_eeprom_id *id) { @@ -107,6 +152,11 @@ phy_interface_t sfp_parse_interface(struct sfp_bus *bus, break; default: + if (id->base.e1000_base_cx) { + iface = PHY_INTERFACE_MODE_1000BASEX; + break; + } + iface = PHY_INTERFACE_MODE_NA; dev_err(bus->sfp_dev, "SFP module encoding does not support 8b10b nor 64b66b\n"); @@ -117,13 +167,38 @@ } EXPORT_SYMBOL_GPL(sfp_parse_interface); +/** + * sfp_parse_support() - Parse the eeprom id for supported link modes + * @bus: a pointer to the &struct sfp_bus structure for the sfp module + * @id: a pointer to the module's &struct sfp_eeprom_id + * @support: pointer to an array of unsigned long for the ethtool support mask + * + * Parse the EEPROM identification information and derive the supported + * ethtool link modes for the module.
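+ *
+ * Usage sketch (illustrative), rejecting modules for which no link
+ * modes could be derived:
+ *
+ *	__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ *
+ *	sfp_parse_support(bus, id, support);
+ *	if (bitmap_empty(support, __ETHTOOL_LINK_MODE_MASK_NBITS))
+ *		return -EINVAL;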
+ */ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, unsigned long *support) { + unsigned int br_min, br_nom, br_max; + phylink_set(support, Autoneg); phylink_set(support, Pause); phylink_set(support, Asym_Pause); + /* Decode the bitrate information to MBd */ + br_min = br_nom = br_max = 0; + if (id->base.br_nominal) { + if (id->base.br_nominal != 255) { + br_nom = id->base.br_nominal * 100; + br_min = br_nom + id->base.br_nominal * id->ext.br_min; + br_max = br_nom + id->base.br_nominal * id->ext.br_max; + } else if (id->ext.br_max) { + br_nom = 250 * id->ext.br_max; + br_max = br_nom + br_nom * id->ext.br_min / 100; + br_min = br_nom - br_nom * id->ext.br_min / 100; + } + } + /* Set ethtool support from the compliance fields. */ if (id->base.e10g_base_sr) phylink_set(support, 10000baseSR_Full); @@ -142,6 +217,34 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, phylink_set(support, 1000baseT_Full); } + /* 1000Base-PX or 1000Base-BX10 */ + if ((id->base.e_base_px || id->base.e_base_bx10) && + br_min <= 1300 && br_max >= 1200) + phylink_set(support, 1000baseX_Full); + + /* For active or passive cables, select the link modes + * based on the bit rates and the cable compliance bytes. + */ + if ((id->base.sfp_ct_passive || id->base.sfp_ct_active) && br_nom) { + /* This may look odd, but some manufacturers use 12000MBd */ + if (br_min <= 12000 && br_max >= 10300) + phylink_set(support, 10000baseCR_Full); + if (br_min <= 3200 && br_max >= 3100) + phylink_set(support, 2500baseX_Full); + if (br_min <= 1300 && br_max >= 1200) + phylink_set(support, 1000baseX_Full); + } + if (id->base.sfp_ct_passive) { + if (id->base.passive.sff8431_app_e) + phylink_set(support, 10000baseCR_Full); + } + if (id->base.sfp_ct_active) { + if (id->base.active.sff8431_app_e || + id->base.active.sff8431_lim) { + phylink_set(support, 10000baseCR_Full); + } + } + switch (id->base.extended_cc) { case 0x00: /* Unspecified */ break; @@ -175,35 +278,6 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, if (id->base.br_nominal >= 12) phylink_set(support, 1000baseX_Full); } - - switch (id->base.connector) { - case SFP_CONNECTOR_SC: - case SFP_CONNECTOR_FIBERJACK: - case SFP_CONNECTOR_LC: - case SFP_CONNECTOR_MT_RJ: - case SFP_CONNECTOR_MU: - case SFP_CONNECTOR_OPTICAL_PIGTAIL: - break; - - case SFP_CONNECTOR_UNSPEC: - if (id->base.e1000_base_t) - break; - - case SFP_CONNECTOR_SG: /* guess */ - case SFP_CONNECTOR_MPO_1X12: - case SFP_CONNECTOR_MPO_2X16: - case SFP_CONNECTOR_HSSDC_II: - case SFP_CONNECTOR_COPPER_PIGTAIL: - case SFP_CONNECTOR_NOSEPARATE: - case SFP_CONNECTOR_MXC_2X16: - default: - /* a guess at the supported link modes */ - dev_warn(bus->sfp_dev, - "Guessing link modes, please report...\n"); - phylink_set(support, 1000baseT_Half); - phylink_set(support, 1000baseT_Full); - break; - } } EXPORT_SYMBOL_GPL(sfp_parse_support); @@ -215,7 +289,7 @@ static const struct sfp_upstream_ops *sfp_get_upstream_ops(struct sfp_bus *bus) return bus->registered ? 
bus->upstream_ops : NULL; } -static struct sfp_bus *sfp_bus_get(struct device_node *np) +static struct sfp_bus *sfp_bus_get(struct fwnode_handle *fwnode) { struct sfp_bus *sfp, *new, *found = NULL; @@ -224,7 +298,7 @@ static struct sfp_bus *sfp_bus_get(struct device_node *np) mutex_lock(&sfp_mutex); list_for_each_entry(sfp, &sfp_buses, node) { - if (sfp->device_node == np) { + if (sfp->fwnode == fwnode) { kref_get(&sfp->kref); found = sfp; break; @@ -233,7 +307,7 @@ if (!found && new) { kref_init(&new->kref); - new->device_node = np; + new->fwnode = fwnode; list_add(&new->node, &sfp_buses); found = new; new = NULL; @@ -246,7 +320,7 @@ return found; } -static void sfp_bus_release(struct kref *kref) __releases(sfp_mutex) +static void sfp_bus_release(struct kref *kref) { struct sfp_bus *bus = container_of(kref, struct sfp_bus, kref); @@ -293,6 +367,16 @@ static void sfp_unregister_bus(struct sfp_bus *bus) bus->registered = false; } +/** + * sfp_get_module_info() - Get the ethtool_modinfo for a SFP module + * @bus: a pointer to the &struct sfp_bus structure for the sfp module + * @modinfo: a &struct ethtool_modinfo + * + * Fill in the type and eeprom_len parameters in @modinfo for a module on + * the sfp bus specified by @bus. + * + * Returns 0 on success or a negative errno number. + */ int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo) { if (!bus->registered) @@ -301,6 +385,17 @@ int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo) } EXPORT_SYMBOL_GPL(sfp_get_module_info); +/** + * sfp_get_module_eeprom() - Read the SFP module EEPROM + * @bus: a pointer to the &struct sfp_bus structure for the sfp module + * @ee: a &struct ethtool_eeprom + * @data: buffer to contain the EEPROM data (must be at least @ee->len bytes) + * + * Read the EEPROM as specified by the supplied @ee. See the documentation + * for &struct ethtool_eeprom for the region to be read. + * + * Returns 0 on success or a negative errno number. + */ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, u8 *data) { @@ -310,6 +405,15 @@ } EXPORT_SYMBOL_GPL(sfp_get_module_eeprom); +/** + * sfp_upstream_start() - Inform the SFP that the network device is up + * @bus: a pointer to the &struct sfp_bus structure for the sfp module + * + * Inform the SFP socket that the network device is now up, so that the + * module can be enabled by allowing TX_DISABLE to be deasserted. This + * should be called from the network device driver's &struct net_device_ops + * ndo_open() method. + */ void sfp_upstream_start(struct sfp_bus *bus) { if (bus->registered) @@ -318,6 +422,15 @@ void sfp_upstream_start(struct sfp_bus *bus) } EXPORT_SYMBOL_GPL(sfp_upstream_start); +/** + * sfp_upstream_stop() - Inform the SFP that the network device is down + * @bus: a pointer to the &struct sfp_bus structure for the sfp module + * + * Inform the SFP socket that the network device is now down, so that the + * module can be disabled by asserting TX_DISABLE, disabling the laser + * in optical modules. This should be called from the network device + * driver's &struct net_device_ops ndo_stop() method.
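+ *
+ * Sketch of a direct (non-phylink) user; the foo_* names are
+ * hypothetical, and phylink users get this call for free via
+ * phylink_stop():
+ *
+ *	static int foo_stop(struct net_device *ndev)
+ *	{
+ *		struct foo_priv *priv = netdev_priv(ndev);
+ *
+ *		netif_stop_queue(ndev);
+ *		sfp_upstream_stop(priv->sfp_bus);
+ *		return 0;
+ *	}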
+ */ void sfp_upstream_stop(struct sfp_bus *bus) { if (bus->registered) @@ -326,11 +439,24 @@ void sfp_upstream_stop(struct sfp_bus *bus) } EXPORT_SYMBOL_GPL(sfp_upstream_stop); -struct sfp_bus *sfp_register_upstream(struct device_node *np, +/** + * sfp_register_upstream() - Register the neighbouring device + * @fwnode: firmware node for the SFP bus + * @ndev: network device associated with the interface + * @upstream: the upstream private data + * @ops: the upstream's &struct sfp_upstream_ops + * + * Register the upstream device (eg, PHY) with the SFP bus. MAC drivers + * should use phylink, which will call this function for them. Returns + * a pointer to the allocated &struct sfp_bus. + * + * On error, returns %NULL. + */ +struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode, struct net_device *ndev, void *upstream, const struct sfp_upstream_ops *ops) { - struct sfp_bus *bus = sfp_bus_get(np); + struct sfp_bus *bus = sfp_bus_get(fwnode); int ret = 0; if (bus) { @@ -353,6 +479,13 @@ struct sfp_bus *sfp_register_upstream(struct device_node *np, } EXPORT_SYMBOL_GPL(sfp_register_upstream); +/** + * sfp_unregister_upstream() - Unregister sfp bus + * @bus: a pointer to the &struct sfp_bus structure for the sfp module + * + * Unregister a previously registered upstream connection for the SFP + * module. @bus is returned from sfp_register_upstream(). + */ void sfp_unregister_upstream(struct sfp_bus *bus) { rtnl_lock(); @@ -434,7 +567,7 @@ EXPORT_SYMBOL_GPL(sfp_module_remove); struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp, const struct sfp_socket_ops *ops) { - struct sfp_bus *bus = sfp_bus_get(dev->of_node); + struct sfp_bus *bus = sfp_bus_get(dev->fwnode); int ret = 0; if (bus) { diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 9dfc1c4c954f..6c7d9289078d 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -98,12 +98,18 @@ static const enum gpiod_flags gpio_flags[] = { static DEFINE_MUTEX(sfp_mutex); +struct sff_data { + unsigned int gpios; + bool (*module_supported)(const struct sfp_eeprom_id *id); +}; + struct sfp { struct device *dev; struct i2c_adapter *i2c; struct mii_bus *i2c_mii; struct sfp_bus *sfp_bus; struct phy_device *mod_phy; + const struct sff_data *type; unsigned int (*get_state)(struct sfp *); void (*set_state)(struct sfp *, unsigned int); @@ -123,6 +129,36 @@ struct sfp { struct sfp_eeprom_id id; }; +static bool sff_module_supported(const struct sfp_eeprom_id *id) +{ + return id->base.phys_id == SFP_PHYS_ID_SFF && + id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP; +} + +static const struct sff_data sff_data = { + .gpios = SFP_F_LOS | SFP_F_TX_FAULT | SFP_F_TX_DISABLE, + .module_supported = sff_module_supported, +}; + +static bool sfp_module_supported(const struct sfp_eeprom_id *id) +{ + return id->base.phys_id == SFP_PHYS_ID_SFP && + id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP; +} + +static const struct sff_data sfp_data = { + .gpios = SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT | + SFP_F_TX_DISABLE | SFP_F_RATE_SELECT, + .module_supported = sfp_module_supported, +}; + +static const struct of_device_id sfp_of_match[] = { + { .compatible = "sff,sff", .data = &sff_data, }, + { .compatible = "sff,sfp", .data = &sfp_data, }, + { }, +}; +MODULE_DEVICE_TABLE(of, sfp_of_match); + static unsigned long poll_jiffies; static unsigned int sfp_gpio_get_state(struct sfp *sfp) @@ -141,6 +177,11 @@ static unsigned int sfp_gpio_get_state(struct sfp *sfp) return state; } +static unsigned int sff_gpio_get_state(struct sfp *sfp) +{ + 
return sfp_gpio_get_state(sfp) | SFP_F_PRESENT; +} + static void sfp_gpio_set_state(struct sfp *sfp, unsigned int state) { if (state & SFP_F_PRESENT) { @@ -315,12 +356,12 @@ static void sfp_sm_probe_phy(struct sfp *sfp) msleep(T_PHY_RESET_MS); phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR); - if (IS_ERR(phy)) { - dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy)); + if (phy == ERR_PTR(-ENODEV)) { + dev_info(sfp->dev, "no PHY detected\n"); return; } - if (!phy) { - dev_info(sfp->dev, "no PHY detected\n"); + if (IS_ERR(phy)) { + dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy)); return; } @@ -425,11 +466,6 @@ static int sfp_sm_mod_probe(struct sfp *sfp) { /* SFP module inserted - read I2C data */ struct sfp_eeprom_id id; - char vendor[17]; - char part[17]; - char sn[17]; - char date[9]; - char rev[5]; u8 check; int err; @@ -465,24 +501,17 @@ static int sfp_sm_mod_probe(struct sfp *sfp) sfp->id = id; - memcpy(vendor, sfp->id.base.vendor_name, 16); - vendor[16] = '\0'; - memcpy(part, sfp->id.base.vendor_pn, 16); - part[16] = '\0'; - memcpy(rev, sfp->id.base.vendor_rev, 4); - rev[4] = '\0'; - memcpy(sn, sfp->id.ext.vendor_sn, 16); - sn[16] = '\0'; - memcpy(date, sfp->id.ext.datecode, 8); - date[8] = '\0'; - - dev_info(sfp->dev, "module %s %s rev %s sn %s dc %s\n", - vendor, part, rev, sn, date); - - /* We only support SFP modules, not the legacy GBIC modules. */ - if (sfp->id.base.phys_id != SFP_PHYS_ID_SFP || - sfp->id.base.phys_ext_id != SFP_PHYS_EXT_ID_SFP) { - dev_err(sfp->dev, "module is not SFP - phys id 0x%02x 0x%02x\n", + dev_info(sfp->dev, "module %.*s %.*s rev %.*s sn %.*s dc %.*s\n", + (int)sizeof(id.base.vendor_name), id.base.vendor_name, + (int)sizeof(id.base.vendor_pn), id.base.vendor_pn, + (int)sizeof(id.base.vendor_rev), id.base.vendor_rev, + (int)sizeof(id.ext.vendor_sn), id.ext.vendor_sn, + (int)sizeof(id.ext.datecode), id.ext.datecode); + + /* Check whether we support this module */ + if (!sfp->type->module_supported(&sfp->id)) { + dev_err(sfp->dev, + "module is not supported - phys id 0x%02x 0x%02x\n", sfp->id.base.phys_id, sfp->id.base.phys_ext_id); return -EINVAL; } @@ -683,20 +712,19 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee, len = min_t(unsigned int, last, ETH_MODULE_SFF_8079_LEN); len -= first; - ret = sfp->read(sfp, false, first, data, len); + ret = sfp_read(sfp, false, first, data, len); if (ret < 0) return ret; first += len; data += len; } - if (first >= ETH_MODULE_SFF_8079_LEN && - first < ETH_MODULE_SFF_8472_LEN) { + if (first < ETH_MODULE_SFF_8472_LEN && last > ETH_MODULE_SFF_8079_LEN) { len = min_t(unsigned int, last, ETH_MODULE_SFF_8472_LEN); len -= first; first -= ETH_MODULE_SFF_8079_LEN; - ret = sfp->read(sfp, true, first, data, len); + ret = sfp_read(sfp, true, first, data, len); if (ret < 0) return ret; } @@ -801,6 +829,7 @@ static void sfp_cleanup(void *data) static int sfp_probe(struct platform_device *pdev) { + const struct sff_data *sff; struct sfp *sfp; bool poll = false; int irq, err, i; @@ -815,10 +844,19 @@ static int sfp_probe(struct platform_device *pdev) if (err < 0) return err; + sff = sfp->type = &sfp_data; + if (pdev->dev.of_node) { struct device_node *node = pdev->dev.of_node; + const struct of_device_id *id; struct device_node *np; + id = of_match_node(sfp_of_match, node); + if (WARN_ON(!id)) + return -EINVAL; + + sff = sfp->type = id->data; + np = of_parse_phandle(node, "i2c-bus", 0); if (np) { struct i2c_adapter *i2c; @@ -834,17 +872,22 @@ static int sfp_probe(struct platform_device 
*pdev) return err; } } + } - for (i = 0; i < GPIO_MAX; i++) { + for (i = 0; i < GPIO_MAX; i++) + if (sff->gpios & BIT(i)) { sfp->gpio[i] = devm_gpiod_get_optional(sfp->dev, gpio_of_names[i], gpio_flags[i]); if (IS_ERR(sfp->gpio[i])) return PTR_ERR(sfp->gpio[i]); } - sfp->get_state = sfp_gpio_get_state; - sfp->set_state = sfp_gpio_set_state; - } + sfp->get_state = sfp_gpio_get_state; + sfp->set_state = sfp_gpio_set_state; + + /* Modules that have no detect signal are always present */ + if (!(sfp->gpio[GPIO_MODDEF0])) + sfp->get_state = sff_gpio_get_state; sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); if (!sfp->sfp_bus) @@ -899,12 +942,6 @@ static int sfp_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id sfp_of_match[] = { - { .compatible = "sff,sfp", }, - { }, -}; -MODULE_DEVICE_TABLE(of, sfp_of_match); - static struct platform_driver sfp_driver = { .probe = sfp_probe, .remove = sfp_remove, diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 2306bfae057f..be399d645224 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -227,8 +227,6 @@ static struct phy_driver smsc_phy_driver[] = { .probe = smsc_phy_probe, /* basic functions */ - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .config_init = smsc_phy_config_init, .soft_reset = smsc_phy_reset, @@ -249,8 +247,6 @@ static struct phy_driver smsc_phy_driver[] = { .probe = smsc_phy_probe, /* basic functions */ - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .config_init = smsc_phy_config_init, .soft_reset = smsc_phy_reset, @@ -276,7 +272,6 @@ static struct phy_driver smsc_phy_driver[] = { .probe = smsc_phy_probe, /* basic functions */ - .config_aneg = genphy_config_aneg, .read_status = lan87xx_read_status, .config_init = smsc_phy_config_init, .soft_reset = smsc_phy_reset, @@ -303,8 +298,6 @@ static struct phy_driver smsc_phy_driver[] = { .probe = smsc_phy_probe, /* basic functions */ - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .config_init = lan911x_config_init, /* IRQ related */ @@ -319,12 +312,11 @@ static struct phy_driver smsc_phy_driver[] = { .name = "SMSC LAN8710/LAN8720", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, + .flags = PHY_HAS_INTERRUPT | PHY_RST_AFTER_CLK_EN, .probe = smsc_phy_probe, /* basic functions */ - .config_aneg = genphy_config_aneg, .read_status = lan87xx_read_status, .config_init = smsc_phy_config_init, .soft_reset = smsc_phy_reset, @@ -351,7 +343,6 @@ static struct phy_driver smsc_phy_driver[] = { .probe = smsc_phy_probe, /* basic functions */ - .config_aneg = genphy_config_aneg, .read_status = lan87xx_read_status, .config_init = smsc_phy_config_init, .soft_reset = smsc_phy_reset, diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c index d00cfb64529e..fbd548a1ad84 100644 --- a/drivers/net/phy/ste10Xp.c +++ b/drivers/net/phy/ste10Xp.c @@ -89,8 +89,6 @@ static struct phy_driver ste10xp_pdriver[] = { .features = PHY_BASIC_FEATURES | SUPPORTED_Pause, .flags = PHY_HAS_INTERRUPT, .config_init = ste10Xp_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, .ack_interrupt = ste10Xp_ack_interrupt, .config_intr = ste10Xp_config_intr, .suspend = genphy_suspend, @@ -102,8 +100,6 @@ static struct phy_driver ste10xp_pdriver[] = { .features = PHY_BASIC_FEATURES | SUPPORTED_Pause, .flags = PHY_HAS_INTERRUPT, .config_init = ste10Xp_config_init, - .config_aneg = genphy_config_aneg, - .read_status = 
genphy_read_status, .ack_interrupt = ste10Xp_ack_interrupt, .config_intr = ste10Xp_config_intr, .suspend = genphy_suspend, diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c index 96b33475ea5e..55f48ee3595a 100644 --- a/drivers/net/phy/uPD60620.c +++ b/drivers/net/phy/uPD60620.c @@ -95,7 +95,6 @@ static struct phy_driver upd60620_driver[1] = { { .features = PHY_BASIC_FEATURES, .flags = 0, .config_init = upd60620_config_init, - .config_aneg = genphy_config_aneg, .read_status = upd60620_read_status, } }; diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index f78ff0279648..d9dd8fbfffc7 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -267,7 +267,6 @@ static struct phy_driver vsc82xx_driver[] = { .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, }, { @@ -278,7 +277,6 @@ static struct phy_driver vsc82xx_driver[] = { .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, }, { @@ -289,7 +287,6 @@ static struct phy_driver vsc82xx_driver[] = { .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, }, { @@ -300,7 +297,6 @@ static struct phy_driver vsc82xx_driver[] = { .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, }, { @@ -311,7 +307,6 @@ static struct phy_driver vsc82xx_driver[] = { .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, }, { @@ -321,8 +316,6 @@ static struct phy_driver vsc82xx_driver[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = &vsc8601_config_init, - .config_aneg = &genphy_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, }, { @@ -333,7 +326,6 @@ static struct phy_driver vsc82xx_driver[] = { .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, }, { @@ -344,8 +336,6 @@ static struct phy_driver vsc82xx_driver[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = &vsc8221_config_init, - .config_aneg = &genphy_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, }, { @@ -356,8 +346,6 @@ static struct phy_driver vsc82xx_driver[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = &vsc8221_config_init, - .config_aneg = &genphy_config_aneg, - .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, } }; diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index cc63102ca96e..8940417c30e5 100644 --- a/drivers/net/slip/slip.c +++ 
b/drivers/net/slip/slip.c @@ -731,7 +731,7 @@ static void sl_sync(void) /* Find a free SLIP channel, and link in this `tty' line. */ -static struct slip *sl_alloc(dev_t line) +static struct slip *sl_alloc(void) { int i; char name[IFNAMSIZ]; @@ -809,7 +809,7 @@ static int slip_open(struct tty_struct *tty) /* OK. Find a free SLIP channel to use. */ err = -ENFILE; - sl = sl_alloc(tty_devnum(tty)); + sl = sl_alloc(); if (sl == NULL) goto err_exit; diff --git a/drivers/net/tap.c b/drivers/net/tap.c index f39c6f876e67..0a5ed004781c 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -330,9 +330,6 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) if (!q) return RX_HANDLER_PASS; - if (__skb_array_full(&q->skb_array)) - goto drop; - skb_push(skb, ETH_HLEN); /* Apply the forward feature mask so that we perform segmentation @@ -348,7 +345,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) goto drop; if (!segs) { - if (skb_array_produce(&q->skb_array, skb)) + if (ptr_ring_produce(&q->ring, skb)) goto drop; goto wake_up; } @@ -358,7 +355,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) struct sk_buff *nskb = segs->next; segs->next = NULL; - if (skb_array_produce(&q->skb_array, segs)) { + if (ptr_ring_produce(&q->ring, segs)) { kfree_skb(segs); kfree_skb_list(nskb); break; @@ -375,7 +372,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) !(features & NETIF_F_CSUM_MASK) && skb_checksum_help(skb)) goto drop; - if (skb_array_produce(&q->skb_array, skb)) + if (ptr_ring_produce(&q->ring, skb)) goto drop; } @@ -497,7 +494,7 @@ static void tap_sock_destruct(struct sock *sk) { struct tap_queue *q = container_of(sk, struct tap_queue, sk); - skb_array_cleanup(&q->skb_array); + ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb); } static int tap_open(struct inode *inode, struct file *file) @@ -517,7 +514,7 @@ static int tap_open(struct inode *inode, struct file *file) &tap_proto, 0); if (!q) goto err; - if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) { + if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) { sk_free(&q->sk); goto err; } @@ -546,7 +543,7 @@ static int tap_open(struct inode *inode, struct file *file) err = tap_set_queue(tap, file, q); if (err) { - /* tap_sock_destruct() will take care of freeing skb_array */ + /* tap_sock_destruct() will take care of freeing ptr_ring */ goto err_put; } @@ -583,7 +580,7 @@ static __poll_t tap_poll(struct file *file, poll_table *wait) mask = 0; poll_wait(file, &q->wq.wait, wait); - if (!skb_array_empty(&q->skb_array)) + if (!ptr_ring_empty(&q->ring)) mask |= POLLIN | POLLRDNORM; if (sock_writeable(&q->sk) || @@ -844,7 +841,7 @@ static ssize_t tap_do_read(struct tap_queue *q, TASK_INTERRUPTIBLE); /* Read frames from the queue */ - skb = skb_array_consume(&q->skb_array); + skb = ptr_ring_consume(&q->ring); if (skb) break; if (noblock) { @@ -1176,7 +1173,7 @@ static int tap_peek_len(struct socket *sock) { struct tap_queue *q = container_of(sock, struct tap_queue, sock); - return skb_array_peek_len(&q->skb_array); + return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag); } /* Ops structure to mimic raw sockets with tun */ @@ -1202,7 +1199,7 @@ struct socket *tap_get_socket(struct file *file) } EXPORT_SYMBOL_GPL(tap_get_socket); -struct skb_array *tap_get_skb_array(struct file *file) +struct ptr_ring *tap_get_ptr_ring(struct file *file) { struct tap_queue *q; @@ -1211,29 +1208,30 @@ struct skb_array *tap_get_skb_array(struct file *file) q = file->private_data; if (!q) 
return ERR_PTR(-EBADFD); - return &q->skb_array; + return &q->ring; } -EXPORT_SYMBOL_GPL(tap_get_skb_array); +EXPORT_SYMBOL_GPL(tap_get_ptr_ring); int tap_queue_resize(struct tap_dev *tap) { struct net_device *dev = tap->dev; struct tap_queue *q; - struct skb_array **arrays; + struct ptr_ring **rings; int n = tap->numqueues; int ret, i = 0; - arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL); - if (!arrays) + rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); + if (!rings) return -ENOMEM; list_for_each_entry(q, &tap->queue_list, next) - arrays[i++] = &q->skb_array; + rings[i++] = &q->ring; - ret = skb_array_resize_multiple(arrays, n, - dev->tx_queue_len, GFP_KERNEL); + ret = ptr_ring_resize_multiple(rings, n, + dev->tx_queue_len, GFP_KERNEL, + __skb_array_destroy_skb); - kfree(arrays); + kfree(rings); return ret; } EXPORT_SYMBOL_GPL(tap_queue_resize); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7548d8a11bdf..0dc66e4fbb2c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -179,7 +179,8 @@ struct tun_file { struct mutex napi_mutex; /* Protects access to the above napi */ struct list_head next; struct tun_struct *detached; - struct skb_array tx_array; + struct ptr_ring tx_ring; + struct xdp_rxq_info xdp_rxq; }; struct tun_flow_entry { @@ -195,6 +196,11 @@ struct tun_flow_entry { #define TUN_NUM_FLOW_ENTRIES 1024 +struct tun_prog { + struct rcu_head rcu; + struct bpf_prog *prog; +}; + /* Since the socket were moved to tun_file, to preserve the behavior of persist * device, socket filter, sndbuf and vnet header size were restore when the * file were attached to a persist device. @@ -232,8 +238,33 @@ struct tun_struct { u32 rx_batched; struct tun_pcpu_stats __percpu *pcpu_stats; struct bpf_prog __rcu *xdp_prog; + struct tun_prog __rcu *steering_prog; + struct tun_prog __rcu *filter_prog; }; +struct veth { + __be16 h_vlan_proto; + __be16 h_vlan_TCI; +}; + +bool tun_is_xdp_buff(void *ptr) +{ + return (unsigned long)ptr & TUN_XDP_FLAG; +} +EXPORT_SYMBOL(tun_is_xdp_buff); + +void *tun_xdp_to_ptr(void *ptr) +{ + return (void *)((unsigned long)ptr | TUN_XDP_FLAG); +} +EXPORT_SYMBOL(tun_xdp_to_ptr); + +void *tun_ptr_to_xdp(void *ptr) +{ + return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG); +} +EXPORT_SYMBOL(tun_ptr_to_xdp); + static int tun_napi_receive(struct napi_struct *napi, int budget) { struct tun_file *tfile = container_of(napi, struct tun_file, napi); @@ -537,15 +568,12 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash) * different rxq no. here. If we could not get rxhash, then we would * hope the rxq no. may help here. 
*/ -static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) +static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb) { - struct tun_struct *tun = netdev_priv(dev); struct tun_flow_entry *e; u32 txq = 0; u32 numqueues = 0; - rcu_read_lock(); numqueues = READ_ONCE(tun->numqueues); txq = __skb_get_hash_symmetric(skb); @@ -563,10 +591,37 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, txq -= numqueues; } - rcu_read_unlock(); return txq; } +static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb) +{ + struct tun_prog *prog; + u16 ret = 0; + + prog = rcu_dereference(tun->steering_prog); + if (prog) + ret = bpf_prog_run_clear_cb(prog->prog, skb); + + return ret % tun->numqueues; +} + +static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct tun_struct *tun = netdev_priv(dev); + u16 ret; + + rcu_read_lock(); + if (rcu_dereference(tun->steering_prog)) + ret = tun_ebpf_select_queue(tun, skb); + else + ret = tun_automq_select_queue(tun, skb); + rcu_read_unlock(); + + return ret; +} + static inline bool tun_not_capable(struct tun_struct *tun) { const struct cred *cred = current_cred(); @@ -600,22 +655,36 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile) return tun; } +static void tun_ptr_free(void *ptr) +{ + if (!ptr) + return; + if (tun_is_xdp_buff(ptr)) { + struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); + + put_page(virt_to_head_page(xdp->data)); + } else { + __skb_array_destroy_skb(ptr); + } +} + static void tun_queue_purge(struct tun_file *tfile) { - struct sk_buff *skb; + void *ptr; - while ((skb = skb_array_consume(&tfile->tx_array)) != NULL) - kfree_skb(skb); + while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL) + tun_ptr_free(ptr); skb_queue_purge(&tfile->sk.sk_write_queue); skb_queue_purge(&tfile->sk.sk_error_queue); } -static void tun_cleanup_tx_array(struct tun_file *tfile) +static void tun_cleanup_tx_ring(struct tun_file *tfile) { - if (tfile->tx_array.ring.queue) { - skb_array_cleanup(&tfile->tx_array); - memset(&tfile->tx_array, 0, sizeof(tfile->tx_array)); + if (tfile->tx_ring.queue) { + ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free); + xdp_rxq_info_unreg(&tfile->xdp_rxq); + memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); } } @@ -665,7 +734,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun->dev->reg_state == NETREG_REGISTERED) unregister_netdevice(tun->dev); } - tun_cleanup_tx_array(tfile); + tun_cleanup_tx_ring(tfile); sock_put(&tfile->sk); } } @@ -680,7 +749,6 @@ static void tun_detach(struct tun_file *tfile, bool clean) static void tun_detach_all(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); - struct bpf_prog *xdp_prog = rtnl_dereference(tun->xdp_prog); struct tun_file *tfile, *tmp; int i, n = tun->numqueues; @@ -707,19 +775,16 @@ static void tun_detach_all(struct net_device *dev) /* Drop read queue */ tun_queue_purge(tfile); sock_put(&tfile->sk); - tun_cleanup_tx_array(tfile); + tun_cleanup_tx_ring(tfile); } list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { tun_enable_queue(tfile); tun_queue_purge(tfile); sock_put(&tfile->sk); - tun_cleanup_tx_array(tfile); + tun_cleanup_tx_ring(tfile); } BUG_ON(tun->numdisabled != 0); - if (xdp_prog) - bpf_prog_put(xdp_prog); - if (tun->flags & IFF_PERSIST) module_put(THIS_MODULE); } @@ -760,13 +825,29 @@ static int tun_attach(struct tun_struct 
*tun, struct file *file, } if (!tfile->detached && - skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) { + ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) { err = -ENOMEM; goto out; } tfile->queue_index = tun->numqueues; tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN; + + if (tfile->detached) { + /* Re-attach detached tfile, updating XDP queue_index */ + WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq)); + + if (tfile->xdp_rxq.queue_index != tfile->queue_index) + tfile->xdp_rxq.queue_index = tfile->queue_index; + } else { + /* Setup XDP RX-queue info, for new tfile getting attached */ + err = xdp_rxq_info_reg(&tfile->xdp_rxq, + tun->dev, tfile->queue_index); + if (err < 0) + goto out; + err = 0; + } + rcu_assign_pointer(tfile->tun, tun); rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); tun->numqueues++; @@ -946,23 +1027,10 @@ static int tun_net_close(struct net_device *dev) } /* Net device start xmit */ -static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) +static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb) { - struct tun_struct *tun = netdev_priv(dev); - int txq = skb->queue_mapping; - struct tun_file *tfile; - u32 numqueues = 0; - - rcu_read_lock(); - tfile = rcu_dereference(tun->tfiles[txq]); - numqueues = READ_ONCE(tun->numqueues); - - /* Drop packet if interface is not attached */ - if (txq >= numqueues) - goto drop; - #ifdef CONFIG_RPS - if (numqueues == 1 && static_key_false(&rps_needed)) { + if (tun->numqueues == 1 && static_key_false(&rps_needed)) { /* Select queue was not called for the skbuff, so we extract the * RPS hash and save it into the flow_table here. */ @@ -978,6 +1046,37 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) } } #endif +} + +static unsigned int run_ebpf_filter(struct tun_struct *tun, + struct sk_buff *skb, + int len) +{ + struct tun_prog *prog = rcu_dereference(tun->filter_prog); + + if (prog) + len = bpf_prog_run_clear_cb(prog->prog, skb); + + return len; +} + +/* Net device start xmit */ +static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct tun_struct *tun = netdev_priv(dev); + int txq = skb->queue_mapping; + struct tun_file *tfile; + int len = skb->len; + + rcu_read_lock(); + tfile = rcu_dereference(tun->tfiles[txq]); + + /* Drop packet if interface is not attached */ + if (txq >= tun->numqueues) + goto drop; + + if (!rcu_dereference(tun->steering_prog)) + tun_automq_xmit(tun, skb); tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len); @@ -993,6 +1092,15 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) sk_filter(tfile->socket.sk, skb)) goto drop; + len = run_ebpf_filter(tun, skb, len); + + /* Trim extra bytes since we may insert vlan proto & TCI + * in tun_put_user(). + */ + len -= skb_vlan_tag_present(skb) ? 
sizeof(struct veth) : 0; + if (len <= 0 || pskb_trim(skb, len)) + goto drop; + if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) goto drop; @@ -1005,7 +1113,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) nf_reset(skb); - if (skb_array_produce(&tfile->tx_array, skb)) + if (ptr_ring_produce(&tfile->tx_ring, skb)) goto drop; /* Notify and wake up reader process */ @@ -1178,6 +1286,67 @@ static const struct net_device_ops tun_netdev_ops = { .ndo_get_stats64 = tun_net_get_stats64, }; +static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +{ + struct tun_struct *tun = netdev_priv(dev); + struct xdp_buff *buff = xdp->data_hard_start; + int headroom = xdp->data - xdp->data_hard_start; + struct tun_file *tfile; + u32 numqueues; + int ret = 0; + + /* Assure headroom is available and buff is properly aligned */ + if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp))) + return -ENOSPC; + + *buff = *xdp; + + rcu_read_lock(); + + numqueues = READ_ONCE(tun->numqueues); + if (!numqueues) { + ret = -ENOSPC; + goto out; + } + + tfile = rcu_dereference(tun->tfiles[smp_processor_id() % + numqueues]); + /* Encode the XDP flag into lowest bit for consumer to differ + * XDP buffer from sk_buff. + */ + if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) { + this_cpu_inc(tun->pcpu_stats->tx_dropped); + ret = -ENOSPC; + } + +out: + rcu_read_unlock(); + return ret; +} + +static void tun_xdp_flush(struct net_device *dev) +{ + struct tun_struct *tun = netdev_priv(dev); + struct tun_file *tfile; + u32 numqueues; + + rcu_read_lock(); + + numqueues = READ_ONCE(tun->numqueues); + if (!numqueues) + goto out; + + tfile = rcu_dereference(tun->tfiles[smp_processor_id() % + numqueues]); + /* Notify and wake up reader process */ + if (tfile->flags & TUN_FASYNC) + kill_fasync(&tfile->fasync, SIGIO, POLL_IN); + tfile->socket.sk->sk_data_ready(tfile->socket.sk); + +out: + rcu_read_unlock(); +} + static const struct net_device_ops tap_netdev_ops = { .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, @@ -1195,6 +1364,8 @@ static const struct net_device_ops tap_netdev_ops = { .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = tun_net_get_stats64, .ndo_bpf = tun_xdp, + .ndo_xdp_xmit = tun_xdp_xmit, + .ndo_xdp_flush = tun_xdp_flush, }; static void tun_flow_init(struct tun_struct *tun) @@ -1273,7 +1444,7 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait) poll_wait(file, sk_sleep(sk), wait); - if (!skb_array_empty(&tfile->tx_array)) + if (!ptr_ring_empty(&tfile->tx_ring)) mask |= POLLIN | POLLRDNORM; if (tun->dev->flags & IFF_UP && @@ -1486,6 +1657,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, xdp.data = buf + pad; xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; + xdp.rxq = &tfile->xdp_rxq; orig_data = xdp.data; act = bpf_prog_run_xdp(xdp_prog, &xdp); @@ -1560,7 +1732,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, int copylen; bool zerocopy = false; int err; - u32 rxhash; + u32 rxhash = 0; int skb_xdp = 1; bool frags = tun_napi_frags_enabled(tun); @@ -1748,7 +1920,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, rcu_read_unlock(); } - rxhash = __skb_get_hash_symmetric(skb); + rcu_read_lock(); + if (!rcu_dereference(tun->steering_prog)) + rxhash = __skb_get_hash_symmetric(skb); + rcu_read_unlock(); if (frags) { /* Exercise flow dissector code path. 
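tun_xdp_to_ptr(), tun_ptr_to_xdp() and tun_is_xdp_buff() are defined outside the hunks shown, but from their use here they amount to the classic low-bit tag: ring entries point at word-aligned objects, so bit 0 is always clear and can mark "this entry is an xdp_buff, not an sk_buff" (and ptr_ring never stores NULL, so the tag survives the empty check). A sketch under that assumption:

    #include <stdbool.h>
    #include <stdint.h>

    #define XDP_TAG 0x1UL   /* assumed value of the in-tree XDP flag bit */

    static inline void *xdp_to_ptr(void *xdp)
    {
        return (void *)((uintptr_t)xdp | XDP_TAG);
    }

    static inline bool ptr_is_xdp(void *ptr)
    {
        return (uintptr_t)ptr & XDP_TAG;
    }

    static inline void *ptr_to_xdp(void *ptr)
    {
        return (void *)((uintptr_t)ptr & ~XDP_TAG);
    }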
*/ @@ -1792,7 +1967,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, u64_stats_update_end(&stats->syncp); put_cpu_ptr(stats); - tun_flow_update(tun, rxhash, tfile); + if (rxhash) + tun_flow_update(tun, rxhash, tfile); + return total_len; } @@ -1813,6 +1990,40 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) return result; } +static ssize_t tun_put_user_xdp(struct tun_struct *tun, + struct tun_file *tfile, + struct xdp_buff *xdp, + struct iov_iter *iter) +{ + int vnet_hdr_sz = 0; + size_t size = xdp->data_end - xdp->data; + struct tun_pcpu_stats *stats; + size_t ret; + + if (tun->flags & IFF_VNET_HDR) { + struct virtio_net_hdr gso = { 0 }; + + vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); + if (unlikely(iov_iter_count(iter) < vnet_hdr_sz)) + return -EINVAL; + if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) != + sizeof(gso))) + return -EFAULT; + iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); + } + + ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz; + + stats = get_cpu_ptr(tun->pcpu_stats); + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += ret; + u64_stats_update_end(&stats->syncp); + put_cpu_ptr(tun->pcpu_stats); + + return ret; +} + /* Put packet to the user space buffer */ static ssize_t tun_put_user(struct tun_struct *tun, struct tun_file *tfile, @@ -1877,10 +2088,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, if (vlan_hlen) { int ret; - struct { - __be16 h_vlan_proto; - __be16 h_vlan_TCI; - } veth; + struct veth veth; veth.h_vlan_proto = skb->vlan_proto; veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); @@ -1910,15 +2118,14 @@ done: return total; } -static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock, - int *err) +static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) { DECLARE_WAITQUEUE(wait, current); - struct sk_buff *skb = NULL; + void *ptr = NULL; int error = 0; - skb = skb_array_consume(&tfile->tx_array); - if (skb) + ptr = ptr_ring_consume(&tfile->tx_ring); + if (ptr) goto out; if (noblock) { error = -EAGAIN; @@ -1929,8 +2136,8 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock, current->state = TASK_INTERRUPTIBLE; while (1) { - skb = skb_array_consume(&tfile->tx_array); - if (skb) + ptr = ptr_ring_consume(&tfile->tx_ring); + if (ptr) break; if (signal_pending(current)) { error = -ERESTARTSYS; @@ -1949,12 +2156,12 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock, out: *err = error; - return skb; + return ptr; } static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *to, - int noblock, struct sk_buff *skb) + int noblock, void *ptr) { ssize_t ret; int err; @@ -1962,23 +2169,31 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, tun_debug(KERN_INFO, tun, "tun_do_read\n"); if (!iov_iter_count(to)) { - if (skb) - kfree_skb(skb); + tun_ptr_free(ptr); return 0; } - if (!skb) { + if (!ptr) { /* Read frames from ring */ - skb = tun_ring_recv(tfile, noblock, &err); - if (!skb) + ptr = tun_ring_recv(tfile, noblock, &err); + if (!ptr) return err; } - ret = tun_put_user(tun, tfile, skb, to); - if (unlikely(ret < 0)) - kfree_skb(skb); - else - consume_skb(skb); + if (tun_is_xdp_buff(ptr)) { + struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); + + ret = tun_put_user_xdp(tun, tfile, xdp, to); + put_page(virt_to_head_page(xdp->data)); + } else { + struct sk_buff *skb = ptr; + + ret = tun_put_user(tun, tfile, skb, to); + if (unlikely(ret < 
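tun_put_user_xdp() below shows the IFF_VNET_HDR framing contract from the reader's side: every packet is prefixed by vnet_hdr_sz bytes, of which only sizeof(struct virtio_net_hdr) are meaningful (zeroed here, since an XDP frame carries no offload state) and the rest is skipped with iov_iter_advance(). A userspace reader mirrors that layout; a sketch, assuming hdr_sz is whatever TUNSETVNETHDRSZ negotiated:

    #include <sys/uio.h>
    #include <unistd.h>
    #include <linux/virtio_net.h>

    /* read one packet from a tun fd opened with IFF_VNET_HDR; assumes
     * hdr_sz >= sizeof(struct virtio_net_hdr), which the kernel enforces
     * at TUNSETVNETHDRSZ time */
    static ssize_t read_packet(int fd, size_t hdr_sz, void *pkt, size_t pkt_len)
    {
        struct virtio_net_hdr hdr;
        char pad[64];                /* absorbs hdr_sz > sizeof(hdr) */
        struct iovec iov[3] = {
            { &hdr, sizeof(hdr) },
            { pad, hdr_sz - sizeof(hdr) },
            { pkt, pkt_len },
        };
        int n = hdr_sz > sizeof(hdr) ? 3 : 2;
        ssize_t r;

        if (n == 2)
            iov[1] = iov[2];
        r = readv(fd, iov, n);
        return r < 0 ? r : r - (ssize_t)hdr_sz;  /* payload length */
    }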
0)) + kfree_skb(skb); + else + consume_skb(skb); + } return ret; } @@ -2000,6 +2215,39 @@ static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) return ret; } +static void tun_prog_free(struct rcu_head *rcu) +{ + struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu); + + bpf_prog_destroy(prog->prog); + kfree(prog); +} + +static int __tun_set_ebpf(struct tun_struct *tun, + struct tun_prog __rcu **prog_p, + struct bpf_prog *prog) +{ + struct tun_prog *old, *new = NULL; + + if (prog) { + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return -ENOMEM; + new->prog = prog; + } + + spin_lock_bh(&tun->lock); + old = rcu_dereference_protected(*prog_p, + lockdep_is_held(&tun->lock)); + rcu_assign_pointer(*prog_p, new); + spin_unlock_bh(&tun->lock); + + if (old) + call_rcu(&old->rcu, tun_prog_free); + + return 0; +} + static void tun_free_netdev(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); @@ -2008,6 +2256,8 @@ static void tun_free_netdev(struct net_device *dev) free_percpu(tun->pcpu_stats); tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); + __tun_set_ebpf(tun, &tun->steering_prog, NULL); + __tun_set_ebpf(tun, &tun->filter_prog, NULL); } static void tun_setup(struct net_device *dev) @@ -2081,12 +2331,12 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, { struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = tun_get(tfile); - struct sk_buff *skb = m->msg_control; + void *ptr = m->msg_control; int ret; if (!tun) { ret = -EBADFD; - goto out_free_skb; + goto out_free; } if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { @@ -2098,7 +2348,7 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, SOL_PACKET, TUN_TX_TIMESTAMP); goto out; } - ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb); + ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); if (ret > (ssize_t)total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? 
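For the filter program installed via TUNSETFILTEREBPF (the ioctl case appears further down), run_ebpf_filter() in tun_net_xmit() interprets the program's return value as the new packet length: after the VLAN-header adjustment, 0 drops the packet, a shorter value trims it with pskb_trim(), and anything at or above skb->len is a no-op. Loaded exactly like the steering sketch earlier; only the instructions differ:

    /* "mov r0, 256; exit": cap every transmitted packet at 256 bytes;
     * "mov r0, 0; exit" would drop everything */
    struct bpf_insn trim_to_256[2] = {
        { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 256 },
        { .code = BPF_JMP | BPF_EXIT },
    };

    /* prog_fd from the same BPF_PROG_LOAD sequence as before; -1 detaches */
    ioctl(tun_fd, TUNSETFILTEREBPF, &prog_fd);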
ret : total_len; @@ -2109,12 +2359,25 @@ out: out_put_tun: tun_put(tun); -out_free_skb: - if (skb) - kfree_skb(skb); +out_free: + tun_ptr_free(ptr); return ret; } +static int tun_ptr_peek_len(void *ptr) +{ + if (likely(ptr)) { + if (tun_is_xdp_buff(ptr)) { + struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); + + return xdp->data_end - xdp->data; + } + return __skb_array_len_with_tag(ptr); + } else { + return 0; + } +} + static int tun_peek_len(struct socket *sock) { struct tun_file *tfile = container_of(sock, struct tun_file, socket); @@ -2125,7 +2388,7 @@ static int tun_peek_len(struct socket *sock) if (!tun) return 0; - ret = skb_array_peek_len(&tfile->tx_array); + ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); tun_put(tun); return ret; @@ -2296,6 +2559,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) tun->filter_attached = false; tun->sndbuf = tfile->socket.sk->sk_sndbuf; tun->rx_batched = 0; + RCU_INIT_POINTER(tun->steering_prog, NULL); tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); if (!tun->pcpu_stats) { @@ -2488,6 +2752,26 @@ unlock: return ret; } +static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p, + void __user *data) +{ + struct bpf_prog *prog; + int fd; + + if (copy_from_user(&fd, data, sizeof(fd))) + return -EFAULT; + + if (fd == -1) { + prog = NULL; + } else { + prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); + if (IS_ERR(prog)) + return PTR_ERR(prog); + } + + return __tun_set_ebpf(tun, prog_p, prog); +} + static long __tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg, int ifreq_len) { @@ -2764,6 +3048,14 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, ret = 0; break; + case TUNSETSTEERINGEBPF: + ret = tun_set_ebpf(tun, &tun->steering_prog, argp); + break; + + case TUNSETFILTEREBPF: + ret = tun_set_ebpf(tun, &tun->filter_prog, argp); + break; + default: ret = -EINVAL; break; @@ -2860,7 +3152,7 @@ static int tun_chr_open(struct inode *inode, struct file * file) sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); - memset(&tfile->tx_array, 0, sizeof(tfile->tx_array)); + memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); return 0; } @@ -3009,25 +3301,26 @@ static int tun_queue_resize(struct tun_struct *tun) { struct net_device *dev = tun->dev; struct tun_file *tfile; - struct skb_array **arrays; + struct ptr_ring **rings; int n = tun->numqueues + tun->numdisabled; int ret, i; - arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL); - if (!arrays) + rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); + if (!rings) return -ENOMEM; for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); - arrays[i] = &tfile->tx_array; + rings[i] = &tfile->tx_ring; } list_for_each_entry(tfile, &tun->disabled, next) - arrays[i++] = &tfile->tx_array; + rings[i++] = &tfile->tx_ring; - ret = skb_array_resize_multiple(arrays, n, - dev->tx_queue_len, GFP_KERNEL); + ret = ptr_ring_resize_multiple(rings, n, + dev->tx_queue_len, GFP_KERNEL, + tun_ptr_free); - kfree(arrays); + kfree(rings); return ret; } @@ -3113,7 +3406,7 @@ struct socket *tun_get_socket(struct file *file) } EXPORT_SYMBOL_GPL(tun_get_socket); -struct skb_array *tun_get_skb_array(struct file *file) +struct ptr_ring *tun_get_tx_ring(struct file *file) { struct tun_file *tfile; @@ -3122,9 +3415,9 @@ struct skb_array *tun_get_skb_array(struct file *file) tfile = file->private_data; if (!tfile) return ERR_PTR(-EBADFD); - return &tfile->tx_array; + return &tfile->tx_ring; } 
-EXPORT_SYMBOL_GPL(tun_get_skb_array); +EXPORT_SYMBOL_GPL(tun_get_tx_ring); module_init(tun_init); module_exit(tun_cleanup); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 728819feab44..76ac48095c29 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -826,7 +826,7 @@ err: static const struct driver_info qmi_wwan_info = { .description = "WWAN/QMI device", - .flags = FLAG_WWAN, + .flags = FLAG_WWAN | FLAG_SEND_ZLP, .bind = qmi_wwan_bind, .unbind = qmi_wwan_unbind, .manage_power = qmi_wwan_manage_power, @@ -835,7 +835,7 @@ static const struct driver_info qmi_wwan_info = { static const struct driver_info qmi_wwan_info_quirk_dtr = { .description = "WWAN/QMI device", - .flags = FLAG_WWAN, + .flags = FLAG_WWAN | FLAG_SEND_ZLP, .bind = qmi_wwan_bind, .unbind = qmi_wwan_unbind, .manage_power = qmi_wwan_manage_power, @@ -1245,6 +1245,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/veth.c b/drivers/net/veth.c index f5438d0978ca..a69ad39ee57e 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -410,6 +410,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, if (ifmp && (dev->ifindex != 0)) peer->ifindex = ifmp->ifi_index; + peer->gso_max_size = dev->gso_max_size; + peer->gso_max_segs = dev->gso_max_segs; + err = register_netdevice(peer); put_net(net); net = NULL; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 559b215c0169..626c27352ae2 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -31,6 +31,7 @@ #include <linux/average.h> #include <linux/filter.h> #include <net/route.h> +#include <net/xdp.h> static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); @@ -65,16 +66,39 @@ static const unsigned long guest_offloads[] = { VIRTIO_NET_F_GUEST_UFO }; -struct virtnet_stats { - struct u64_stats_sync tx_syncp; - struct u64_stats_sync rx_syncp; - u64 tx_bytes; - u64 tx_packets; +struct virtnet_stat_desc { + char desc[ETH_GSTRING_LEN]; + size_t offset; +}; + +struct virtnet_sq_stats { + struct u64_stats_sync syncp; + u64 packets; + u64 bytes; +}; + +struct virtnet_rq_stats { + struct u64_stats_sync syncp; + u64 packets; + u64 bytes; +}; - u64 rx_bytes; - u64 rx_packets; +#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m) +#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m) + +static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = { + { "packets", VIRTNET_SQ_STAT(packets) }, + { "bytes", VIRTNET_SQ_STAT(bytes) }, +}; + +static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = { + { "packets", VIRTNET_RQ_STAT(packets) }, + { "bytes", VIRTNET_RQ_STAT(bytes) }, }; +#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc) +#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc) + /* Internal representation of a send virtqueue */ struct send_queue { /* Virtqueue associated with this send _queue */ @@ -86,6 +110,8 @@ struct send_queue { /* Name of the send queue: output.$index */ char name[40]; + struct virtnet_sq_stats stats; + struct napi_struct napi; }; @@ -98,6 +124,8 @@ struct receive_queue { struct bpf_prog __rcu 
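The virtnet_stat_desc tables pair an ethtool string with a byte offset (offsetof) into the per-queue stats struct, so the ethtool callbacks added further down can walk one table per direction instead of open-coding every counter. The pattern in isolation, runnable:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rq_stats { uint64_t packets, bytes; };

    struct stat_desc { const char *name; size_t offset; };

    static const struct stat_desc rq_desc[] = {
        { "packets", offsetof(struct rq_stats, packets) },
        { "bytes",   offsetof(struct rq_stats, bytes)   },
    };

    int main(void)
    {
        struct rq_stats s = { .packets = 42, .bytes = 60480 };
        const uint8_t *base = (const uint8_t *)&s;
        size_t i;

        for (i = 0; i < sizeof(rq_desc) / sizeof(rq_desc[0]); i++)
            printf("rx_queue_0_%s = %llu\n", rq_desc[i].name,
                   (unsigned long long)*(const uint64_t *)(base + rq_desc[i].offset));
        return 0;
    }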
*xdp_prog; + struct virtnet_rq_stats stats; + /* Chain pages by the private ptr. */ struct page *pages; @@ -115,6 +143,8 @@ struct receive_queue { /* Name of this receive queue: input.$index */ char name[40]; + + struct xdp_rxq_info xdp_rxq; }; struct virtnet_info { @@ -149,9 +179,6 @@ struct virtnet_info { /* Packet virtio header size */ u8 hdr_len; - /* Active statistics */ - struct virtnet_stats __percpu *stats; - /* Work struct for refilling if we run low on memory. */ struct delayed_work refill; @@ -261,9 +288,12 @@ static void virtqueue_napi_complete(struct napi_struct *napi, int opaque; opaque = virtqueue_enable_cb_prepare(vq); - if (napi_complete_done(napi, processed) && - unlikely(virtqueue_poll(vq, opaque))) - virtqueue_napi_schedule(napi, vq); + if (napi_complete_done(napi, processed)) { + if (unlikely(virtqueue_poll(vq, opaque))) + virtqueue_napi_schedule(napi, vq); + } else { + virtqueue_disable_cb(vq); + } } static void skb_xmit_done(struct virtqueue *vq) @@ -556,6 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev, xdp.data = xdp.data_hard_start + xdp_headroom; xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; + xdp.rxq = &rq->xdp_rxq; orig_data = xdp.data; act = bpf_prog_run_xdp(xdp_prog, &xdp); @@ -689,6 +720,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, xdp.data = data + vi->hdr_len; xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + (len - vi->hdr_len); + xdp.rxq = &rq->xdp_rxq; + act = bpf_prog_run_xdp(xdp_prog, &xdp); if (act != XDP_PASS) @@ -1118,7 +1151,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit) struct virtnet_info *vi = rq->vq->vdev->priv; unsigned int len, received = 0, bytes = 0; void *buf; - struct virtnet_stats *stats = this_cpu_ptr(vi->stats); if (!vi->big_packets || vi->mergeable_rx_bufs) { void *ctx; @@ -1141,10 +1173,10 @@ static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit) schedule_delayed_work(&vi->refill, 0); } - u64_stats_update_begin(&stats->rx_syncp); - stats->rx_bytes += bytes; - stats->rx_packets += received; - u64_stats_update_end(&stats->rx_syncp); + u64_stats_update_begin(&rq->stats.syncp); + rq->stats.bytes += bytes; + rq->stats.packets += received; + u64_stats_update_end(&rq->stats.syncp); return received; } @@ -1153,8 +1185,6 @@ static void free_old_xmit_skbs(struct send_queue *sq) { struct sk_buff *skb; unsigned int len; - struct virtnet_info *vi = sq->vq->vdev->priv; - struct virtnet_stats *stats = this_cpu_ptr(vi->stats); unsigned int packets = 0; unsigned int bytes = 0; @@ -1173,10 +1203,10 @@ static void free_old_xmit_skbs(struct send_queue *sq) if (!packets) return; - u64_stats_update_begin(&stats->tx_syncp); - stats->tx_bytes += bytes; - stats->tx_packets += packets; - u64_stats_update_end(&stats->tx_syncp); + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.bytes += bytes; + sq->stats.packets += packets; + u64_stats_update_end(&sq->stats.syncp); } static void virtnet_poll_cleantx(struct receive_queue *rq) @@ -1222,13 +1252,18 @@ static int virtnet_poll(struct napi_struct *napi, int budget) static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); - int i; + int i, err; for (i = 0; i < vi->max_queue_pairs; i++) { if (i < vi->curr_queue_pairs) /* Make sure we have some buffers: if oom use wq. 
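Registering xdp_rxq_info per RX queue and stamping xdp.rxq before bpf_prog_run_xdp() is what gives XDP programs a queue identity; on the program side it surfaces as ctx->rx_queue_index, exposed in the same development cycle. A consumer sketch, assuming clang and the modern libbpf include path:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int keep_queue0_only(struct xdp_md *ctx)
    {
        /* populated from the xdp_rxq_info the driver registered */
        if (ctx->rx_queue_index != 0)
            return XDP_DROP;
        return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";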
*/ if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); + + err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i); + if (err < 0) + return err; + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); } @@ -1460,24 +1495,25 @@ static void virtnet_stats(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct virtnet_info *vi = netdev_priv(dev); - int cpu; unsigned int start; + int i; - for_each_possible_cpu(cpu) { - struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); + for (i = 0; i < vi->max_queue_pairs; i++) { u64 tpackets, tbytes, rpackets, rbytes; + struct receive_queue *rq = &vi->rq[i]; + struct send_queue *sq = &vi->sq[i]; do { - start = u64_stats_fetch_begin_irq(&stats->tx_syncp); - tpackets = stats->tx_packets; - tbytes = stats->tx_bytes; - } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start)); + start = u64_stats_fetch_begin_irq(&sq->stats.syncp); + tpackets = sq->stats.packets; + tbytes = sq->stats.bytes; + } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); do { - start = u64_stats_fetch_begin_irq(&stats->rx_syncp); - rpackets = stats->rx_packets; - rbytes = stats->rx_bytes; - } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start)); + start = u64_stats_fetch_begin_irq(&rq->stats.syncp); + rpackets = rq->stats.packets; + rbytes = rq->stats.bytes; + } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); tot->rx_packets += rpackets; tot->tx_packets += tpackets; @@ -1557,6 +1593,7 @@ static int virtnet_close(struct net_device *dev) cancel_delayed_work_sync(&vi->refill); for (i = 0; i < vi->max_queue_pairs; i++) { + xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); napi_disable(&vi->rq[i].napi); virtnet_napi_tx_disable(&vi->sq[i].napi); } @@ -1814,6 +1851,83 @@ static int virtnet_set_channels(struct net_device *dev, return err; } +static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + struct virtnet_info *vi = netdev_priv(dev); + char *p = (char *)data; + unsigned int i, j; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < vi->curr_queue_pairs; i++) { + for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s", + i, virtnet_rq_stats_desc[j].desc); + p += ETH_GSTRING_LEN; + } + } + + for (i = 0; i < vi->curr_queue_pairs; i++) { + for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s", + i, virtnet_sq_stats_desc[j].desc); + p += ETH_GSTRING_LEN; + } + } + break; + } +} + +static int virtnet_get_sset_count(struct net_device *dev, int sset) +{ + struct virtnet_info *vi = netdev_priv(dev); + + switch (sset) { + case ETH_SS_STATS: + return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + + VIRTNET_SQ_STATS_LEN); + default: + return -EOPNOTSUPP; + } +} + +static void virtnet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct virtnet_info *vi = netdev_priv(dev); + unsigned int idx = 0, start, i, j; + const u8 *stats_base; + size_t offset; + + for (i = 0; i < vi->curr_queue_pairs; i++) { + struct receive_queue *rq = &vi->rq[i]; + + stats_base = (u8 *)&rq->stats; + do { + start = u64_stats_fetch_begin_irq(&rq->stats.syncp); + for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { + offset = virtnet_rq_stats_desc[j].offset; + data[idx + j] = *(u64 *)(stats_base + offset); + } + } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); + idx += VIRTNET_RQ_STATS_LEN; + } + + for (i = 0; i < vi->curr_queue_pairs; i++) { + 
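The per-queue counters work without per-cpu allocation because each queue already has exactly one writer at a time (NAPI context on RX, the xmit path under the tx queue lock on TX), so a u64_stats seqcount suffices for tear-free 64-bit reads on 32-bit hosts. The pairing, distilled from the hunks above and below:

    /* writer: a single context per queue */
    u64_stats_update_begin(&rq->stats.syncp);
    rq->stats.packets++;
    rq->stats.bytes += len;
    u64_stats_update_end(&rq->stats.syncp);

    /* reader: retry if a writer interleaved */
    do {
        start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
        packets = rq->stats.packets;
        bytes = rq->stats.bytes;
    } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));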
struct send_queue *sq = &vi->sq[i]; + + stats_base = (u8 *)&sq->stats; + do { + start = u64_stats_fetch_begin_irq(&sq->stats.syncp); + for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { + offset = virtnet_sq_stats_desc[j].offset; + data[idx + j] = *(u64 *)(stats_base + offset); + } + } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); + idx += VIRTNET_SQ_STATS_LEN; + } +} + static void virtnet_get_channels(struct net_device *dev, struct ethtool_channels *channels) { @@ -1891,10 +2005,31 @@ static void virtnet_init_settings(struct net_device *dev) vi->duplex = DUPLEX_UNKNOWN; } +static void virtnet_update_settings(struct virtnet_info *vi) +{ + u32 speed; + u8 duplex; + + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) + return; + + speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config, + speed)); + if (ethtool_validate_speed(speed)) + vi->speed = speed; + duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config, + duplex)); + if (ethtool_validate_duplex(duplex)) + vi->duplex = duplex; +} + static const struct ethtool_ops virtnet_ethtool_ops = { .get_drvinfo = virtnet_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = virtnet_get_ringparam, + .get_strings = virtnet_get_strings, + .get_sset_count = virtnet_get_sset_count, + .get_ethtool_stats = virtnet_get_ethtool_stats, .set_channels = virtnet_set_channels, .get_channels = virtnet_get_channels, .get_ts_info = ethtool_op_get_ts_info, @@ -2144,6 +2279,7 @@ static void virtnet_config_changed_work(struct work_struct *work) vi->status = v; if (vi->status & VIRTIO_NET_S_LINK_UP) { + virtnet_update_settings(vi); netif_carrier_on(vi->dev); netif_tx_wake_all_queues(vi->dev); } else { @@ -2386,6 +2522,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); + + u64_stats_init(&vi->rq[i].stats.syncp); + u64_stats_init(&vi->sq[i].stats.syncp); } return 0; @@ -2510,7 +2649,7 @@ static int virtnet_validate(struct virtio_device *vdev) static int virtnet_probe(struct virtio_device *vdev) { - int i, err; + int i, err = -ENOMEM; struct net_device *dev; struct virtnet_info *vi; u16 max_queue_pairs; @@ -2587,17 +2726,6 @@ static int virtnet_probe(struct virtio_device *vdev) vi->dev = dev; vi->vdev = vdev; vdev->priv = vi; - vi->stats = alloc_percpu(struct virtnet_stats); - err = -ENOMEM; - if (vi->stats == NULL) - goto free; - - for_each_possible_cpu(i) { - struct virtnet_stats *virtnet_stats; - virtnet_stats = per_cpu_ptr(vi->stats, i); - u64_stats_init(&virtnet_stats->tx_syncp); - u64_stats_init(&virtnet_stats->rx_syncp); - } INIT_WORK(&vi->config_work, virtnet_config_changed_work); @@ -2634,7 +2762,7 @@ static int virtnet_probe(struct virtio_device *vdev) */ dev_err(&vdev->dev, "device MTU appears to have changed " "it is now %d < %d", mtu, dev->min_mtu); - goto free_stats; + goto free; } dev->mtu = mtu; @@ -2658,7 +2786,7 @@ static int virtnet_probe(struct virtio_device *vdev) /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ err = init_vqs(vi); if (err) - goto free_stats; + goto free; #ifdef CONFIG_SYSFS if (vi->mergeable_rx_bufs) @@ -2692,6 +2820,7 @@ static int virtnet_probe(struct virtio_device *vdev) schedule_work(&vi->config_work); } else { vi->status = VIRTIO_NET_S_LINK_UP; + virtnet_update_settings(vi); netif_carrier_on(dev); } @@ -2712,8 +2841,6 @@ free_vqs: cancel_delayed_work_sync(&vi->refill); 
free_receive_page_frags(vi); virtnet_del_vqs(vi); -free_stats: - free_percpu(vi->stats); free: free_netdev(dev); return err; @@ -2746,7 +2873,6 @@ static void virtnet_remove(struct virtio_device *vdev) remove_vq_common(vi); - free_percpu(vi->stats); free_netdev(vi->dev); } @@ -2793,7 +2919,8 @@ static struct virtio_device_id id_table[] = { VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ VIRTIO_NET_F_CTRL_MAC_ADDR, \ - VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS + VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ + VIRTIO_NET_F_SPEED_DUPLEX static unsigned int features[] = { VIRTNET_FEATURES, diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 9c51b8be0038..5ba222920e80 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -69,10 +69,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.4.a.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.4.11.0-k" /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01040a00 +#define VMXNET3_DRIVER_VERSION_NUM 0x01040b00 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ @@ -416,8 +416,8 @@ struct vmxnet3_adapter { /* must be a multiple of VMXNET3_RING_SIZE_ALIGN */ #define VMXNET3_DEF_TX_RING_SIZE 512 -#define VMXNET3_DEF_RX_RING_SIZE 256 -#define VMXNET3_DEF_RX_RING2_SIZE 128 +#define VMXNET3_DEF_RX_RING_SIZE 1024 +#define VMXNET3_DEF_RX_RING2_SIZE 256 #define VMXNET3_DEF_RXDATA_DESC_SIZE 128 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index c3e34e3c82a7..fab7a4db249e 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -3709,18 +3709,16 @@ static __net_init int vxlan_init_net(struct net *net) return 0; } -static void __net_exit vxlan_exit_net(struct net *net) +static void vxlan_destroy_tunnels(struct net *net, struct list_head *head) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan, *next; struct net_device *dev, *aux; unsigned int h; - LIST_HEAD(list); - rtnl_lock(); for_each_netdev_safe(net, dev, aux) if (dev->rtnl_link_ops == &vxlan_link_ops) - unregister_netdevice_queue(dev, &list); + unregister_netdevice_queue(dev, head); list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { /* If vxlan->dev is in the same netns, it has already been added @@ -3728,20 +3726,30 @@ static void __net_exit vxlan_exit_net(struct net *net) */ if (!net_eq(dev_net(vxlan->dev), net)) { gro_cells_destroy(&vxlan->gro_cells); - unregister_netdevice_queue(vxlan->dev, &list); + unregister_netdevice_queue(vxlan->dev, head); } } - unregister_netdevice_many(&list); - rtnl_unlock(); - for (h = 0; h < PORT_HASH_SIZE; ++h) WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h])); } +static void __net_exit vxlan_exit_batch_net(struct list_head *net_list) +{ + struct net *net; + LIST_HEAD(list); + + rtnl_lock(); + list_for_each_entry(net, net_list, exit_list) + vxlan_destroy_tunnels(net, &list); + + unregister_netdevice_many(&list); + rtnl_unlock(); +} + static struct pernet_operations vxlan_net_ops = { .init = vxlan_init_net, - .exit = vxlan_exit_net, + .exit_batch = vxlan_exit_batch_net, .id = &vxlan_net_id, .size = sizeof(struct vxlan_net), }; diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index 87f56d0e17a6..deb5ae21a559 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -47,12 +47,19 @@ config 
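The vxlan change above converts a per-namespace .exit into a single .exit_batch, so one rtnl_lock() and one unregister_netdevice_many() pass (and hence one netdev-unregister grace period) cover every namespace dying in the batch. The general shape, for a hypothetical foo tunnel module:

    static void __net_exit foo_exit_batch_net(struct list_head *net_list)
    {
        struct net *net;
        LIST_HEAD(list);

        rtnl_lock();
        list_for_each_entry(net, net_list, exit_list)
            foo_collect_devices(net, &list);  /* queue, don't unregister yet */
        unregister_netdevice_many(&list);     /* one pass for the whole batch */
        rtnl_unlock();
    }

    static struct pernet_operations foo_net_ops = {
        .init       = foo_init_net,
        .exit_batch = foo_exit_batch_net,
    };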
ATH10K_DEBUG config ATH10K_DEBUGFS bool "Atheros ath10k debugfs support" depends on ATH10K && DEBUG_FS - select RELAY ---help--- Enabled debugfs support If unsure, say Y to make it easier to debug problems. +config ATH10K_SPECTRAL + bool "Atheros ath10k spectral scan support" + depends on ATH10K_DEBUGFS + select RELAY + default n + ---help--- + Say Y to enable access to the FFT/spectral data via debugfs. + config ATH10K_TRACING bool "Atheros ath10k tracing support" depends on ATH10K diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index 9492177e9063..6739ac26fd29 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -15,12 +15,13 @@ ath10k_core-y += mac.o \ p2p.o \ swap.o -ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o +ath10k_core-$(CONFIG_ATH10K_SPECTRAL) += spectral.o ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o ath10k_core-$(CONFIG_THERMAL) += thermal.o ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o ath10k_core-$(CONFIG_PM) += wow.o +ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o ath10k_pci-y += pci.o \ diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index ff6815e95684..35d10490f6c3 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved. + * Copyright (c) 2016-2017 Qualcomm Atheros, Inc. All rights reserved. * Copyright (c) 2015 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c index 2d3a2f31123d..af4978d6a14b 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.c +++ b/drivers/net/wireless/ath/ath10k/bmi.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h index 9c0839b2ca8f..9a396817aa55 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.h +++ b/drivers/net/wireless/ath/ath10k/bmi.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index a8afd690290f..b9def7bace2f 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -327,12 +327,12 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, * Guts of ath10k_ce_send. 
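The u32 to dma_addr_t conversions running through ce.c are not cosmetic: dma_addr_t can be 64-bit even on a 32-bit CPU (CONFIG_ARCH_DMA_ADDR_T_64BIT), and the WCN3990 support below genuinely produces bus addresses above 4 GiB, which a u32 would silently truncate. When such addresses need logging, the dedicated printk specifier keeps the full width without casts; an illustrative sketch:

    static void log_bus_addr(struct ath10k *ar, dma_addr_t paddr)
    {
        /* %pad takes a pointer to a dma_addr_t and prints its native width */
        ath10k_dbg(ar, ATH10K_DBG_PCI, "ce buffer at %pad\n", &paddr);
    }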
* The caller takes responsibility for any needed locking. */ -int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, - void *per_transfer_context, - u32 buffer, - unsigned int nbytes, - unsigned int transfer_id, - unsigned int flags) +static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, + void *per_transfer_context, + dma_addr_t buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags) { struct ath10k *ar = ce_state->ar; struct ath10k_ce_ring *src_ring = ce_state->src_ring; @@ -384,6 +384,87 @@ exit: return ret; } +static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state, + void *per_transfer_context, + dma_addr_t buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags) +{ + struct ath10k *ar = ce_state->ar; + struct ath10k_ce_ring *src_ring = ce_state->src_ring; + struct ce_desc_64 *desc, sdesc; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int sw_index = src_ring->sw_index; + unsigned int write_index = src_ring->write_index; + u32 ctrl_addr = ce_state->ctrl_addr; + __le32 *addr; + u32 desc_flags = 0; + int ret = 0; + + if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + return -ESHUTDOWN; + + if (nbytes > ce_state->src_sz_max) + ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n", + __func__, nbytes, ce_state->src_sz_max); + + if (unlikely(CE_RING_DELTA(nentries_mask, + write_index, sw_index - 1) <= 0)) { + ret = -ENOSR; + goto exit; + } + + desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space, + write_index); + + desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA); + + if (flags & CE_SEND_FLAG_GATHER) + desc_flags |= CE_DESC_FLAGS_GATHER; + + if (flags & CE_SEND_FLAG_BYTE_SWAP) + desc_flags |= CE_DESC_FLAGS_BYTE_SWAP; + + addr = (__le32 *)&sdesc.addr; + + flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK; + addr[0] = __cpu_to_le32(buffer); + addr[1] = __cpu_to_le32(flags); + if (flags & CE_SEND_FLAG_GATHER) + addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER); + else + addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER)); + + sdesc.nbytes = __cpu_to_le16(nbytes); + sdesc.flags = __cpu_to_le16(desc_flags); + + *desc = sdesc; + + src_ring->per_transfer_context[write_index] = per_transfer_context; + + /* Update Source Ring Write Index */ + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + + if (!(flags & CE_SEND_FLAG_GATHER)) + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index); + + src_ring->write_index = write_index; +exit: + return ret; +} + +int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, + void *per_transfer_context, + dma_addr_t buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags) +{ + return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context, + buffer, nbytes, transfer_id, flags); +} + void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe) { struct ath10k *ar = pipe->ar; @@ -413,7 +494,7 @@ void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe) int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, void *per_transfer_context, - u32 buffer, + dma_addr_t buffer, unsigned int nbytes, unsigned int transfer_id, unsigned int flags) @@ -459,7 +540,8 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe) return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); } -int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) +static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, + dma_addr_t paddr) { struct ath10k *ar = 
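The 64-bit send path below packs a WCN3990 bus address as a low 32-bit word plus up to five extra address bits (CE_DESC_FLAGS_GET_MASK is GENMASK(4, 0), i.e. address bits 32-36) folded into the second descriptor word alongside the flag bits. The arithmetic in isolation, runnable:

    #include <stdint.h>
    #include <stdio.h>

    #define UPPER_ADDR_MASK 0x1fu   /* GENMASK(4, 0) */

    int main(void)
    {
        uint64_t bus = 0x12ABCD1234ULL;  /* an example address above 4 GiB */
        uint32_t lo = (uint32_t)bus;
        uint32_t upper = (uint32_t)(bus >> 32) & UPPER_ADDR_MASK;

        printf("addr[0] = 0x%08x, addr[1] |= 0x%02x\n", lo, upper);
        return 0;
    }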
pipe->ar; struct ath10k_ce *ce = ath10k_ce_priv(ar); @@ -488,6 +570,39 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) return 0; } +static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe, + void *ctx, + dma_addr_t paddr) +{ + struct ath10k *ar = pipe->ar; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_ring *dest_ring = pipe->dest_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int write_index = dest_ring->write_index; + unsigned int sw_index = dest_ring->sw_index; + struct ce_desc_64 *base = dest_ring->base_addr_owner_space; + struct ce_desc_64 *desc = + CE_DEST_RING_TO_DESC_64(base, write_index); + u32 ctrl_addr = pipe->ctrl_addr; + + lockdep_assert_held(&ce->ce_lock); + + if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) + return -ENOSPC; + + desc->addr = __cpu_to_le64(paddr); + desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK); + + desc->nbytes = 0; + + dest_ring->per_transfer_context[write_index] = ctx; + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); + dest_ring->write_index = write_index; + + return 0; +} + void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries) { struct ath10k *ar = pipe->ar; @@ -508,14 +623,15 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries) dest_ring->write_index = write_index; } -int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) +int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, + dma_addr_t paddr) { struct ath10k *ar = pipe->ar; struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; spin_lock_bh(&ce->ce_lock); - ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr); + ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr); spin_unlock_bh(&ce->ce_lock); return ret; @@ -525,9 +641,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) * Guts of ath10k_ce_completed_recv_next. * The caller takes responsibility for any necessary locking. */ -int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, - void **per_transfer_contextp, - unsigned int *nbytesp) +static int + _ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + unsigned int *nbytesp) { struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; @@ -574,6 +691,64 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, return 0; } +static int +_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + unsigned int *nbytesp) +{ + struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int sw_index = dest_ring->sw_index; + struct ce_desc_64 *base = dest_ring->base_addr_owner_space; + struct ce_desc_64 *desc = + CE_DEST_RING_TO_DESC_64(base, sw_index); + struct ce_desc_64 sdesc; + u16 nbytes; + + /* Copy in one go for performance reasons */ + sdesc = *desc; + + nbytes = __le16_to_cpu(sdesc.nbytes); + if (nbytes == 0) { + /* This closes a relatively unusual race where the Host + * sees the updated DRRI before the update to the + * corresponding descriptor has completed. We treat this + * as a descriptor that is not yet done. 
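One detail worth noting when auditing the address-width assumptions here: CE_DESC_37BIT_ADDR_MASK is defined in ce.h below as GENMASK_ULL(37, 0), which keeps bits 0 through 37 inclusive, a 38-bit value, one bit wider than the 37 bits the name and the 32+5 split on the send side suggest. The mask arithmetic, checkable in plain C:

    #include <stdio.h>

    /* mirrors the kernel's GENMASK_ULL(h, l): bits l..h inclusive */
    #define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
        printf("0x%llx\n", (unsigned long long)GENMASK_ULL(37, 0));
        /* prints 0x3fffffffff: 38 set bits */
        return 0;
    }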
+ */ + return -EIO; + } + + desc->nbytes = 0; + + /* Return data from completed destination descriptor */ + *nbytesp = nbytes; + + if (per_transfer_contextp) + *per_transfer_contextp = + dest_ring->per_transfer_context[sw_index]; + + /* Copy engine 5 (HTT Rx) will reuse the same transfer context. + * So update transfer context all CEs except CE5. + */ + if (ce_state->id != 5) + dest_ring->per_transfer_context[sw_index] = NULL; + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + dest_ring->sw_index = sw_index; + + return 0; +} + +int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_ctx, + unsigned int *nbytesp) +{ + return ce_state->ops->ce_completed_recv_next_nolock(ce_state, + per_transfer_ctx, + nbytesp); +} + int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, unsigned int *nbytesp) @@ -583,17 +758,18 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, int ret; spin_lock_bh(&ce->ce_lock); - ret = ath10k_ce_completed_recv_next_nolock(ce_state, + ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state, per_transfer_contextp, nbytesp); + spin_unlock_bh(&ce->ce_lock); return ret; } -int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, - void **per_transfer_contextp, - u32 *bufferp) +static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + dma_addr_t *bufferp) { struct ath10k_ce_ring *dest_ring; unsigned int nentries_mask; @@ -644,6 +820,69 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, return ret; } +static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + dma_addr_t *bufferp) +{ + struct ath10k_ce_ring *dest_ring; + unsigned int nentries_mask; + unsigned int sw_index; + unsigned int write_index; + int ret; + struct ath10k *ar; + struct ath10k_ce *ce; + + dest_ring = ce_state->dest_ring; + + if (!dest_ring) + return -EIO; + + ar = ce_state->ar; + ce = ath10k_ce_priv(ar); + + spin_lock_bh(&ce->ce_lock); + + nentries_mask = dest_ring->nentries_mask; + sw_index = dest_ring->sw_index; + write_index = dest_ring->write_index; + if (write_index != sw_index) { + struct ce_desc_64 *base = dest_ring->base_addr_owner_space; + struct ce_desc_64 *desc = + CE_DEST_RING_TO_DESC_64(base, sw_index); + + /* Return data from completed destination descriptor */ + *bufferp = __le64_to_cpu(desc->addr); + + if (per_transfer_contextp) + *per_transfer_contextp = + dest_ring->per_transfer_context[sw_index]; + + /* sanity */ + dest_ring->per_transfer_context[sw_index] = NULL; + desc->nbytes = 0; + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + dest_ring->sw_index = sw_index; + ret = 0; + } else { + ret = -EIO; + } + + spin_unlock_bh(&ce->ce_lock); + + return ret; +} + +int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + dma_addr_t *bufferp) +{ + return ce_state->ops->ce_revoke_recv_next(ce_state, + per_transfer_contextp, + bufferp); +} + /* * Guts of ath10k_ce_completed_send_next. * The caller takes responsibility for any necessary locking. 
@@ -698,10 +937,45 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, return 0; } +static void ath10k_ce_extract_desc_data(struct ath10k *ar, + struct ath10k_ce_ring *src_ring, + u32 sw_index, + dma_addr_t *bufferp, + u32 *nbytesp, + u32 *transfer_idp) +{ + struct ce_desc *base = src_ring->base_addr_owner_space; + struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index); + + /* Return data from completed source descriptor */ + *bufferp = __le32_to_cpu(desc->addr); + *nbytesp = __le16_to_cpu(desc->nbytes); + *transfer_idp = MS(__le16_to_cpu(desc->flags), + CE_DESC_FLAGS_META_DATA); +} + +static void ath10k_ce_extract_desc_data_64(struct ath10k *ar, + struct ath10k_ce_ring *src_ring, + u32 sw_index, + dma_addr_t *bufferp, + u32 *nbytesp, + u32 *transfer_idp) +{ + struct ce_desc_64 *base = src_ring->base_addr_owner_space; + struct ce_desc_64 *desc = + CE_SRC_RING_TO_DESC_64(base, sw_index); + + /* Return data from completed source descriptor */ + *bufferp = __le64_to_cpu(desc->addr); + *nbytesp = __le16_to_cpu(desc->nbytes); + *transfer_idp = MS(__le16_to_cpu(desc->flags), + CE_DESC_FLAGS_META_DATA); +} + /* NB: Modeled after ath10k_ce_completed_send_next */ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp, + dma_addr_t *bufferp, unsigned int *nbytesp, unsigned int *transfer_idp) { @@ -728,14 +1002,9 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, write_index = src_ring->write_index; if (write_index != sw_index) { - struct ce_desc *base = src_ring->base_addr_owner_space; - struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index); - - /* Return data from completed source descriptor */ - *bufferp = __le32_to_cpu(desc->addr); - *nbytesp = __le16_to_cpu(desc->nbytes); - *transfer_idp = MS(__le16_to_cpu(desc->flags), - CE_DESC_FLAGS_META_DATA); + ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index, + bufferp, nbytesp, + transfer_idp); if (per_transfer_contextp) *per_transfer_contextp = @@ -897,8 +1166,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar, nentries = roundup_pow_of_two(attr->src_nentries); - memset(src_ring->base_addr_owner_space, 0, - nentries * sizeof(struct ce_desc)); + if (ar->hw_params.target_64bit) + memset(src_ring->base_addr_owner_space, 0, + nentries * sizeof(struct ce_desc_64)); + else + memset(src_ring->base_addr_owner_space, 0, + nentries * sizeof(struct ce_desc)); src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); src_ring->sw_index &= src_ring->nentries_mask; @@ -934,8 +1207,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, nentries = roundup_pow_of_two(attr->dest_nentries); - memset(dest_ring->base_addr_owner_space, 0, - nentries * sizeof(struct ce_desc)); + if (ar->hw_params.target_64bit) + memset(dest_ring->base_addr_owner_space, 0, + nentries * sizeof(struct ce_desc_64)); + else + memset(dest_ring->base_addr_owner_space, 0, + nentries * sizeof(struct ce_desc)); dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr); dest_ring->sw_index &= dest_ring->nentries_mask; @@ -993,12 +1270,57 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, src_ring->base_addr_ce_space_unaligned = base_addr; - src_ring->base_addr_owner_space = PTR_ALIGN( - src_ring->base_addr_owner_space_unaligned, - CE_DESC_RING_ALIGN); - src_ring->base_addr_ce_space = ALIGN( - src_ring->base_addr_ce_space_unaligned, - CE_DESC_RING_ALIGN); + src_ring->base_addr_owner_space = + 
PTR_ALIGN(src_ring->base_addr_owner_space_unaligned, + CE_DESC_RING_ALIGN); + src_ring->base_addr_ce_space = + ALIGN(src_ring->base_addr_ce_space_unaligned, + CE_DESC_RING_ALIGN); + + return src_ring; +} + +static struct ath10k_ce_ring * +ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id, + const struct ce_attr *attr) +{ + struct ath10k_ce_ring *src_ring; + u32 nentries = attr->src_nentries; + dma_addr_t base_addr; + + nentries = roundup_pow_of_two(nentries); + + src_ring = kzalloc(sizeof(*src_ring) + + (nentries * + sizeof(*src_ring->per_transfer_context)), + GFP_KERNEL); + if (!src_ring) + return ERR_PTR(-ENOMEM); + + src_ring->nentries = nentries; + src_ring->nentries_mask = nentries - 1; + + /* Legacy platforms that do not support cache + * coherent DMA are unsupported + */ + src_ring->base_addr_owner_space_unaligned = + dma_alloc_coherent(ar->dev, + (nentries * sizeof(struct ce_desc_64) + + CE_DESC_RING_ALIGN), + &base_addr, GFP_KERNEL); + if (!src_ring->base_addr_owner_space_unaligned) { + kfree(src_ring); + return ERR_PTR(-ENOMEM); + } + + src_ring->base_addr_ce_space_unaligned = base_addr; + + src_ring->base_addr_owner_space = + PTR_ALIGN(src_ring->base_addr_owner_space_unaligned, + CE_DESC_RING_ALIGN); + src_ring->base_addr_ce_space = + ALIGN(src_ring->base_addr_ce_space_unaligned, + CE_DESC_RING_ALIGN); return src_ring; } @@ -1039,12 +1361,63 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, dest_ring->base_addr_ce_space_unaligned = base_addr; - dest_ring->base_addr_owner_space = PTR_ALIGN( - dest_ring->base_addr_owner_space_unaligned, - CE_DESC_RING_ALIGN); - dest_ring->base_addr_ce_space = ALIGN( - dest_ring->base_addr_ce_space_unaligned, - CE_DESC_RING_ALIGN); + dest_ring->base_addr_owner_space = + PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned, + CE_DESC_RING_ALIGN); + dest_ring->base_addr_ce_space = + ALIGN(dest_ring->base_addr_ce_space_unaligned, + CE_DESC_RING_ALIGN); + + return dest_ring; +} + +static struct ath10k_ce_ring * +ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id, + const struct ce_attr *attr) +{ + struct ath10k_ce_ring *dest_ring; + u32 nentries; + dma_addr_t base_addr; + + nentries = roundup_pow_of_two(attr->dest_nentries); + + dest_ring = kzalloc(sizeof(*dest_ring) + + (nentries * + sizeof(*dest_ring->per_transfer_context)), + GFP_KERNEL); + if (!dest_ring) + return ERR_PTR(-ENOMEM); + + dest_ring->nentries = nentries; + dest_ring->nentries_mask = nentries - 1; + + /* Legacy platforms that do not support cache + * coherent DMA are unsupported + */ + dest_ring->base_addr_owner_space_unaligned = + dma_alloc_coherent(ar->dev, + (nentries * sizeof(struct ce_desc_64) + + CE_DESC_RING_ALIGN), + &base_addr, GFP_KERNEL); + if (!dest_ring->base_addr_owner_space_unaligned) { + kfree(dest_ring); + return ERR_PTR(-ENOMEM); + } + + dest_ring->base_addr_ce_space_unaligned = base_addr; + + /* Correctly initialize memory to 0 to prevent garbage + * data crashing system when download firmware + */ + memset(dest_ring->base_addr_owner_space_unaligned, 0, + nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN); + + dest_ring->base_addr_owner_space = + PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned, + CE_DESC_RING_ALIGN); + dest_ring->base_addr_ce_space = + ALIGN(dest_ring->base_addr_ce_space_unaligned, + CE_DESC_RING_ALIGN); return dest_ring; } @@ -1107,65 +1480,36 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id) ath10k_ce_deinit_dest_ring(ar, ce_id); } -int ath10k_ce_alloc_pipe(struct 
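Both _64 ring allocators use the standard over-allocate-then-align idiom: request size + CE_DESC_RING_ALIGN from dma_alloc_coherent(), then round the CPU pointer up with PTR_ALIGN() and the bus address with ALIGN(). In general form (the unaligned originals are what dma_free_coherent() formally expects back; in practice the aligned and unaligned values coincide here because coherent allocations are at least page aligned):

    size_t sz = nentries * sizeof(struct ce_desc_64);
    void *cpu_raw;
    dma_addr_t bus_raw;

    cpu_raw = dma_alloc_coherent(ar->dev, sz + CE_DESC_RING_ALIGN,
                                 &bus_raw, GFP_KERNEL);
    if (!cpu_raw)
        return ERR_PTR(-ENOMEM);

    ring->base_addr_owner_space = PTR_ALIGN(cpu_raw, CE_DESC_RING_ALIGN);
    ring->base_addr_ce_space = ALIGN(bus_raw, CE_DESC_RING_ALIGN);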
ath10k *ar, int ce_id, - const struct ce_attr *attr) +static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) { struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; - int ret; - - /* - * Make sure there's enough CE ringbuffer entries for HTT TX to avoid - * additional TX locking checks. - * - * For the lack of a better place do the check here. - */ - BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC > - (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); - BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC > - (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); - BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC > - (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); - - ce_state->ar = ar; - ce_state->id = ce_id; - ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id); - ce_state->attr_flags = attr->flags; - ce_state->src_sz_max = attr->src_sz_max; - if (attr->src_nentries) - ce_state->send_cb = attr->send_cb; - - if (attr->dest_nentries) - ce_state->recv_cb = attr->recv_cb; - - if (attr->src_nentries) { - ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr); - if (IS_ERR(ce_state->src_ring)) { - ret = PTR_ERR(ce_state->src_ring); - ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n", - ce_id, ret); - ce_state->src_ring = NULL; - return ret; - } + if (ce_state->src_ring) { + dma_free_coherent(ar->dev, + (ce_state->src_ring->nentries * + sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + ce_state->src_ring->base_addr_owner_space, + ce_state->src_ring->base_addr_ce_space); + kfree(ce_state->src_ring); } - if (attr->dest_nentries) { - ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id, - attr); - if (IS_ERR(ce_state->dest_ring)) { - ret = PTR_ERR(ce_state->dest_ring); - ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n", - ce_id, ret); - ce_state->dest_ring = NULL; - return ret; - } + if (ce_state->dest_ring) { + dma_free_coherent(ar->dev, + (ce_state->dest_ring->nentries * + sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + ce_state->dest_ring->base_addr_owner_space, + ce_state->dest_ring->base_addr_ce_space); + kfree(ce_state->dest_ring); } - return 0; + ce_state->src_ring = NULL; + ce_state->dest_ring = NULL; } -void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) +static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id) { struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; @@ -1173,7 +1517,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) if (ce_state->src_ring) { dma_free_coherent(ar->dev, (ce_state->src_ring->nentries * - sizeof(struct ce_desc) + + sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN), ce_state->src_ring->base_addr_owner_space, ce_state->src_ring->base_addr_ce_space); @@ -1183,7 +1527,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) if (ce_state->dest_ring) { dma_free_coherent(ar->dev, (ce_state->dest_ring->nentries * - sizeof(struct ce_desc) + + sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN), ce_state->dest_ring->base_addr_owner_space, ce_state->dest_ring->base_addr_ce_space); @@ -1194,6 +1538,14 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) ce_state->dest_ring = NULL; } +void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + + ce_state->ops->ce_free_pipe(ar, ce_id); +} + void ath10k_ce_dump_registers(struct ath10k *ar, struct ath10k_fw_crash_data *crash_data) { @@ -1232,3 +1584,99 @@ void ath10k_ce_dump_registers(struct ath10k *ar, 
spin_unlock_bh(&ce->ce_lock); } + +static const struct ath10k_ce_ops ce_ops = { + .ce_alloc_src_ring = ath10k_ce_alloc_src_ring, + .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring, + .ce_rx_post_buf = __ath10k_ce_rx_post_buf, + .ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock, + .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next, + .ce_extract_desc_data = ath10k_ce_extract_desc_data, + .ce_free_pipe = _ath10k_ce_free_pipe, + .ce_send_nolock = _ath10k_ce_send_nolock, +}; + +static const struct ath10k_ce_ops ce_64_ops = { + .ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64, + .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64, + .ce_rx_post_buf = __ath10k_ce_rx_post_buf_64, + .ce_completed_recv_next_nolock = + _ath10k_ce_completed_recv_next_nolock_64, + .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64, + .ce_extract_desc_data = ath10k_ce_extract_desc_data_64, + .ce_free_pipe = _ath10k_ce_free_pipe_64, + .ce_send_nolock = _ath10k_ce_send_nolock_64, +}; + +static void ath10k_ce_set_ops(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state) +{ + switch (ar->hw_rev) { + case ATH10K_HW_WCN3990: + ce_state->ops = &ce_64_ops; + break; + default: + ce_state->ops = &ce_ops; + break; + } +} + +int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, + const struct ce_attr *attr) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + int ret; + + ath10k_ce_set_ops(ar, ce_state); + /* Make sure there's enough CE ringbuffer entries for HTT TX to avoid + * additional TX locking checks. + * + * For the lack of a better place do the check here. + */ + BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC > + (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); + BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC > + (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); + BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC > + (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); + + ce_state->ar = ar; + ce_state->id = ce_id; + ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id); + ce_state->attr_flags = attr->flags; + ce_state->src_sz_max = attr->src_sz_max; + + if (attr->src_nentries) + ce_state->send_cb = attr->send_cb; + + if (attr->dest_nentries) + ce_state->recv_cb = attr->recv_cb; + + if (attr->src_nentries) { + ce_state->src_ring = + ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr); + if (IS_ERR(ce_state->src_ring)) { + ret = PTR_ERR(ce_state->src_ring); + ath10k_err(ar, "failed to alloc CE src ring %d: %d\n", + ce_id, ret); + ce_state->src_ring = NULL; + return ret; + } + } + + if (attr->dest_nentries) { + ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar, + ce_id, + attr); + if (IS_ERR(ce_state->dest_ring)) { + ret = PTR_ERR(ce_state->dest_ring); + ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n", + ce_id, ret); + ce_state->dest_ring = NULL; + return ret; + } + } + + return 0; +} diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index bdec794704d9..2c3c8f5e90ea 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -36,6 +36,10 @@ struct ath10k_ce_pipe; #define CE_DESC_FLAGS_GATHER (1 << 0) #define CE_DESC_FLAGS_BYTE_SWAP (1 << 1) +#define CE_WCN3990_DESC_FLAGS_GATHER BIT(31) + +#define CE_DESC_FLAGS_GET_MASK GENMASK(4, 0) +#define CE_DESC_37BIT_ADDR_MASK GENMASK_ULL(37, 0) /* Following desc flags are used in QCA99X0 */ #define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2) @@ -50,6 +54,16 @@ struct ce_desc { __le16 flags; /* %CE_DESC_FLAGS_ */ }; +struct ce_desc_64 { + __le64 addr; + __le16 nbytes; /* length in register map */ + __le16 flags; /* fw_metadata_high */ + __le32 toeplitz_hash_result; +}; + +#define CE_DESC_SIZE sizeof(struct ce_desc) +#define CE_DESC_SIZE_64 sizeof(struct ce_desc_64) + struct ath10k_ce_ring { /* Number of entries in this ring; must be power of 2 */ unsigned int nentries; @@ -117,6 +131,7 @@ struct ath10k_ce_pipe { unsigned int src_sz_max; struct ath10k_ce_ring *src_ring; struct ath10k_ce_ring *dest_ring; + const struct ath10k_ce_ops *ops; }; /* Copy Engine settable attributes */ @@ -160,7 +175,7 @@ struct ath10k_ce { */ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, void *per_transfer_send_context, - u32 buffer, + dma_addr_t buffer, unsigned int nbytes, /* 14 bits */ unsigned int transfer_id, @@ -168,7 +183,7 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, void *per_transfer_context, - u32 buffer, + dma_addr_t buffer, unsigned int nbytes, unsigned int transfer_id, unsigned int flags); @@ -180,8 +195,8 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe); /*==================Recv=======================*/ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe); -int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr); -int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr); +int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, + dma_addr_t paddr); void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries); /* recv flags */ @@ -222,7 +237,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id); */ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp); + dma_addr_t *bufferp); int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, @@ -235,7 +250,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, */ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, - u32 *bufferp, + dma_addr_t *bufferp, unsigned int *nbytesp, unsigned int *transfer_idp); @@ -281,6 +296,32 @@ struct ce_attr { void (*recv_cb)(struct ath10k_ce_pipe *); }; +struct ath10k_ce_ops { + struct ath10k_ce_ring *(*ce_alloc_src_ring)(struct ath10k *ar, + u32 ce_id, + const struct ce_attr *attr); + struct ath10k_ce_ring *(*ce_alloc_dst_ring)(struct ath10k *ar, + u32 ce_id, + const struct ce_attr *attr); + int (*ce_rx_post_buf)(struct ath10k_ce_pipe *pipe, void *ctx, + dma_addr_t paddr); + int (*ce_completed_recv_next_nolock)(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + u32 *nbytesp); + int (*ce_revoke_recv_next)(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + dma_addr_t *nbytesp); + void (*ce_extract_desc_data)(struct ath10k *ar, + struct ath10k_ce_ring *src_ring, + u32 sw_index, dma_addr_t *bufferp, + 
u32 *nbytesp, u32 *transfer_idp); + void (*ce_free_pipe)(struct ath10k *ar, int ce_id); + int (*ce_send_nolock)(struct ath10k_ce_pipe *pipe, + void *per_transfer_context, + dma_addr_t buffer, u32 nbytes, + u32 transfer_id, u32 flags); +}; + static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) { return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id; @@ -292,6 +333,12 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) #define CE_DEST_RING_TO_DESC(baddr, idx) \ (&(((struct ce_desc *)baddr)[idx])) +#define CE_SRC_RING_TO_DESC_64(baddr, idx) \ + (&(((struct ce_desc_64 *)baddr)[idx])) + +#define CE_DEST_RING_TO_DESC_64(baddr, idx) \ + (&(((struct ce_desc_64 *)baddr)[idx])) + /* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */ #define CE_RING_DELTA(nentries_mask, fromidx, toidx) \ (((int)(toidx) - (int)(fromidx)) & (nentries_mask)) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index b29fdbd21ead..b0fdc1023619 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -32,6 +32,7 @@ #include "htt.h" #include "testmode.h" #include "wmi-ops.h" +#include "coredump.h" unsigned int ath10k_debug_mask; static unsigned int ath10k_cryptmode_param; @@ -39,17 +40,25 @@ static bool uart_print; static bool skip_otp; static bool rawmode; +/* Enable ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA + * by default. 
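A quick illustration of the new 64-bit descriptor from the ce.h hunk above: the DMA address is bounded by CE_DESC_37BIT_ADDR_MASK (GENMASK_ULL(37, 0)) before it becomes target-visible. The helper below is illustrative only; the actual masking in the _64 send path is not part of this excerpt.

static void ce_desc_64_fill(struct ce_desc_64 *desc, dma_addr_t paddr,
                            u16 nbytes, u16 flags)
{
        /* Keep only the address bits the WCN3990 target can use. */
        desc->addr = __cpu_to_le64(paddr & CE_DESC_37BIT_ADDR_MASK);
        desc->nbytes = __cpu_to_le16(nbytes);
        desc->flags = __cpu_to_le16(flags);
        desc->toeplitz_hash_result = 0;
}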
+ */ +unsigned long ath10k_coredump_mask = 0x3; + +/* FIXME: most of these should be readonly */ module_param_named(debug_mask, ath10k_debug_mask, uint, 0644); module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644); module_param(uart_print, bool, 0644); module_param(skip_otp, bool, 0644); module_param(rawmode, bool, 0644); +module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444); MODULE_PARM_DESC(debug_mask, "Debugging mask"); MODULE_PARM_DESC(uart_print, "Uart target debugging"); MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode"); MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software"); MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath"); +MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file"); static const struct ath10k_hw_params ath10k_hw_params_list[] = { { @@ -75,6 +84,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, + .num_peers = TARGET_TLV_NUM_PEERS, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, }, { .id = QCA9887_HW_1_0_VERSION, @@ -99,6 +113,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, + .num_peers = TARGET_TLV_NUM_PEERS, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, }, { .id = QCA6174_HW_2_1_VERSION, @@ -122,6 +141,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, + .num_peers = TARGET_TLV_NUM_PEERS, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, }, { .id = QCA6174_HW_2_1_VERSION, @@ -145,6 +169,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, + .num_peers = TARGET_TLV_NUM_PEERS, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, }, { .id = QCA6174_HW_3_0_VERSION, @@ -168,6 +197,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, + .num_peers = TARGET_TLV_NUM_PEERS, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, }, { .id = QCA6174_HW_3_2_VERSION, @@ -194,6 +228,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, + .num_peers = TARGET_TLV_NUM_PEERS, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, }, { .id = QCA99X0_HW_2_0_DEV_VERSION, @@ -223,6 +262,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 11, + .num_peers = TARGET_TLV_NUM_PEERS, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, }, { .id = QCA9984_HW_1_0_DEV_VERSION, @@ -257,6 +301,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 1560, .vht160_mcs_tx_highest = 
1560, .n_cipher_suites = 11, + .num_peers = TARGET_TLV_NUM_PEERS, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, }, { .id = QCA9888_HW_2_0_DEV_VERSION, @@ -290,6 +339,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 780, .vht160_mcs_tx_highest = 780, .n_cipher_suites = 11, + .num_peers = TARGET_TLV_NUM_PEERS, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, }, { .id = QCA9377_HW_1_0_DEV_VERSION, @@ -313,6 +367,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, + .num_peers = TARGET_TLV_NUM_PEERS, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, }, { .id = QCA9377_HW_1_1_DEV_VERSION, @@ -338,6 +397,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, + .num_peers = TARGET_TLV_NUM_PEERS, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, }, { .id = QCA4019_HW_1_0_DEV_VERSION, @@ -368,6 +432,31 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 11, + .num_peers = TARGET_TLV_NUM_PEERS, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + }, + { + .id = WCN3990_HW_1_0_DEV_VERSION, + .dev_id = 0, + .name = "wcn3990 hw1.0", + .continuous_frag_desc = true, + .tx_chain_mask = 0x7, + .rx_chain_mask = 0x7, + .max_spatial_stream = 4, + .fw = { + .dir = WCN3990_HW_1_0_FW_DIR, + }, + .sw_decrypt_mcast_mgmt = true, + .hw_ops = &wcn3990_ops, + .decap_align_bytes = 1, + .num_peers = TARGET_HL_10_TLV_NUM_PEERS, + .ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT, + .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES, + .target_64bit = true, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC, }, }; @@ -390,6 +479,8 @@ static const char *const ath10k_core_fw_feature_str[] = { [ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war", [ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast", [ATH10K_FW_FEATURE_NO_PS] = "no-ps", + [ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference", + [ATH10K_FW_FEATURE_NON_BMI] = "non-bmi", }; static unsigned int ath10k_core_get_fw_feature_str(char *buf, @@ -860,6 +951,28 @@ static int ath10k_core_check_smbios(struct ath10k *ar) return 0; } +static int ath10k_core_check_dt(struct ath10k *ar) +{ + struct device_node *node; + const char *variant = NULL; + + node = ar->dev->of_node; + if (!node) + return -ENOENT; + + of_property_read_string(node, "qcom,ath10k-calibration-variant", + &variant); + if (!variant) + return -ENODATA; + + if (strscpy(ar->id.bdf_ext, variant, sizeof(ar->id.bdf_ext)) < 0) + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "bdf variant string is longer than the buffer can accommodate (variant: %s)\n", + variant); + + return 0; +} + static int ath10k_download_and_run_otp(struct ath10k *ar) { u32 result, address = ar->hw_params.patch_load_addr; @@ -1163,7 +1276,10 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, len -= sizeof(*hdr); data = hdr->data; - if (len < ALIGN(ie_len, 4)) { + /* jump over the padding */ + ie_len = 
ALIGN(ie_len, 4); + + if (len < ie_len) { ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n", ie_id, ie_len, len); ret = -EINVAL; @@ -1202,9 +1318,6 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, goto out; } - /* jump over the padding */ - ie_len = ALIGN(ie_len, 4); - len -= ie_len; data += ie_len; } @@ -1231,19 +1344,19 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name, /* strlen(',variant=') + strlen(ar->id.bdf_ext) */ char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 }; + if (ar->id.bdf_ext[0] != '\0') + scnprintf(variant, sizeof(variant), ",variant=%s", + ar->id.bdf_ext); + if (ar->id.bmi_ids_valid) { scnprintf(name, name_len, - "bus=%s,bmi-chip-id=%d,bmi-board-id=%d", + "bus=%s,bmi-chip-id=%d,bmi-board-id=%d%s", ath10k_bus_str(ar->hif.bus), ar->id.bmi_chip_id, - ar->id.bmi_board_id); + ar->id.bmi_board_id, variant); goto out; } - if (ar->id.bdf_ext[0] != '\0') - scnprintf(variant, sizeof(variant), ",variant=%s", - ar->id.bdf_ext); - scnprintf(name, name_len, "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s", ath10k_bus_str(ar->hif.bus), @@ -1335,6 +1448,9 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, len -= sizeof(*hdr); data += sizeof(*hdr); + /* jump over the padding */ + ie_len = ALIGN(ie_len, 4); + if (len < ie_len) { ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n", ie_id, len, ie_len); @@ -1440,15 +1556,12 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, break; } - /* jump over the padding */ - ie_len = ALIGN(ie_len, 4); - len -= ie_len; data += ie_len; } - if (!fw_file->firmware_data || - !fw_file->firmware_len) { + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, fw_file->fw_features) && + (!fw_file->firmware_data || !fw_file->firmware_len)) { ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n", ar->hw_params.fw.dir, name); ret = -ENOMEDIUM; @@ -1474,6 +1587,7 @@ static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name, break; case ATH10K_BUS_PCI: case ATH10K_BUS_AHB: + case ATH10K_BUS_SNOC: scnprintf(fw_name, fw_name_len, "%s-%d.bin", ATH10K_FW_FILE_BASE, fw_api); break; @@ -1759,7 +1873,7 @@ static void ath10k_core_restart(struct work_struct *work) mutex_unlock(&ar->conf_mutex); - ret = ath10k_debug_fw_devcoredump(ar); + ret = ath10k_coredump_submit(ar); if (ret) ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d", ret); @@ -2001,43 +2115,47 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, ar->running_fw = fw; - ath10k_bmi_start(ar); - - if (ath10k_init_configure_target(ar)) { - status = -EINVAL; - goto err; - } + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->running_fw->fw_file.fw_features)) { + ath10k_bmi_start(ar); - status = ath10k_download_cal_data(ar); - if (status) - goto err; + if (ath10k_init_configure_target(ar)) { + status = -EINVAL; + goto err; + } - /* Some of of qca988x solutions are having global reset issue - * during target initialization. Bypassing PLL setting before - * downloading firmware and letting the SoC run on REF_CLK is - * fixing the problem. Corresponding firmware change is also needed - * to set the clock source once the target is initialized. 
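The two IE-parsing hunks above (board data and firmware API) make the same fix: ie_len is rounded up to its 4-byte-padded size before the remaining-length check instead of after the IE has been consumed, so a buffer long enough for the payload but too short for the padding is rejected up front rather than underflowing len on the final advance. A minimal sketch of the resulting walk (header struct simplified; the real one lives elsewhere in the driver):

struct fw_ie {
        __le32 ie_id;
        __le32 ie_len;
        u8 data[];
};

static int walk_ies(const u8 *data, size_t len)
{
        while (len >= sizeof(struct fw_ie)) {
                const struct fw_ie *hdr = (const void *)data;
                size_t ie_len = le32_to_cpu(hdr->ie_len);

                data += sizeof(*hdr);
                len -= sizeof(*hdr);

                /* jump over the padding before the bounds check */
                ie_len = ALIGN(ie_len, 4);
                if (len < ie_len)
                        return -EINVAL;

                /* ... consume hdr->data here ... */

                data += ie_len;
                len -= ie_len;
        }

        return 0;
}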
- */ - if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT, - ar->running_fw->fw_file.fw_features)) { - status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1); - if (status) { - ath10k_err(ar, "could not write to skip_clock_init: %d\n", - status); + status = ath10k_download_cal_data(ar); + if (status) goto err; + + /* Some of of qca988x solutions are having global reset issue + * during target initialization. Bypassing PLL setting before + * downloading firmware and letting the SoC run on REF_CLK is + * fixing the problem. Corresponding firmware change is also + * needed to set the clock source once the target is + * initialized. + */ + if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT, + ar->running_fw->fw_file.fw_features)) { + status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1); + if (status) { + ath10k_err(ar, "could not write to skip_clock_init: %d\n", + status); + goto err; + } } - } - status = ath10k_download_fw(ar); - if (status) - goto err; + status = ath10k_download_fw(ar); + if (status) + goto err; - status = ath10k_init_uart(ar); - if (status) - goto err; + status = ath10k_init_uart(ar); + if (status) + goto err; - if (ar->hif.bus == ATH10K_BUS_SDIO) - ath10k_init_sdio(ar); + if (ar->hif.bus == ATH10K_BUS_SDIO) + ath10k_init_sdio(ar); + } ar->htc.htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete; @@ -2048,9 +2166,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, goto err; } - status = ath10k_bmi_done(ar); - if (status) - goto err; + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->running_fw->fw_file.fw_features)) { + status = ath10k_bmi_done(ar); + if (status) + goto err; + } status = ath10k_wmi_attach(ar); if (status) { @@ -2293,19 +2414,35 @@ static int ath10k_core_probe_fw(struct ath10k *ar) return ret; } - memset(&target_info, 0, sizeof(target_info)); - if (ar->hif.bus == ATH10K_BUS_SDIO) + switch (ar->hif.bus) { + case ATH10K_BUS_SDIO: + memset(&target_info, 0, sizeof(target_info)); ret = ath10k_bmi_get_target_info_sdio(ar, &target_info); - else + if (ret) { + ath10k_err(ar, "could not get target info (%d)\n", ret); + goto err_power_down; + } + ar->target_version = target_info.version; + ar->hw->wiphy->hw_version = target_info.version; + break; + case ATH10K_BUS_PCI: + case ATH10K_BUS_AHB: + case ATH10K_BUS_USB: + memset(&target_info, 0, sizeof(target_info)); ret = ath10k_bmi_get_target_info(ar, &target_info); - if (ret) { - ath10k_err(ar, "could not get target info (%d)\n", ret); - goto err_power_down; + if (ret) { + ath10k_err(ar, "could not get target info (%d)\n", ret); + goto err_power_down; + } + ar->target_version = target_info.version; + ar->hw->wiphy->hw_version = target_info.version; + break; + case ATH10K_BUS_SNOC: + break; + default: + ath10k_err(ar, "incorrect hif bus type: %d\n", ar->hif.bus); } - ar->target_version = target_info.version; - ar->hw->wiphy->hw_version = target_info.version; - ret = ath10k_init_hw_params(ar); if (ret) { ath10k_err(ar, "could not get hw params (%d)\n", ret); @@ -2325,33 +2462,40 @@ static int ath10k_core_probe_fw(struct ath10k *ar) ath10k_debug_print_hwfw_info(ar); - ret = ath10k_core_pre_cal_download(ar); - if (ret) { - /* pre calibration data download is not necessary - * for all the chipsets. Ignore failures and continue. 
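The ATH10K_FW_FEATURE_NON_BMI hunks above and below all repeat the same guard around the BMI-era boot steps (configure target, calibration, firmware download, UART init, bmi_done, OTP, board file, code-swap). Purely as an illustration of the pattern — the patch open-codes the test_bit() at every call site, and the fw_file consulted varies between running_fw and normal_mode_fw depending on context:

static bool ath10k_fw_needs_bmi(struct ath10k *ar)
{
        /* Hypothetical helper; not part of the patch. */
        return !test_bit(ATH10K_FW_FEATURE_NON_BMI,
                         ar->normal_mode_fw.fw_file.fw_features);
}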
- */ - ath10k_dbg(ar, ATH10K_DBG_BOOT, - "could not load pre cal data: %d\n", ret); - } + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->normal_mode_fw.fw_file.fw_features)) { + ret = ath10k_core_pre_cal_download(ar); + if (ret) { + /* pre calibration data download is not necessary + * for all the chipsets. Ignore failures and continue. + */ + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "could not load pre cal data: %d\n", ret); + } - ret = ath10k_core_get_board_id_from_otp(ar); - if (ret && ret != -EOPNOTSUPP) { - ath10k_err(ar, "failed to get board id from otp: %d\n", - ret); - goto err_free_firmware_files; - } + ret = ath10k_core_get_board_id_from_otp(ar); + if (ret && ret != -EOPNOTSUPP) { + ath10k_err(ar, "failed to get board id from otp: %d\n", + ret); + goto err_free_firmware_files; + } - ret = ath10k_core_check_smbios(ar); - if (ret) - ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not set.\n"); + ret = ath10k_core_check_smbios(ar); + if (ret) + ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n"); - ret = ath10k_core_fetch_board_file(ar); - if (ret) { - ath10k_err(ar, "failed to fetch board file: %d\n", ret); - goto err_free_firmware_files; - } + ret = ath10k_core_check_dt(ar); + if (ret) + ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n"); + + ret = ath10k_core_fetch_board_file(ar); + if (ret) { + ath10k_err(ar, "failed to fetch board file: %d\n", ret); + goto err_free_firmware_files; + } - ath10k_debug_print_board_info(ar); + ath10k_debug_print_board_info(ar); + } ret = ath10k_core_init_firmware_features(ar); if (ret) { @@ -2360,11 +2504,15 @@ static int ath10k_core_probe_fw(struct ath10k *ar) goto err_free_firmware_files; } - ret = ath10k_swap_code_seg_init(ar, &ar->normal_mode_fw.fw_file); - if (ret) { - ath10k_err(ar, "failed to initialize code swap segment: %d\n", - ret); - goto err_free_firmware_files; + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->normal_mode_fw.fw_file.fw_features)) { + ret = ath10k_swap_code_seg_init(ar, + &ar->normal_mode_fw.fw_file); + if (ret) { + ath10k_err(ar, "failed to initialize code swap segment: %d\n", + ret); + goto err_free_firmware_files; + } } mutex_lock(&ar->conf_mutex); @@ -2416,10 +2564,16 @@ static void ath10k_core_register_work(struct work_struct *work) goto err_release_fw; } + status = ath10k_coredump_register(ar); + if (status) { + ath10k_err(ar, "unable to register coredump\n"); + goto err_unregister_mac; + } + status = ath10k_debug_register(ar); if (status) { ath10k_err(ar, "unable to initialize debugfs\n"); - goto err_unregister_mac; + goto err_unregister_coredump; } status = ath10k_spectral_create(ar); @@ -2442,6 +2596,8 @@ err_spectral_destroy: ath10k_spectral_destroy(ar); err_debug_destroy: ath10k_debug_destroy(ar); +err_unregister_coredump: + ath10k_coredump_unregister(ar); err_unregister_mac: ath10k_mac_unregister(ar); err_release_fw: @@ -2596,12 +2752,19 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, init_dummy_netdev(&ar->napi_dev); - ret = ath10k_debug_create(ar); + ret = ath10k_coredump_create(ar); if (ret) goto err_free_aux_wq; + ret = ath10k_debug_create(ar); + if (ret) + goto err_free_coredump; + return ar; +err_free_coredump: + ath10k_coredump_destroy(ar); + err_free_aux_wq: destroy_workqueue(ar->workqueue_aux); err_free_wq: @@ -2623,6 +2786,7 @@ void ath10k_core_destroy(struct ath10k *ar) destroy_workqueue(ar->workqueue_aux); ath10k_debug_destroy(ar); + ath10k_coredump_destroy(ar); ath10k_htt_tx_destroy(&ar->htt); ath10k_wmi_free_host_mem(ar); 
ath10k_mac_destroy(ar); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 643041ef3271..fe6b30356d3b 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -67,7 +67,6 @@ /* NAPI poll budget */ #define ATH10K_NAPI_BUDGET 64 -#define ATH10K_NAPI_QUOTA_LIMIT 60 /* SMBIOS type containing Board Data File Name Extension */ #define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8 @@ -93,6 +92,7 @@ enum ath10k_bus { ATH10K_BUS_AHB, ATH10K_BUS_SDIO, ATH10K_BUS_USB, + ATH10K_BUS_SNOC, }; static inline const char *ath10k_bus_str(enum ath10k_bus bus) @@ -106,6 +106,8 @@ static inline const char *ath10k_bus_str(enum ath10k_bus bus) return "sdio"; case ATH10K_BUS_USB: return "usb"; + case ATH10K_BUS_SNOC: + return "snoc"; } return "unknown"; @@ -364,11 +366,11 @@ struct ath10k_sta { struct rate_info txrate; struct work_struct update_wk; + u64 rx_duration; #ifdef CONFIG_MAC80211_DEBUGFS /* protected by conf_mutex */ bool aggr_mode; - u64 rx_duration; #endif }; @@ -458,14 +460,17 @@ struct ath10k_ce_crash_hdr { struct ath10k_ce_crash_data entries[]; }; +#define MAX_MEM_DUMP_TYPE 5 + /* used for crash-dump storage, protected by data-lock */ struct ath10k_fw_crash_data { - bool crashed_since_read; - guid_t guid; - struct timespec timestamp; + struct timespec64 timestamp; __le32 registers[REG_DUMP_COUNT_QCA988X]; struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX]; + + u8 *ramdump_buf; + size_t ramdump_buf_len; }; struct ath10k_debug { @@ -488,12 +493,9 @@ struct ath10k_debug { /* protected by conf_mutex */ u64 fw_dbglog_mask; u32 fw_dbglog_level; - u32 pktlog_filter; u32 reg_addr; u32 nf_cal_period; void *cal_data; - - struct ath10k_fw_crash_data *fw_crash_data; }; enum ath10k_state { @@ -615,6 +617,12 @@ enum ath10k_fw_features { /* Firmware does not support power save in station mode. */ ATH10K_FW_FEATURE_NO_PS = 17, + /* Firmware allows management tx by reference instead of by value. */ + ATH10K_FW_FEATURE_MGMT_TX_BY_REF = 18, + + /* Firmware load is done externally, not by bmi */ + ATH10K_FW_FEATURE_NON_BMI = 19, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; @@ -963,6 +971,14 @@ struct ath10k { } spectral; #endif + u32 pktlog_filter; + +#ifdef CONFIG_DEV_COREDUMP + struct { + struct ath10k_fw_crash_data *fw_crash_data; + } coredump; +#endif + struct { /* protected by conf_mutex */ struct ath10k_fw_components utf_mode_fw; @@ -1016,6 +1032,8 @@ static inline bool ath10k_peer_stats_enabled(struct ath10k *ar) return false; } +extern unsigned long ath10k_coredump_mask; + struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, enum ath10k_bus bus, enum ath10k_hw_rev hw_rev, diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c new file mode 100644 index 000000000000..4dde126dab17 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/coredump.c @@ -0,0 +1,993 @@ +/* + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
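On the new coredump_mask parameter introduced in the core.c hunk above: the default of 0x3 corresponds to the first two dump types declared in coredump.h further below. A sketch of the decomposition (not driver code):

static unsigned long ath10k_coredump_default_mask(void)
{
        return BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) |    /* bit 0 */
               BIT(ATH10K_FW_CRASH_DUMP_CE_DATA);       /* bit 1: 0x3 total */
}

Loading with coredump_mask=0x7 would additionally set BIT(ATH10K_FW_CRASH_DUMP_RAM_DATA), which is what makes ath10k_coredump_register() allocate the RAM-dump buffer. Note the parameter is registered with mode 0444, so unlike debug_mask it cannot be changed at runtime through sysfs.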
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "coredump.h" + +#include <linux/devcoredump.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/utsname.h> + +#include "debug.h" +#include "hw.h" + +static const struct ath10k_mem_section qca6174_hw21_register_sections[] = { + {0x800, 0x810}, + {0x820, 0x82C}, + {0x830, 0x8F4}, + {0x90C, 0x91C}, + {0xA14, 0xA18}, + {0xA84, 0xA94}, + {0xAA8, 0xAD4}, + {0xADC, 0xB40}, + {0x1000, 0x10A4}, + {0x10BC, 0x111C}, + {0x1134, 0x1138}, + {0x1144, 0x114C}, + {0x1150, 0x115C}, + {0x1160, 0x1178}, + {0x1240, 0x1260}, + {0x2000, 0x207C}, + {0x3000, 0x3014}, + {0x4000, 0x4014}, + {0x5000, 0x5124}, + {0x6000, 0x6040}, + {0x6080, 0x60CC}, + {0x6100, 0x611C}, + {0x6140, 0x61D8}, + {0x6200, 0x6238}, + {0x6240, 0x628C}, + {0x62C0, 0x62EC}, + {0x6380, 0x63E8}, + {0x6400, 0x6440}, + {0x6480, 0x64CC}, + {0x6500, 0x651C}, + {0x6540, 0x6580}, + {0x6600, 0x6638}, + {0x6640, 0x668C}, + {0x66C0, 0x66EC}, + {0x6780, 0x67E8}, + {0x7080, 0x708C}, + {0x70C0, 0x70C8}, + {0x7400, 0x741C}, + {0x7440, 0x7454}, + {0x7800, 0x7818}, + {0x8000, 0x8004}, + {0x8010, 0x8064}, + {0x8080, 0x8084}, + {0x80A0, 0x80A4}, + {0x80C0, 0x80C4}, + {0x80E0, 0x80F4}, + {0x8100, 0x8104}, + {0x8110, 0x812C}, + {0x9000, 0x9004}, + {0x9800, 0x982C}, + {0x9830, 0x9838}, + {0x9840, 0x986C}, + {0x9870, 0x9898}, + {0x9A00, 0x9C00}, + {0xD580, 0xD59C}, + {0xF000, 0xF0E0}, + {0xF140, 0xF190}, + {0xF250, 0xF25C}, + {0xF260, 0xF268}, + {0xF26C, 0xF2A8}, + {0x10008, 0x1000C}, + {0x10014, 0x10018}, + {0x1001C, 0x10020}, + {0x10024, 0x10028}, + {0x10030, 0x10034}, + {0x10040, 0x10054}, + {0x10058, 0x1007C}, + {0x10080, 0x100C4}, + {0x100C8, 0x10114}, + {0x1012C, 0x10130}, + {0x10138, 0x10144}, + {0x10200, 0x10220}, + {0x10230, 0x10250}, + {0x10260, 0x10280}, + {0x10290, 0x102B0}, + {0x102C0, 0x102DC}, + {0x102E0, 0x102F4}, + {0x102FC, 0x1037C}, + {0x10380, 0x10390}, + {0x10800, 0x10828}, + {0x10840, 0x10844}, + {0x10880, 0x10884}, + {0x108C0, 0x108E8}, + {0x10900, 0x10928}, + {0x10940, 0x10944}, + {0x10980, 0x10984}, + {0x109C0, 0x109E8}, + {0x10A00, 0x10A28}, + {0x10A40, 0x10A50}, + {0x11000, 0x11028}, + {0x11030, 0x11034}, + {0x11038, 0x11068}, + {0x11070, 0x11074}, + {0x11078, 0x110A8}, + {0x110B0, 0x110B4}, + {0x110B8, 0x110E8}, + {0x110F0, 0x110F4}, + {0x110F8, 0x11128}, + {0x11138, 0x11144}, + {0x11178, 0x11180}, + {0x111B8, 0x111C0}, + {0x111F8, 0x11200}, + {0x11238, 0x1123C}, + {0x11270, 0x11274}, + {0x11278, 0x1127C}, + {0x112B0, 0x112B4}, + {0x112B8, 0x112BC}, + {0x112F0, 0x112F4}, + {0x112F8, 0x112FC}, + {0x11338, 0x1133C}, + {0x11378, 0x1137C}, + {0x113B8, 0x113BC}, + {0x113F8, 0x113FC}, + {0x11438, 0x11440}, + {0x11478, 0x11480}, + {0x114B8, 0x114BC}, + {0x114F8, 0x114FC}, + {0x11538, 0x1153C}, + {0x11578, 0x1157C}, + {0x115B8, 0x115BC}, + {0x115F8, 0x115FC}, + {0x11638, 0x1163C}, + {0x11678, 0x1167C}, + {0x116B8, 0x116BC}, + {0x116F8, 0x116FC}, + {0x11738, 0x1173C}, + {0x11778, 0x1177C}, + {0x117B8, 0x117BC}, + {0x117F8, 0x117FC}, + {0x17000, 0x1701C}, + {0x17020, 0x170AC}, + {0x18000, 0x18050}, + 
{0x18054, 0x18074}, + {0x18080, 0x180D4}, + {0x180DC, 0x18104}, + {0x18108, 0x1813C}, + {0x18144, 0x18148}, + {0x18168, 0x18174}, + {0x18178, 0x18180}, + {0x181C8, 0x181E0}, + {0x181E4, 0x181E8}, + {0x181EC, 0x1820C}, + {0x1825C, 0x18280}, + {0x18284, 0x18290}, + {0x18294, 0x182A0}, + {0x18300, 0x18304}, + {0x18314, 0x18320}, + {0x18328, 0x18350}, + {0x1835C, 0x1836C}, + {0x18370, 0x18390}, + {0x18398, 0x183AC}, + {0x183BC, 0x183D8}, + {0x183DC, 0x183F4}, + {0x18400, 0x186F4}, + {0x186F8, 0x1871C}, + {0x18720, 0x18790}, + {0x19800, 0x19830}, + {0x19834, 0x19840}, + {0x19880, 0x1989C}, + {0x198A4, 0x198B0}, + {0x198BC, 0x19900}, + {0x19C00, 0x19C88}, + {0x19D00, 0x19D20}, + {0x19E00, 0x19E7C}, + {0x19E80, 0x19E94}, + {0x19E98, 0x19EAC}, + {0x19EB0, 0x19EBC}, + {0x19F70, 0x19F74}, + {0x19F80, 0x19F8C}, + {0x19FA0, 0x19FB4}, + {0x19FC0, 0x19FD8}, + {0x1A000, 0x1A200}, + {0x1A204, 0x1A210}, + {0x1A228, 0x1A22C}, + {0x1A230, 0x1A248}, + {0x1A250, 0x1A270}, + {0x1A280, 0x1A290}, + {0x1A2A0, 0x1A2A4}, + {0x1A2C0, 0x1A2EC}, + {0x1A300, 0x1A3BC}, + {0x1A3F0, 0x1A3F4}, + {0x1A3F8, 0x1A434}, + {0x1A438, 0x1A444}, + {0x1A448, 0x1A468}, + {0x1A580, 0x1A58C}, + {0x1A644, 0x1A654}, + {0x1A670, 0x1A698}, + {0x1A6AC, 0x1A6B0}, + {0x1A6D0, 0x1A6D4}, + {0x1A6EC, 0x1A70C}, + {0x1A710, 0x1A738}, + {0x1A7C0, 0x1A7D0}, + {0x1A7D4, 0x1A7D8}, + {0x1A7DC, 0x1A7E4}, + {0x1A7F0, 0x1A7F8}, + {0x1A888, 0x1A89C}, + {0x1A8A8, 0x1A8AC}, + {0x1A8C0, 0x1A8DC}, + {0x1A8F0, 0x1A8FC}, + {0x1AE04, 0x1AE08}, + {0x1AE18, 0x1AE24}, + {0x1AF80, 0x1AF8C}, + {0x1AFA0, 0x1AFB4}, + {0x1B000, 0x1B200}, + {0x1B284, 0x1B288}, + {0x1B2D0, 0x1B2D8}, + {0x1B2DC, 0x1B2EC}, + {0x1B300, 0x1B340}, + {0x1B374, 0x1B378}, + {0x1B380, 0x1B384}, + {0x1B388, 0x1B38C}, + {0x1B404, 0x1B408}, + {0x1B420, 0x1B428}, + {0x1B440, 0x1B444}, + {0x1B448, 0x1B44C}, + {0x1B450, 0x1B458}, + {0x1B45C, 0x1B468}, + {0x1B584, 0x1B58C}, + {0x1B68C, 0x1B690}, + {0x1B6AC, 0x1B6B0}, + {0x1B7F0, 0x1B7F8}, + {0x1C800, 0x1CC00}, + {0x1CE00, 0x1CE04}, + {0x1CF80, 0x1CF84}, + {0x1D200, 0x1D800}, + {0x1E000, 0x20014}, + {0x20100, 0x20124}, + {0x21400, 0x217A8}, + {0x21800, 0x21BA8}, + {0x21C00, 0x21FA8}, + {0x22000, 0x223A8}, + {0x22400, 0x227A8}, + {0x22800, 0x22BA8}, + {0x22C00, 0x22FA8}, + {0x23000, 0x233A8}, + {0x24000, 0x24034}, + {0x26000, 0x26064}, + {0x27000, 0x27024}, + {0x34000, 0x3400C}, + {0x34400, 0x3445C}, + {0x34800, 0x3485C}, + {0x34C00, 0x34C5C}, + {0x35000, 0x3505C}, + {0x35400, 0x3545C}, + {0x35800, 0x3585C}, + {0x35C00, 0x35C5C}, + {0x36000, 0x3605C}, + {0x38000, 0x38064}, + {0x38070, 0x380E0}, + {0x3A000, 0x3A064}, + {0x40000, 0x400A4}, + {0x80000, 0x8000C}, + {0x80010, 0x80020}, +}; + +static const struct ath10k_mem_section qca6174_hw30_register_sections[] = { + {0x800, 0x810}, + {0x820, 0x82C}, + {0x830, 0x8F4}, + {0x90C, 0x91C}, + {0xA14, 0xA18}, + {0xA84, 0xA94}, + {0xAA8, 0xAD4}, + {0xADC, 0xB40}, + {0x1000, 0x10A4}, + {0x10BC, 0x111C}, + {0x1134, 0x1138}, + {0x1144, 0x114C}, + {0x1150, 0x115C}, + {0x1160, 0x1178}, + {0x1240, 0x1260}, + {0x2000, 0x207C}, + {0x3000, 0x3014}, + {0x4000, 0x4014}, + {0x5000, 0x5124}, + {0x6000, 0x6040}, + {0x6080, 0x60CC}, + {0x6100, 0x611C}, + {0x6140, 0x61D8}, + {0x6200, 0x6238}, + {0x6240, 0x628C}, + {0x62C0, 0x62EC}, + {0x6380, 0x63E8}, + {0x6400, 0x6440}, + {0x6480, 0x64CC}, + {0x6500, 0x651C}, + {0x6540, 0x6580}, + {0x6600, 0x6638}, + {0x6640, 0x668C}, + {0x66C0, 0x66EC}, + {0x6780, 0x67E8}, + {0x7080, 0x708C}, + {0x70C0, 0x70C8}, + {0x7400, 0x741C}, + {0x7440, 0x7454}, + {0x7800, 0x7818}, + {0x8000, 0x8004}, + 
{0x8010, 0x8064}, + {0x8080, 0x8084}, + {0x80A0, 0x80A4}, + {0x80C0, 0x80C4}, + {0x80E0, 0x80F4}, + {0x8100, 0x8104}, + {0x8110, 0x812C}, + {0x9000, 0x9004}, + {0x9800, 0x982C}, + {0x9830, 0x9838}, + {0x9840, 0x986C}, + {0x9870, 0x9898}, + {0x9A00, 0x9C00}, + {0xD580, 0xD59C}, + {0xF000, 0xF0E0}, + {0xF140, 0xF190}, + {0xF250, 0xF25C}, + {0xF260, 0xF268}, + {0xF26C, 0xF2A8}, + {0x10008, 0x1000C}, + {0x10014, 0x10018}, + {0x1001C, 0x10020}, + {0x10024, 0x10028}, + {0x10030, 0x10034}, + {0x10040, 0x10054}, + {0x10058, 0x1007C}, + {0x10080, 0x100C4}, + {0x100C8, 0x10114}, + {0x1012C, 0x10130}, + {0x10138, 0x10144}, + {0x10200, 0x10220}, + {0x10230, 0x10250}, + {0x10260, 0x10280}, + {0x10290, 0x102B0}, + {0x102C0, 0x102DC}, + {0x102E0, 0x102F4}, + {0x102FC, 0x1037C}, + {0x10380, 0x10390}, + {0x10800, 0x10828}, + {0x10840, 0x10844}, + {0x10880, 0x10884}, + {0x108C0, 0x108E8}, + {0x10900, 0x10928}, + {0x10940, 0x10944}, + {0x10980, 0x10984}, + {0x109C0, 0x109E8}, + {0x10A00, 0x10A28}, + {0x10A40, 0x10A50}, + {0x11000, 0x11028}, + {0x11030, 0x11034}, + {0x11038, 0x11068}, + {0x11070, 0x11074}, + {0x11078, 0x110A8}, + {0x110B0, 0x110B4}, + {0x110B8, 0x110E8}, + {0x110F0, 0x110F4}, + {0x110F8, 0x11128}, + {0x11138, 0x11144}, + {0x11178, 0x11180}, + {0x111B8, 0x111C0}, + {0x111F8, 0x11200}, + {0x11238, 0x1123C}, + {0x11270, 0x11274}, + {0x11278, 0x1127C}, + {0x112B0, 0x112B4}, + {0x112B8, 0x112BC}, + {0x112F0, 0x112F4}, + {0x112F8, 0x112FC}, + {0x11338, 0x1133C}, + {0x11378, 0x1137C}, + {0x113B8, 0x113BC}, + {0x113F8, 0x113FC}, + {0x11438, 0x11440}, + {0x11478, 0x11480}, + {0x114B8, 0x114BC}, + {0x114F8, 0x114FC}, + {0x11538, 0x1153C}, + {0x11578, 0x1157C}, + {0x115B8, 0x115BC}, + {0x115F8, 0x115FC}, + {0x11638, 0x1163C}, + {0x11678, 0x1167C}, + {0x116B8, 0x116BC}, + {0x116F8, 0x116FC}, + {0x11738, 0x1173C}, + {0x11778, 0x1177C}, + {0x117B8, 0x117BC}, + {0x117F8, 0x117FC}, + {0x17000, 0x1701C}, + {0x17020, 0x170AC}, + {0x18000, 0x18050}, + {0x18054, 0x18074}, + {0x18080, 0x180D4}, + {0x180DC, 0x18104}, + {0x18108, 0x1813C}, + {0x18144, 0x18148}, + {0x18168, 0x18174}, + {0x18178, 0x18180}, + {0x181C8, 0x181E0}, + {0x181E4, 0x181E8}, + {0x181EC, 0x1820C}, + {0x1825C, 0x18280}, + {0x18284, 0x18290}, + {0x18294, 0x182A0}, + {0x18300, 0x18304}, + {0x18314, 0x18320}, + {0x18328, 0x18350}, + {0x1835C, 0x1836C}, + {0x18370, 0x18390}, + {0x18398, 0x183AC}, + {0x183BC, 0x183D8}, + {0x183DC, 0x183F4}, + {0x18400, 0x186F4}, + {0x186F8, 0x1871C}, + {0x18720, 0x18790}, + {0x19800, 0x19830}, + {0x19834, 0x19840}, + {0x19880, 0x1989C}, + {0x198A4, 0x198B0}, + {0x198BC, 0x19900}, + {0x19C00, 0x19C88}, + {0x19D00, 0x19D20}, + {0x19E00, 0x19E7C}, + {0x19E80, 0x19E94}, + {0x19E98, 0x19EAC}, + {0x19EB0, 0x19EBC}, + {0x19F70, 0x19F74}, + {0x19F80, 0x19F8C}, + {0x19FA0, 0x19FB4}, + {0x19FC0, 0x19FD8}, + {0x1A000, 0x1A200}, + {0x1A204, 0x1A210}, + {0x1A228, 0x1A22C}, + {0x1A230, 0x1A248}, + {0x1A250, 0x1A270}, + {0x1A280, 0x1A290}, + {0x1A2A0, 0x1A2A4}, + {0x1A2C0, 0x1A2EC}, + {0x1A300, 0x1A3BC}, + {0x1A3F0, 0x1A3F4}, + {0x1A3F8, 0x1A434}, + {0x1A438, 0x1A444}, + {0x1A448, 0x1A468}, + {0x1A580, 0x1A58C}, + {0x1A644, 0x1A654}, + {0x1A670, 0x1A698}, + {0x1A6AC, 0x1A6B0}, + {0x1A6D0, 0x1A6D4}, + {0x1A6EC, 0x1A70C}, + {0x1A710, 0x1A738}, + {0x1A7C0, 0x1A7D0}, + {0x1A7D4, 0x1A7D8}, + {0x1A7DC, 0x1A7E4}, + {0x1A7F0, 0x1A7F8}, + {0x1A888, 0x1A89C}, + {0x1A8A8, 0x1A8AC}, + {0x1A8C0, 0x1A8DC}, + {0x1A8F0, 0x1A8FC}, + {0x1AE04, 0x1AE08}, + {0x1AE18, 0x1AE24}, + {0x1AF80, 0x1AF8C}, + {0x1AFA0, 0x1AFB4}, + {0x1B000, 0x1B200}, + 
{0x1B284, 0x1B288}, + {0x1B2D0, 0x1B2D8}, + {0x1B2DC, 0x1B2EC}, + {0x1B300, 0x1B340}, + {0x1B374, 0x1B378}, + {0x1B380, 0x1B384}, + {0x1B388, 0x1B38C}, + {0x1B404, 0x1B408}, + {0x1B420, 0x1B428}, + {0x1B440, 0x1B444}, + {0x1B448, 0x1B44C}, + {0x1B450, 0x1B458}, + {0x1B45C, 0x1B468}, + {0x1B584, 0x1B58C}, + {0x1B68C, 0x1B690}, + {0x1B6AC, 0x1B6B0}, + {0x1B7F0, 0x1B7F8}, + {0x1C800, 0x1CC00}, + {0x1CE00, 0x1CE04}, + {0x1CF80, 0x1CF84}, + {0x1D200, 0x1D800}, + {0x1E000, 0x20014}, + {0x20100, 0x20124}, + {0x21400, 0x217A8}, + {0x21800, 0x21BA8}, + {0x21C00, 0x21FA8}, + {0x22000, 0x223A8}, + {0x22400, 0x227A8}, + {0x22800, 0x22BA8}, + {0x22C00, 0x22FA8}, + {0x23000, 0x233A8}, + {0x24000, 0x24034}, + {0x26000, 0x26064}, + {0x27000, 0x27024}, + {0x34000, 0x3400C}, + {0x34400, 0x3445C}, + {0x34800, 0x3485C}, + {0x34C00, 0x34C5C}, + {0x35000, 0x3505C}, + {0x35400, 0x3545C}, + {0x35800, 0x3585C}, + {0x35C00, 0x35C5C}, + {0x36000, 0x3605C}, + {0x38000, 0x38064}, + {0x38070, 0x380E0}, + {0x3A000, 0x3A074}, + {0x40000, 0x400A4}, + {0x80000, 0x8000C}, + {0x80010, 0x80020}, +}; + +static const struct ath10k_mem_region qca6174_hw10_mem_regions[] = { + { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0x70000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + + /* RTC_SOC_BASE_ADDRESS */ + .start = 0x0, + + /* WLAN_MBOX_BASE_ADDRESS - RTC_SOC_BASE_ADDRESS */ + .len = 0x800 - 0x0, + + .name = "REG_PART1", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + + /* STEREO_BASE_ADDRESS */ + .start = 0x27000, + + /* USB_BASE_ADDRESS - STEREO_BASE_ADDRESS */ + .len = 0x60000 - 0x27000, + + .name = "REG_PART2", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, +}; + +static const struct ath10k_mem_region qca6174_hw21_mem_regions[] = { + { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0x70000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_AXI, + .start = 0xa0000, + .len = 0x18000, + .name = "AXI", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x800, + .len = 0x80020 - 0x800, + .name = "REG_TOTAL", + .section_table = { + .sections = qca6174_hw21_register_sections, + .size = ARRAY_SIZE(qca6174_hw21_register_sections), + }, + }, +}; + +static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = { + { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0x90000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_AXI, + .start = 0xa0000, + .len = 0x18000, + .name = "AXI", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x800, + .len = 0x80020 - 0x800, + .name = "REG_TOTAL", + .section_table = { + .sections = qca6174_hw30_register_sections, + .size = ARRAY_SIZE(qca6174_hw30_register_sections), + }, + }, + + /* IRAM dump must be put last */ + { + .type = ATH10K_MEM_REGION_TYPE_IRAM1, + .start = 0x00980000, + .len = 0x00080000, + .name = "IRAM1", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IRAM2, + .start = 0x00a00000, + .len = 0x00040000, + .name = "IRAM2", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, +}; + +static const struct ath10k_mem_region 
qca988x_hw20_mem_regions[] = { + { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0x50000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x4000, + .len = 0x2000, + .name = "REG_PART1", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x8000, + .len = 0x58000, + .name = "REG_PART2", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, +}; + +static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { + { + .hw_id = QCA6174_HW_1_0_VERSION, + .region_table = { + .regions = qca6174_hw10_mem_regions, + .size = ARRAY_SIZE(qca6174_hw10_mem_regions), + }, + }, + { + .hw_id = QCA6174_HW_1_1_VERSION, + .region_table = { + .regions = qca6174_hw10_mem_regions, + .size = ARRAY_SIZE(qca6174_hw10_mem_regions), + }, + }, + { + .hw_id = QCA6174_HW_1_3_VERSION, + .region_table = { + .regions = qca6174_hw10_mem_regions, + .size = ARRAY_SIZE(qca6174_hw10_mem_regions), + }, + }, + { + .hw_id = QCA6174_HW_2_1_VERSION, + .region_table = { + .regions = qca6174_hw21_mem_regions, + .size = ARRAY_SIZE(qca6174_hw21_mem_regions), + }, + }, + { + .hw_id = QCA6174_HW_3_0_VERSION, + .region_table = { + .regions = qca6174_hw30_mem_regions, + .size = ARRAY_SIZE(qca6174_hw30_mem_regions), + }, + }, + { + .hw_id = QCA6174_HW_3_2_VERSION, + .region_table = { + .regions = qca6174_hw30_mem_regions, + .size = ARRAY_SIZE(qca6174_hw30_mem_regions), + }, + }, + { + .hw_id = QCA9377_HW_1_1_DEV_VERSION, + .region_table = { + .regions = qca6174_hw30_mem_regions, + .size = ARRAY_SIZE(qca6174_hw30_mem_regions), + }, + }, + { + .hw_id = QCA988X_HW_2_0_VERSION, + .region_table = { + .regions = qca988x_hw20_mem_regions, + .size = ARRAY_SIZE(qca988x_hw20_mem_regions), + }, + }, +}; + +static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar) +{ + const struct ath10k_hw_mem_layout *hw; + const struct ath10k_mem_region *mem_region; + size_t size = 0; + int i; + + hw = ath10k_coredump_get_mem_layout(ar); + + if (!hw) + return 0; + + mem_region = &hw->region_table.regions[0]; + + for (i = 0; i < hw->region_table.size; i++) { + size += mem_region->len; + mem_region++; + } + + /* reserve space for the headers */ + size += hw->region_table.size * sizeof(struct ath10k_dump_ram_data_hdr); + + /* make sure it is aligned 16 bytes for debug message print out */ + size = ALIGN(size, 16); + + return size; +} + +const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar) +{ + int i; + + if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) + return NULL; + + if (WARN_ON(ar->target_version == 0)) + return NULL; + + for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) { + if (ar->target_version == hw_mem_layouts[i].hw_id) + return &hw_mem_layouts[i]; + } + + return NULL; +} +EXPORT_SYMBOL(ath10k_coredump_get_mem_layout); + +struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar) +{ + struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; + + lockdep_assert_held(&ar->data_lock); + + if (ath10k_coredump_mask == 0) + /* coredump disabled */ + return NULL; + + guid_gen(&crash_data->guid); + ktime_get_real_ts64(&crash_data->timestamp); + + return crash_data; +} +EXPORT_SYMBOL(ath10k_coredump_new); + +static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar) +{ + struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; + struct ath10k_ce_crash_hdr *ce_hdr; + struct 
ath10k_dump_file_data *dump_data; + struct ath10k_tlv_dump_data *dump_tlv; + size_t hdr_len = sizeof(*dump_data); + size_t len, sofar = 0; + unsigned char *buf; + + len = hdr_len; + + if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) + len += sizeof(*dump_tlv) + sizeof(crash_data->registers); + + if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) + len += sizeof(*dump_tlv) + sizeof(*ce_hdr) + + CE_COUNT * sizeof(ce_hdr->entries[0]); + + if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) + len += sizeof(*dump_tlv) + crash_data->ramdump_buf_len; + + sofar += hdr_len; + + /* This is going to get big when we start dumping FW RAM and such, + * so go ahead and use vmalloc. + */ + buf = vzalloc(len); + if (!buf) + return NULL; + + spin_lock_bh(&ar->data_lock); + + dump_data = (struct ath10k_dump_file_data *)(buf); + strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP", + sizeof(dump_data->df_magic)); + dump_data->len = cpu_to_le32(len); + + dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION); + + guid_copy(&dump_data->guid, &crash_data->guid); + dump_data->chip_id = cpu_to_le32(ar->chip_id); + dump_data->bus_type = cpu_to_le32(0); + dump_data->target_version = cpu_to_le32(ar->target_version); + dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major); + dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor); + dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release); + dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build); + dump_data->phy_capability = cpu_to_le32(ar->phy_capability); + dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power); + dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power); + dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info); + dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info); + dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains); + + strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version, + sizeof(dump_data->fw_ver)); + + dump_data->kernel_ver_code = 0; + strlcpy(dump_data->kernel_ver, init_utsname()->release, + sizeof(dump_data->kernel_ver)); + + dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec); + dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec); + + if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) { + dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); + dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS); + dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers)); + memcpy(dump_tlv->tlv_data, &crash_data->registers, + sizeof(crash_data->registers)); + sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers); + } + + if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) { + dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); + dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA); + dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) + + CE_COUNT * sizeof(ce_hdr->entries[0])); + ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data); + ce_hdr->ce_count = cpu_to_le32(CE_COUNT); + memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved)); + memcpy(ce_hdr->entries, crash_data->ce_crash_data, + CE_COUNT * sizeof(ce_hdr->entries[0])); + sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) + + CE_COUNT * sizeof(ce_hdr->entries[0]); + } + + /* Gather ram dump */ + if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) { + dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); + dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA); + dump_tlv->tlv_len = 
cpu_to_le32(crash_data->ramdump_buf_len); + memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf, + crash_data->ramdump_buf_len); + sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len; + } + + spin_unlock_bh(&ar->data_lock); + + return dump_data; +} + +int ath10k_coredump_submit(struct ath10k *ar) +{ + struct ath10k_dump_file_data *dump; + + if (ath10k_coredump_mask == 0) + /* coredump disabled */ + return 0; + + dump = ath10k_coredump_build(ar); + if (!dump) { + ath10k_warn(ar, "no crash dump data found for devcoredump"); + return -ENODATA; + } + + dev_coredumpv(ar->dev, dump, le32_to_cpu(dump->len), GFP_KERNEL); + + return 0; +} + +int ath10k_coredump_create(struct ath10k *ar) +{ + if (ath10k_coredump_mask == 0) + /* coredump disabled */ + return 0; + + ar->coredump.fw_crash_data = vzalloc(sizeof(*ar->coredump.fw_crash_data)); + if (!ar->coredump.fw_crash_data) + return -ENOMEM; + + return 0; +} + +int ath10k_coredump_register(struct ath10k *ar) +{ + struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; + + if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) { + crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar); + + crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len); + if (!crash_data->ramdump_buf) + return -ENOMEM; + } + + return 0; +} + +void ath10k_coredump_unregister(struct ath10k *ar) +{ + struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; + + vfree(crash_data->ramdump_buf); +} + +void ath10k_coredump_destroy(struct ath10k *ar) +{ + if (ar->coredump.fw_crash_data->ramdump_buf) { + vfree(ar->coredump.fw_crash_data->ramdump_buf); + ar->coredump.fw_crash_data->ramdump_buf = NULL; + ar->coredump.fw_crash_data->ramdump_buf_len = 0; + } + + vfree(ar->coredump.fw_crash_data); + ar->coredump.fw_crash_data = NULL; +} diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h new file mode 100644 index 000000000000..bfee13038e59 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/coredump.h @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
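ath10k_coredump_build() above lays the file out as a fixed ath10k_dump_file_data header followed by a sequence of TLVs, each gated by a bit in coredump_mask. A userspace sketch of walking a dump retrieved from /sys/class/devcoredump/devcd*/data (assumes a little-endian host and the layout from coredump.h below; error handling trimmed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {                    /* mirrors struct ath10k_tlv_dump_data */
        uint32_t type;          /* __le32 on the wire */
        uint32_t tlv_len;       /* payload bytes, header excluded */
};

static void walk_dump(const uint8_t *buf, size_t len, size_t hdr_len)
{
        size_t off = hdr_len;   /* sizeof(struct ath10k_dump_file_data) */
        struct tlv t;

        while (off + sizeof(t) <= len) {
                memcpy(&t, buf + off, sizeof(t));
                if (t.tlv_len > len - off - sizeof(t))
                        break;          /* truncated dump */
                printf("tlv type %u, %u bytes\n", t.type, t.tlv_len);
                off += sizeof(t) + t.tlv_len;
        }
}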
+ */ + +#ifndef _COREDUMP_H_ +#define _COREDUMP_H_ + +#include "core.h" + +#define ATH10K_FW_CRASH_DUMP_VERSION 1 + +/** + * enum ath10k_fw_crash_dump_type - types of data in the dump file + * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format + */ +enum ath10k_fw_crash_dump_type { + ATH10K_FW_CRASH_DUMP_REGISTERS = 0, + ATH10K_FW_CRASH_DUMP_CE_DATA = 1, + + /* contains multiple struct ath10k_dump_ram_data_hdr */ + ATH10K_FW_CRASH_DUMP_RAM_DATA = 2, + + ATH10K_FW_CRASH_DUMP_MAX, +}; + +struct ath10k_tlv_dump_data { + /* see ath10k_fw_crash_dump_type above */ + __le32 type; + + /* in bytes */ + __le32 tlv_len; + + /* pad to 32-bit boundaries as needed */ + u8 tlv_data[]; +} __packed; + +struct ath10k_dump_file_data { + /* dump file information */ + + /* "ATH10K-FW-DUMP" */ + char df_magic[16]; + + __le32 len; + + /* file dump version */ + __le32 version; + + /* some info we can get from ath10k struct that might help */ + + guid_t guid; + + __le32 chip_id; + + /* 0 for now, in place for later hardware */ + __le32 bus_type; + + __le32 target_version; + __le32 fw_version_major; + __le32 fw_version_minor; + __le32 fw_version_release; + __le32 fw_version_build; + __le32 phy_capability; + __le32 hw_min_tx_power; + __le32 hw_max_tx_power; + __le32 ht_cap_info; + __le32 vht_cap_info; + __le32 num_rf_chains; + + /* firmware version string */ + char fw_ver[ETHTOOL_FWVERS_LEN]; + + /* Kernel related information */ + + /* time-of-day stamp */ + __le64 tv_sec; + + /* time-of-day stamp, nano-seconds */ + __le64 tv_nsec; + + /* LINUX_VERSION_CODE */ + __le32 kernel_ver_code; + + /* VERMAGIC_STRING */ + char kernel_ver[64]; + + /* room for growth w/out changing binary format */ + u8 unused[128]; + + /* struct ath10k_tlv_dump_data + more */ + u8 data[0]; +} __packed; + +struct ath10k_dump_ram_data_hdr { + /* enum ath10k_mem_region_type */ + __le32 region_type; + + __le32 start; + + /* length of payload data, not including this header */ + __le32 length; + + u8 data[0]; +}; + +/* magic number to fill the holes not copied due to sections in regions */ +#define ATH10K_MAGIC_NOT_COPIED 0xAA + +/* part of user space ABI */ +enum ath10k_mem_region_type { + ATH10K_MEM_REGION_TYPE_REG = 1, + ATH10K_MEM_REGION_TYPE_DRAM = 2, + ATH10K_MEM_REGION_TYPE_AXI = 3, + ATH10K_MEM_REGION_TYPE_IRAM1 = 4, + ATH10K_MEM_REGION_TYPE_IRAM2 = 5, +}; + +/* Define a section of the region which should be copied. As not all parts + * of the memory is possible to copy, for example some of the registers can + * be like that, sections can be used to define what is safe to copy. + * + * To minimize the size of the array, the list must obey the format: + * '{start0,stop0},{start1,stop1},{start2,stop2}....' The values below must + * also obey to 'start0 < stop0 < start1 < stop1 < start2 < ...', otherwise + * we may encouter error in the dump processing. + */ +struct ath10k_mem_section { + u32 start; + u32 end; +}; + +/* One region of a memory layout. If the sections field is null entire + * region is copied. If sections is non-null only the areas specified in + * sections are copied and rest of the areas are filled with + * ATH10K_MAGIC_NOT_COPIED. + */ +struct ath10k_mem_region { + enum ath10k_mem_region_type type; + u32 start; + u32 len; + + const char *name; + + struct { + const struct ath10k_mem_section *sections; + u32 size; + } section_table; +}; + +/* Contains the memory layout of a hardware version identified with the + * hardware id, split into regions. 
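The section-table comment earlier in this header demands that the '{start0,stop0},{start1,stop1},...' entries be strictly increasing and non-overlapping. The driver relies on the tables being hand-maintained; purely as an illustration, the invariant could be checked like this:

static bool ath10k_mem_sections_valid(const struct ath10k_mem_section *s,
                                      u32 n)
{
        u32 i;

        for (i = 0; i < n; i++) {
                if (s[i].start >= s[i].end)
                        return false;
                if (i > 0 && s[i].start <= s[i - 1].end)
                        return false;
        }

        return true;
}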
+ */ +struct ath10k_hw_mem_layout { + u32 hw_id; + + struct { + const struct ath10k_mem_region *regions; + int size; + } region_table; +}; + +/* FIXME: where to put this? */ +extern unsigned long ath10k_coredump_mask; + +#ifdef CONFIG_DEV_COREDUMP + +int ath10k_coredump_submit(struct ath10k *ar); +struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar); +int ath10k_coredump_create(struct ath10k *ar); +int ath10k_coredump_register(struct ath10k *ar); +void ath10k_coredump_unregister(struct ath10k *ar); +void ath10k_coredump_destroy(struct ath10k *ar); + +const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar); + +#else /* CONFIG_DEV_COREDUMP */ + +static inline int ath10k_coredump_submit(struct ath10k *ar) +{ + return 0; +} + +static inline struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar) +{ + return NULL; +} + +static inline int ath10k_coredump_create(struct ath10k *ar) +{ + return 0; +} + +static inline int ath10k_coredump_register(struct ath10k *ar) +{ + return 0; +} + +static inline void ath10k_coredump_unregister(struct ath10k *ar) +{ +} + +static inline void ath10k_coredump_destroy(struct ath10k *ar) +{ +} + +static inline const struct ath10k_hw_mem_layout * +ath10k_coredump_get_mem_layout(struct ath10k *ar) +{ + return NULL; +} + +#endif /* CONFIG_DEV_COREDUMP */ + +#endif /* _COREDUMP_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index df514507d3f1..6d836a26272f 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -18,10 +18,8 @@ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/vmalloc.h> -#include <linux/utsname.h> #include <linux/crc32.h> #include <linux/firmware.h> -#include <linux/devcoredump.h> #include "core.h" #include "debug.h" @@ -33,86 +31,6 @@ #define ATH10K_DEBUG_CAL_DATA_LEN 12064 -#define ATH10K_FW_CRASH_DUMP_VERSION 1 - -/** - * enum ath10k_fw_crash_dump_type - types of data in the dump file - * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format - */ -enum ath10k_fw_crash_dump_type { - ATH10K_FW_CRASH_DUMP_REGISTERS = 0, - ATH10K_FW_CRASH_DUMP_CE_DATA = 1, - - ATH10K_FW_CRASH_DUMP_MAX, -}; - -struct ath10k_tlv_dump_data { - /* see ath10k_fw_crash_dump_type above */ - __le32 type; - - /* in bytes */ - __le32 tlv_len; - - /* pad to 32-bit boundaries as needed */ - u8 tlv_data[]; -} __packed; - -struct ath10k_dump_file_data { - /* dump file information */ - - /* "ATH10K-FW-DUMP" */ - char df_magic[16]; - - __le32 len; - - /* file dump version */ - __le32 version; - - /* some info we can get from ath10k struct that might help */ - - guid_t guid; - - __le32 chip_id; - - /* 0 for now, in place for later hardware */ - __le32 bus_type; - - __le32 target_version; - __le32 fw_version_major; - __le32 fw_version_minor; - __le32 fw_version_release; - __le32 fw_version_build; - __le32 phy_capability; - __le32 hw_min_tx_power; - __le32 hw_max_tx_power; - __le32 ht_cap_info; - __le32 vht_cap_info; - __le32 num_rf_chains; - - /* firmware version string */ - char fw_ver[ETHTOOL_FWVERS_LEN]; - - /* Kernel related information */ - - /* time-of-day stamp */ - __le64 tv_sec; - - /* time-of-day stamp, nano-seconds */ - __le64 tv_nsec; - - /* LINUX_VERSION_CODE */ - __le32 kernel_ver_code; - - /* VERMAGIC_STRING */ - char kernel_ver[64]; - - /* room for growth w/out changing binary format */ - u8 unused[128]; - - /* struct ath10k_tlv_dump_data + more */ - u8 data[0]; -} __packed; - void ath10k_info(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { @@ -711,189 +629,6 @@ static const struct file_operations fops_chip_id = { .llseek = default_llseek, }; -struct ath10k_fw_crash_data * -ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) -{ - struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data; - - lockdep_assert_held(&ar->data_lock); - - crash_data->crashed_since_read = true; - guid_gen(&crash_data->guid); - getnstimeofday(&crash_data->timestamp); - - return crash_data; -} -EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data); - -static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar, - bool mark_read) -{ - struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data; - struct ath10k_ce_crash_hdr *ce_hdr; - struct ath10k_dump_file_data *dump_data; - struct ath10k_tlv_dump_data *dump_tlv; - size_t hdr_len = sizeof(*dump_data); - size_t len, sofar = 0; - unsigned char *buf; - - len = hdr_len; - len += sizeof(*dump_tlv) + sizeof(crash_data->registers); - len += sizeof(*dump_tlv) + sizeof(*ce_hdr) + - CE_COUNT * sizeof(ce_hdr->entries[0]); - - sofar += hdr_len; - - /* This is going to get big when we start dumping FW RAM and such, - * so go ahead and use vmalloc. 
- */ - buf = vzalloc(len); - if (!buf) - return NULL; - - spin_lock_bh(&ar->data_lock); - - if (!crash_data->crashed_since_read) { - spin_unlock_bh(&ar->data_lock); - vfree(buf); - return NULL; - } - - dump_data = (struct ath10k_dump_file_data *)(buf); - strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP", - sizeof(dump_data->df_magic)); - dump_data->len = cpu_to_le32(len); - - dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION); - - guid_copy(&dump_data->guid, &crash_data->guid); - dump_data->chip_id = cpu_to_le32(ar->chip_id); - dump_data->bus_type = cpu_to_le32(0); - dump_data->target_version = cpu_to_le32(ar->target_version); - dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major); - dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor); - dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release); - dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build); - dump_data->phy_capability = cpu_to_le32(ar->phy_capability); - dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power); - dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power); - dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info); - dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info); - dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains); - - strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version, - sizeof(dump_data->fw_ver)); - - dump_data->kernel_ver_code = 0; - strlcpy(dump_data->kernel_ver, init_utsname()->release, - sizeof(dump_data->kernel_ver)); - - dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec); - dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec); - - /* Gather crash-dump */ - dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); - dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS); - dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers)); - memcpy(dump_tlv->tlv_data, &crash_data->registers, - sizeof(crash_data->registers)); - sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers); - - dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); - dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA); - dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) + - CE_COUNT * sizeof(ce_hdr->entries[0])); - ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data); - ce_hdr->ce_count = cpu_to_le32(CE_COUNT); - memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved)); - memcpy(ce_hdr->entries, crash_data->ce_crash_data, - CE_COUNT * sizeof(ce_hdr->entries[0])); - sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) + - CE_COUNT * sizeof(ce_hdr->entries[0]); - - ar->debug.fw_crash_data->crashed_since_read = !mark_read; - - spin_unlock_bh(&ar->data_lock); - - return dump_data; -} - -int ath10k_debug_fw_devcoredump(struct ath10k *ar) -{ - struct ath10k_dump_file_data *dump; - void *dump_ptr; - u32 dump_len; - - /* To keep the dump file available also for debugfs don't mark the - * file read, only debugfs should do that. - */ - dump = ath10k_build_dump_file(ar, false); - if (!dump) { - ath10k_warn(ar, "no crash dump data found for devcoredump"); - return -ENODATA; - } - - /* Make a copy of the dump file for dev_coredumpv() as during the - * transition period we need to own the original file. Once - * fw_crash_dump debugfs file is removed no need to have a copy - * anymore. 
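[The copy in the removed ath10k_debug_fw_devcoredump() exists because dev_coredumpv() takes ownership of the buffer: the devcoredump core exposes it under /sys/class/devcoredump and vfrees it itself once it is read or times out. A hedged kernel-style sketch of that contract; my_submit_dump is an illustrative helper, only dev_coredumpv() and the vmalloc API are real.]

#include <linux/device.h>
#include <linux/devcoredump.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Hand a vmalloc'd copy to the devcoredump core. After dev_coredumpv()
 * returns, "copy" belongs to the core and must not be touched or freed
 * by the caller; the original "dump" stays owned by the caller.
 */
static int my_submit_dump(struct device *dev, const void *dump, size_t len)
{
        void *copy;

        copy = vzalloc(len);
        if (!copy)
                return -ENOMEM;

        memcpy(copy, dump, len);
        dev_coredumpv(dev, copy, len, GFP_KERNEL);
        return 0;
}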
- */ - dump_len = le32_to_cpu(dump->len); - dump_ptr = vzalloc(dump_len); - - if (!dump_ptr) - return -ENOMEM; - - memcpy(dump_ptr, dump, dump_len); - - dev_coredumpv(ar->dev, dump_ptr, dump_len, GFP_KERNEL); - - return 0; -} - -static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file) -{ - struct ath10k *ar = inode->i_private; - struct ath10k_dump_file_data *dump; - - ath10k_warn(ar, "fw_crash_dump debugfs file is deprecated, please use /sys/class/devcoredump instead."); - - dump = ath10k_build_dump_file(ar, true); - if (!dump) - return -ENODATA; - - file->private_data = dump; - - return 0; -} - -static ssize_t ath10k_fw_crash_dump_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath10k_dump_file_data *dump_file = file->private_data; - - return simple_read_from_buffer(user_buf, count, ppos, - dump_file, - le32_to_cpu(dump_file->len)); -} - -static int ath10k_fw_crash_dump_release(struct inode *inode, - struct file *file) -{ - vfree(file->private_data); - - return 0; -} - -static const struct file_operations fops_fw_crash_dump = { - .open = ath10k_fw_crash_dump_open, - .read = ath10k_fw_crash_dump_read, - .release = ath10k_fw_crash_dump_release, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - static ssize_t ath10k_reg_addr_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -1950,14 +1685,14 @@ int ath10k_debug_start(struct ath10k *ar) ret); } - if (ar->debug.pktlog_filter) { + if (ar->pktlog_filter) { ret = ath10k_wmi_pdev_pktlog_enable(ar, - ar->debug.pktlog_filter); + ar->pktlog_filter); if (ret) /* not serious */ ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n", - ar->debug.pktlog_filter, ret); + ar->pktlog_filter, ret); } else { ret = ath10k_wmi_pdev_pktlog_disable(ar); if (ret) @@ -2097,12 +1832,12 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file, mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON) { - ar->debug.pktlog_filter = filter; + ar->pktlog_filter = filter; ret = count; goto out; } - if (filter == ar->debug.pktlog_filter) { + if (filter == ar->pktlog_filter) { ret = count; goto out; } @@ -2111,7 +1846,7 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file, ret = ath10k_wmi_pdev_pktlog_enable(ar, filter); if (ret) { ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n", - ar->debug.pktlog_filter, ret); + ar->pktlog_filter, ret); goto out; } } else { @@ -2122,7 +1857,7 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file, } } - ar->debug.pktlog_filter = filter; + ar->pktlog_filter = filter; ret = count; out: @@ -2139,7 +1874,7 @@ static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf, mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%08x\n", - ar->debug.pktlog_filter); + ar->pktlog_filter); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(ubuf, count, ppos, buf, len); @@ -2402,10 +2137,6 @@ static const struct file_operations fops_fw_checksums = { int ath10k_debug_create(struct ath10k *ar) { - ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data)); - if (!ar->debug.fw_crash_data) - return -ENOMEM; - ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN); if (!ar->debug.cal_data) return -ENOMEM; @@ -2420,9 +2151,6 @@ int ath10k_debug_create(struct ath10k *ar) void ath10k_debug_destroy(struct ath10k *ar) { - vfree(ar->debug.fw_crash_data); - ar->debug.fw_crash_data = NULL; - vfree(ar->debug.cal_data); ar->debug.cal_data = NULL; @@ -2460,9 +2188,6 @@ int 
ath10k_debug_register(struct ath10k *ar) debugfs_create_file("simulate_fw_crash", 0600, ar->debug.debugfs_phy, ar, &fops_simulate_fw_crash); - debugfs_create_file("fw_crash_dump", 0400, ar->debug.debugfs_phy, ar, - &fops_fw_crash_dump); - debugfs_create_file("reg_addr", 0600, ar->debug.debugfs_phy, ar, &fops_reg_addr); diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 548ad5483a4a..e54308889e59 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -42,6 +42,7 @@ enum ath10k_debug_mask { ATH10K_DBG_SDIO_DUMP = 0x00020000, ATH10K_DBG_USB = 0x00040000, ATH10K_DBG_USB_BULK = 0x00080000, + ATH10K_DBG_SNOC = 0x00100000, ATH10K_DBG_ANY = 0xffffffff, }; @@ -51,7 +52,8 @@ enum ath10k_pktlog_filter { ATH10K_PKTLOG_RCFIND = 0x000000004, ATH10K_PKTLOG_RCUPDATE = 0x000000008, ATH10K_PKTLOG_DBG_PRINT = 0x000000010, - ATH10K_PKTLOG_ANY = 0x00000001f, + ATH10K_PKTLOG_PEER_STATS = 0x000000040, + ATH10K_PKTLOG_ANY = 0x00000005f, }; enum ath10k_dbg_aggr_mode { @@ -60,6 +62,21 @@ enum ath10k_dbg_aggr_mode { ATH10K_DBG_AGGR_MODE_MAX, }; +/* Types of packet log events */ +enum ath_pktlog_type { + ATH_PKTLOG_TYPE_TX_CTRL = 1, + ATH_PKTLOG_TYPE_TX_STAT, +}; + +struct ath10k_pktlog_hdr { + __le16 flags; + __le16 missed_cnt; + __le16 log_type; /* Type of log information foll this header */ + __le16 size; /* Size of variable length log information in bytes */ + __le32 timestamp; + u8 payload[0]; +} __packed; + /* FIXME: How to calculate the buffer size sanely? 
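[The new ath10k_pktlog_hdr just above is a little-endian wire header; note also that the widened ATH10K_PKTLOG_ANY mask 0x5f is 0x1f | 0x40, i.e. the new PEER_STATS bit with bit 0x20 left unused. A hedged userspace sketch of decoding such a header from raw bytes; struct pktlog_hdr is a local mirror and the byte buffer is fabricated.]

#include <stdint.h>
#include <stdio.h>

/* Mirror of the header added above; all multi-byte fields little-endian. */
struct pktlog_hdr {
        uint16_t flags;
        uint16_t missed_cnt;
        uint16_t log_type;   /* ATH_PKTLOG_TYPE_* */
        uint16_t size;       /* payload bytes */
        uint32_t timestamp;
} __attribute__((packed));

#define PKTLOG_TYPE_TX_STAT 2   /* ATH_PKTLOG_TYPE_TX_STAT */

static uint16_t get_le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
        /* flags=0, missed_cnt=0, log_type=2, size=8, timestamp=0 */
        uint8_t raw[] = { 0, 0, 0, 0, 2, 0, 8, 0, 0, 0, 0, 0 };
        uint16_t log_type = get_le16(raw + 4);

        if (log_type == PKTLOG_TYPE_TX_STAT)
                printf("tx-stat record, %u payload bytes\n", get_le16(raw + 6));
        return 0;
}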
*/ #define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024) @@ -84,13 +101,8 @@ void ath10k_debug_unregister(struct ath10k *ar); void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb); void ath10k_debug_tpc_stats_process(struct ath10k *ar, struct ath10k_tpc_stats *tpc_stats); -struct ath10k_fw_crash_data * -ath10k_debug_get_new_fw_crash_data(struct ath10k *ar); - void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len); -int ath10k_debug_fw_devcoredump(struct ath10k *ar); - #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++) void ath10k_debug_get_et_strings(struct ieee80211_hw *hw, @@ -157,12 +169,6 @@ static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, { } -static inline struct ath10k_fw_crash_data * -ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) -{ - return NULL; -} - static inline u64 ath10k_debug_get_fw_dbglog_mask(struct ath10k *ar) { return 0; @@ -173,11 +179,6 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar) return 0; } -static inline int ath10k_debug_fw_devcoredump(struct ath10k *ar) -{ - return 0; -} - #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0) #define ath10k_debug_get_et_strings NULL @@ -190,9 +191,6 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir); void ath10k_sta_update_rx_duration(struct ath10k *ar, struct ath10k_fw_stats *stats); -void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct station_info *sinfo); #else static inline void ath10k_sta_update_rx_duration(struct ath10k *ar, diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index d59ac6b83340..b260b09dd4d3 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -65,33 +65,6 @@ void ath10k_sta_update_rx_duration(struct ath10k *ar, ath10k_sta_update_stats_rx_duration(ar, stats); } -void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct station_info *sinfo) -{ - struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; - struct ath10k *ar = arsta->arvif->ar; - - if (!ath10k_peer_stats_enabled(ar)) - return; - - sinfo->rx_duration = arsta->rx_duration; - sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION; - - if (!arsta->txrate.legacy && !arsta->txrate.nss) - return; - - if (arsta->txrate.legacy) { - sinfo->txrate.legacy = arsta->txrate.legacy; - } else { - sinfo->txrate.mcs = arsta->txrate.mcs; - sinfo->txrate.nss = arsta->txrate.nss; - sinfo->txrate.bw = arsta->txrate.bw; - } - sinfo->txrate.flags = arsta->txrate.flags; - sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE; -} - static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 6679dd9cfd12..6da4e3369c5a 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. 
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index e5c80f582ff5..492dc5b4bbf2 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index 24663b07eeac..a2f8814b3e53 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index cd160b16db1e..625198dea18b 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -207,6 +207,9 @@ int ath10k_htt_init(struct ath10k *ar) WARN_ON(1); return -EINVAL; } + ath10k_htt_set_tx_ops(htt); + ath10k_htt_set_rx_ops(htt); + return 0; } @@ -254,11 +257,11 @@ int ath10k_htt_setup(struct ath10k_htt *htt) return status; } - status = ath10k_htt_send_frag_desc_bank_cfg(htt); + status = htt->tx_ops->htt_send_frag_desc_bank_cfg(htt); if (status) return status; - status = ath10k_htt_send_rx_ring_cfg_ll(htt); + status = htt->tx_ops->htt_send_rx_ring_cfg(htt); if (status) { ath10k_warn(ar, "failed to setup rx ring: %d\n", status); diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 6305308422c4..8cc2a8b278e4 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 
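[The htt.c hunk above replaces direct calls such as ath10k_htt_send_rx_ring_cfg_ll() with dispatch through per-target ops tables installed once at init, the same pattern used for rx_ops later in this patch. A standalone sketch of the idiom; all names here are illustrative.]

#include <stdbool.h>
#include <stdio.h>

struct htt;

struct tx_ops {
        int (*send_rx_ring_cfg)(struct htt *htt);
};

struct htt {
        bool target_64bit;
        const struct tx_ops *tx_ops;
};

static int send_rx_ring_cfg_32(struct htt *htt)
{
        (void)htt;
        puts("32-bit ring cfg");
        return 0;
}

static int send_rx_ring_cfg_64(struct htt *htt)
{
        (void)htt;
        puts("64-bit ring cfg");
        return 0;
}

static const struct tx_ops ops_32 = { .send_rx_ring_cfg = send_rx_ring_cfg_32 };
static const struct tx_ops ops_64 = { .send_rx_ring_cfg = send_rx_ring_cfg_64 };

/* Mirrors ath10k_htt_set_tx_ops(): pick the table once, at init time. */
static void set_tx_ops(struct htt *htt)
{
        htt->tx_ops = htt->target_64bit ? &ops_64 : &ops_32;
}

int main(void)
{
        struct htt htt = { .target_64bit = true };

        set_tx_ops(&htt);
        /* Call sites stay identical for both descriptor layouts. */
        return htt.tx_ops->send_rx_ring_cfg(&htt);
}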
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -107,6 +107,14 @@ struct htt_msdu_ext_desc { struct htt_data_tx_desc_frag frags[6]; }; +struct htt_msdu_ext_desc_64 { + __le32 tso_flag[5]; + __le16 ip_identification; + u8 flags; + u8 reserved; + struct htt_data_tx_desc_frag frags[6]; +}; + #define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE BIT(0) #define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1) #define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2) @@ -179,6 +187,22 @@ struct htt_data_tx_desc { u8 prefetch[0]; /* start of frame, for FW classification engine */ } __packed; +struct htt_data_tx_desc_64 { + u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */ + __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */ + __le16 len; + __le16 id; + __le64 frags_paddr; + union { + __le32 peerid; + struct { + __le16 peerid; + __le16 freq; + } __packed offchan_tx; + } __packed; + u8 prefetch[0]; /* start of frame, for FW classification engine */ +} __packed; + enum htt_rx_ring_flags { HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0, HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1, @@ -200,8 +224,11 @@ enum htt_rx_ring_flags { #define HTT_RX_RING_SIZE_MIN 128 #define HTT_RX_RING_SIZE_MAX 2048 +#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX +#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1) +#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1) -struct htt_rx_ring_setup_ring { +struct htt_rx_ring_setup_ring32 { __le32 fw_idx_shadow_reg_paddr; __le32 rx_ring_base_paddr; __le16 rx_ring_len; /* in 4-byte words */ @@ -222,14 +249,40 @@ struct htt_rx_ring_setup_ring { __le16 frag_info_offset; } __packed; +struct htt_rx_ring_setup_ring64 { + __le64 fw_idx_shadow_reg_paddr; + __le64 rx_ring_base_paddr; + __le16 rx_ring_len; /* in 4-byte words */ + __le16 rx_ring_bufsize; /* rx skb size - in bytes */ + __le16 flags; /* %HTT_RX_RING_FLAGS_ */ + __le16 fw_idx_init_val; + + /* the following offsets are in 4-byte units */ + __le16 mac80211_hdr_offset; + __le16 msdu_payload_offset; + __le16 ppdu_start_offset; + __le16 ppdu_end_offset; + __le16 mpdu_start_offset; + __le16 mpdu_end_offset; + __le16 msdu_start_offset; + __le16 msdu_end_offset; + __le16 rx_attention_offset; + __le16 frag_info_offset; +} __packed; + struct htt_rx_ring_setup_hdr { u8 num_rings; /* supported values: 1, 2 */ __le16 rsvd0; } __packed; -struct htt_rx_ring_setup { +struct htt_rx_ring_setup_32 { struct htt_rx_ring_setup_hdr hdr; - struct htt_rx_ring_setup_ring rings[0]; + struct htt_rx_ring_setup_ring32 rings[0]; +} __packed; + +struct htt_rx_ring_setup_64 { + struct htt_rx_ring_setup_hdr hdr; + struct htt_rx_ring_setup_ring64 rings[0]; } __packed; /* @@ -855,13 +908,23 @@ struct htt_rx_in_ord_msdu_desc { u8 reserved; } __packed; +struct htt_rx_in_ord_msdu_desc_ext { + __le64 msdu_paddr; + __le16 msdu_len; + u8 fw_desc; + u8 reserved; +} __packed; + struct htt_rx_in_ord_ind { u8 info; __le16 peer_id; u8 vdev_id; u8 reserved; __le16 msdu_count; - struct htt_rx_in_ord_msdu_desc msdu_descs[0]; + union { + struct htt_rx_in_ord_msdu_desc msdu_descs32[0]; + struct htt_rx_in_ord_msdu_desc_ext msdu_descs64[0]; + } __packed; } __packed; #define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f @@ -1351,7 +1414,7 @@ struct htt_q_state_conf { u8 pad[2]; } __packed; -struct htt_frag_desc_bank_cfg { +struct htt_frag_desc_bank_cfg32 { u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */ u8 num_banks; u8 desc_size; @@ -1360,6 +1423,15 @@ struct htt_frag_desc_bank_cfg { struct 
htt_q_state_conf q_state; } __packed; +struct htt_frag_desc_bank_cfg64 { + u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */ + u8 num_banks; + u8 desc_size; + __le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX]; + struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX]; + struct htt_q_state_conf q_state; +} __packed; + #define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128 #define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f #define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0 @@ -1497,6 +1569,23 @@ struct htt_peer_tx_stats { u8 payload[0]; } __packed; +#define ATH10K_10_2_TX_STATS_OFFSET 136 +#define PEER_STATS_FOR_NO_OF_PPDUS 4 + +struct ath10k_10_2_peer_tx_stats { + u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS]; + u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS]; + __le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS]; + u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS]; + __le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS]; + u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS]; + __le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS]; + u8 flags[PEER_STATS_FOR_NO_OF_PPDUS]; + __le32 tx_duration; + u8 tx_ppdu_cnt; + u8 peer_id; +} __packed; + union htt_rx_pn_t { /* WEP: 24-bit PN */ u32 pn24; @@ -1514,11 +1603,13 @@ struct htt_cmd { struct htt_ver_req ver_req; struct htt_mgmt_tx_desc mgmt_tx; struct htt_data_tx_desc data_tx; - struct htt_rx_ring_setup rx_setup; + struct htt_rx_ring_setup_32 rx_setup_32; + struct htt_rx_ring_setup_64 rx_setup_64; struct htt_stats_req stats_req; struct htt_oob_sync_req oob_sync_req; struct htt_aggr_conf aggr_conf; - struct htt_frag_desc_bank_cfg frag_desc_bank_cfg; + struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32; + struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64; struct htt_tx_fetch_resp tx_fetch_resp; }; } __packed; @@ -1576,13 +1667,20 @@ struct htt_peer_unmap_event { u16 peer_id; }; -struct ath10k_htt_txbuf { +struct ath10k_htt_txbuf_32 { struct htt_data_tx_desc_frag frags[2]; struct ath10k_htc_hdr htc_hdr; struct htt_cmd_hdr cmd_hdr; struct htt_data_tx_desc cmd_tx; } __packed; +struct ath10k_htt_txbuf_64 { + struct htt_data_tx_desc_frag frags[2]; + struct ath10k_htc_hdr htc_hdr; + struct htt_cmd_hdr cmd_hdr; + struct htt_data_tx_desc_64 cmd_tx; +} __packed; + struct ath10k_htt { struct ath10k *ar; enum ath10k_htc_ep_id eid; @@ -1627,7 +1725,10 @@ struct ath10k_htt { * rx buffers the host SW provides for the MAC HW to * fill. 
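[The ring macros moved into this header earlier in the hunk fix the ring at 2048 entries and derive two fill levels: half minus one (1023) for single-MAC targets and size minus one (2047) for dual-MAC. A small sketch of that arithmetic and the power-of-two index wrap it relies on; the macro names are local stand-ins for the HTT_RX_RING_* ones.]

#include <assert.h>
#include <stdio.h>

#define RX_RING_SIZE          2048                       /* HTT_RX_RING_SIZE */
#define RX_RING_FILL_LEVEL    (((RX_RING_SIZE) / 2) - 1) /* 1023 */
#define RX_RING_FILL_DUAL_MAC (RX_RING_SIZE - 1)         /* 2047 */

int main(void)
{
        /* The ring must be a power of two so "idx & (size - 1)" wraps. */
        assert((RX_RING_SIZE & (RX_RING_SIZE - 1)) == 0);

        printf("fill level %d, dual-mac fill level %d\n",
               RX_RING_FILL_LEVEL, RX_RING_FILL_DUAL_MAC);

        /* Index wrap-around using the size mask, as the rx path does. */
        int idx = RX_RING_SIZE - 1;

        idx = (idx + 1) & (RX_RING_SIZE - 1);
        printf("wrapped idx %d\n", idx);   /* prints 0 */
        return 0;
}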
*/ - __le32 *paddrs_ring; + union { + __le64 *paddrs_ring_64; + __le32 *paddrs_ring_32; + }; /* * Base address of ring, as a "physical" device address @@ -1695,7 +1796,7 @@ struct ath10k_htt { /* This is used to group tx/rx completions separately and process them * in batches to reduce cache stalls */ - struct sk_buff_head rx_compl_q; + struct sk_buff_head rx_msdus_q; struct sk_buff_head rx_in_ord_compl_q; struct sk_buff_head tx_fetch_ind_q; @@ -1704,12 +1805,20 @@ struct ath10k_htt { struct { dma_addr_t paddr; - struct htt_msdu_ext_desc *vaddr; + union { + struct htt_msdu_ext_desc *vaddr_desc_32; + struct htt_msdu_ext_desc_64 *vaddr_desc_64; + }; + size_t size; } frag_desc; struct { dma_addr_t paddr; - struct ath10k_htt_txbuf *vaddr; + union { + struct ath10k_htt_txbuf_32 *vaddr_txbuff_32; + struct ath10k_htt_txbuf_64 *vaddr_txbuff_64; + }; + size_t size; } txbuf; struct { @@ -1724,6 +1833,28 @@ struct ath10k_htt { } tx_q_state; bool tx_mem_allocated; + const struct ath10k_htt_tx_ops *tx_ops; + const struct ath10k_htt_rx_ops *rx_ops; +}; + +struct ath10k_htt_tx_ops { + int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt); + int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt); + int (*htt_alloc_frag_desc)(struct ath10k_htt *htt); + void (*htt_free_frag_desc)(struct ath10k_htt *htt); + int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, + struct sk_buff *msdu); + int (*htt_alloc_txbuff)(struct ath10k_htt *htt); + void (*htt_free_txbuff)(struct ath10k_htt *htt); +}; + +struct ath10k_htt_rx_ops { + size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt); + void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr); + void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr, + int idx); + void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt); + void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx); }; #define RX_HTT_HDR_STATUS_LEN 64 @@ -1803,8 +1934,6 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt); int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie); -int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt); -int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt); int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, u8 max_subfrms_ampdu, u8 max_subfrms_amsdu); @@ -1829,11 +1958,9 @@ int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt, int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb); void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu); -int ath10k_htt_tx(struct ath10k_htt *htt, - enum ath10k_hw_txrx_mode txmode, - struct sk_buff *msdu); void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, struct sk_buff *skb); int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget); - +void ath10k_htt_set_tx_ops(struct ath10k_htt *htt); +void ath10k_htt_set_rx_ops(struct ath10k_htt *htt); #endif diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 7d295ee71534..6d96f9560950 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 
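[The htt_rx.c changes below widen the skb lookup key from u32 to u64 so that buffers on targets with 64-bit DMA addresses do not alias in the hash table. A hedged userspace sketch of keying a chained hash table by the full 64-bit address; the bucket function is illustrative, the driver uses the kernel's hash_add()/hash_for_each_possible() machinery.]

#include <stdint.h>
#include <stdio.h>

#define TABLE_BITS 5
#define TABLE_SIZE (1u << TABLE_BITS)

struct rxcb {
        uint64_t paddr;     /* DMA address used as the lookup key */
        struct rxcb *next;
        int skb_id;
};

static struct rxcb *table[TABLE_SIZE];

/* Fold a 64-bit DMA address into a small bucket index. */
static unsigned int bucket(uint64_t paddr)
{
        return (unsigned int)((paddr >> 12) ^ paddr) & (TABLE_SIZE - 1);
}

static void add(struct rxcb *cb)
{
        unsigned int b = bucket(cb->paddr);

        cb->next = table[b];
        table[b] = cb;
}

static struct rxcb *find(uint64_t paddr)
{
        struct rxcb *cb;

        for (cb = table[bucket(paddr)]; cb; cb = cb->next)
                if (cb->paddr == paddr)   /* compare the full 64-bit key */
                        return cb;
        return NULL;
}

int main(void)
{
        struct rxcb cb = { .paddr = 0x1234567890ULL, .skb_id = 7 };

        add(&cb);
        printf("found skb %d\n", find(0x1234567890ULL)->skb_id);
        return 0;
}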
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -25,9 +25,6 @@ #include <linux/log2.h> -#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX -#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1) - /* when under memory pressure rx ring refill may fail and needs a retry */ #define HTT_RX_RING_REFILL_RETRY_MS 50 @@ -36,7 +33,7 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); static struct sk_buff * -ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr) +ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr) { struct ath10k_skb_rxcb *rxcb; @@ -84,6 +81,60 @@ static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0])); } +static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt) +{ + return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32); +} + +static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt) +{ + return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64); +} + +static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt, + void *vaddr) +{ + htt->rx_ring.paddrs_ring_32 = vaddr; +} + +static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt, + void *vaddr) +{ + htt->rx_ring.paddrs_ring_64 = vaddr; +} + +static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt, + dma_addr_t paddr, int idx) +{ + htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr); +} + +static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt, + dma_addr_t paddr, int idx) +{ + htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr); +} + +static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx) +{ + htt->rx_ring.paddrs_ring_32[idx] = 0; +} + +static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx) +{ + htt->rx_ring.paddrs_ring_64[idx] = 0; +} + +static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt) +{ + return (void *)htt->rx_ring.paddrs_ring_32; +} + +static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt) +{ + return (void *)htt->rx_ring.paddrs_ring_64; +} + static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) { struct htt_rx_desc *rx_desc; @@ -129,13 +180,13 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) rxcb = ATH10K_SKB_RXCB(skb); rxcb->paddr = paddr; htt->rx_ring.netbufs_ring[idx] = skb; - htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr); + htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx); htt->rx_ring.fill_cnt++; if (htt->rx_ring.in_ord_rx) { hash_add(htt->rx_ring.skb_table, &ATH10K_SKB_RXCB(skb)->hlist, - (u32)paddr); + paddr); } num--; @@ -227,16 +278,15 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt) { del_timer_sync(&htt->rx_ring.refill_retry_timer); - skb_queue_purge(&htt->rx_compl_q); + skb_queue_purge(&htt->rx_msdus_q); skb_queue_purge(&htt->rx_in_ord_compl_q); skb_queue_purge(&htt->tx_fetch_ind_q); ath10k_htt_rx_ring_free(htt); dma_free_coherent(htt->ar->dev, - (htt->rx_ring.size * - sizeof(htt->rx_ring.paddrs_ring)), - htt->rx_ring.paddrs_ring, + htt->rx_ops->htt_get_rx_ring_size(htt), + htt->rx_ops->htt_get_vaddr_ring(htt), htt->rx_ring.base_paddr); dma_free_coherent(htt->ar->dev, @@ -263,7 +313,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) idx = htt->rx_ring.sw_rd_idx.msdu_payld; msdu = htt->rx_ring.netbufs_ring[idx]; htt->rx_ring.netbufs_ring[idx] = NULL; - htt->rx_ring.paddrs_ring[idx] = 0; 
+ htt->rx_ops->htt_reset_paddrs_ring(htt, idx); idx++; idx &= htt->rx_ring.size_mask; @@ -383,7 +433,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, } static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt, - u32 paddr) + u64 paddr) { struct ath10k *ar = htt->ar; struct ath10k_skb_rxcb *rxcb; @@ -408,12 +458,12 @@ static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt, return msdu; } -static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt, - struct htt_rx_in_ord_ind *ev, - struct sk_buff_head *list) +static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt, + struct htt_rx_in_ord_ind *ev, + struct sk_buff_head *list) { struct ath10k *ar = htt->ar; - struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs; + struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32; struct htt_rx_desc *rxd; struct sk_buff *msdu; int msdu_count; @@ -458,11 +508,60 @@ static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt, return 0; } +static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt, + struct htt_rx_in_ord_ind *ev, + struct sk_buff_head *list) +{ + struct ath10k *ar = htt->ar; + struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64; + struct htt_rx_desc *rxd; + struct sk_buff *msdu; + int msdu_count; + bool is_offload; + u64 paddr; + + lockdep_assert_held(&htt->rx_ring.lock); + + msdu_count = __le16_to_cpu(ev->msdu_count); + is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); + + while (msdu_count--) { + paddr = __le64_to_cpu(msdu_desc->msdu_paddr); + msdu = ath10k_htt_rx_pop_paddr(htt, paddr); + if (!msdu) { + __skb_queue_purge(list); + return -ENOENT; + } + + __skb_queue_tail(list, msdu); + + if (!is_offload) { + rxd = (void *)msdu->data; + + trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd)); + + skb_put(msdu, sizeof(*rxd)); + skb_pull(msdu, sizeof(*rxd)); + skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len)); + + if (!(__le32_to_cpu(rxd->attention.flags) & + RX_ATTENTION_FLAGS_MSDU_DONE)) { + ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n"); + return -EIO; + } + } + + msdu_desc++; + } + + return 0; +} + int ath10k_htt_rx_alloc(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; dma_addr_t paddr; - void *vaddr; + void *vaddr, *vaddr_ring; size_t size; struct timer_list *timer = &htt->rx_ring.refill_retry_timer; @@ -473,7 +572,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) */ htt->rx_ring.size = HTT_RX_RING_SIZE; htt->rx_ring.size_mask = htt->rx_ring.size - 1; - htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL; + htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level; if (!is_power_of_2(htt->rx_ring.size)) { ath10k_warn(ar, "htt rx ring size is not power of 2\n"); @@ -486,13 +585,13 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) if (!htt->rx_ring.netbufs_ring) goto err_netbuf; - size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring); + size = htt->rx_ops->htt_get_rx_ring_size(htt); - vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL); - if (!vaddr) + vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL); + if (!vaddr_ring) goto err_dma_ring; - htt->rx_ring.paddrs_ring = vaddr; + htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring); htt->rx_ring.base_paddr = paddr; vaddr = dma_alloc_coherent(htt->ar->dev, @@ -515,7 +614,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) htt->rx_ring.sw_rd_idx.msdu_payld = 0; hash_init(htt->rx_ring.skb_table); - skb_queue_head_init(&htt->rx_compl_q); + skb_queue_head_init(&htt->rx_msdus_q); 
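[The accessor pairs just added let one allocation back either a __le32 or a __le64 paddrs ring, with the element width chosen by the rx_ops table. A hedged userspace model of the union-plus-setters shape; the endianness conversions are elided (noted in comments) and all names are illustrative.]

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Model of the paddrs_ring union: one allocation, two typed views. */
struct rx_ring {
        int size;
        union {
                uint32_t *ring32;
                uint64_t *ring64;
        };
};

static void set_paddr_32(struct rx_ring *r, uint64_t paddr, int idx)
{
        r->ring32[idx] = (uint32_t)paddr;   /* __cpu_to_le32() in the driver */
}

static void set_paddr_64(struct rx_ring *r, uint64_t paddr, int idx)
{
        r->ring64[idx] = paddr;             /* __cpu_to_le64() in the driver */
}

int main(void)
{
        struct rx_ring r = { .size = 4 };
        int use_64 = 1;
        size_t elem = use_64 ? sizeof(uint64_t) : sizeof(uint32_t);

        /* The ring byte size depends on the element width, which is what
         * the htt_get_rx_ring_size ops above select between. */
        r.ring64 = calloc(r.size, elem);
        if (!r.ring64)
                return 1;

        if (use_64) {
                set_paddr_64(&r, 0x1122334455ULL, 0);
                printf("64-bit ring, %zu bytes, slot0 %#llx\n",
                       r.size * elem, (unsigned long long)r.ring64[0]);
        } else {
                set_paddr_32(&r, 0x11223344ULL, 0);
                printf("32-bit ring, %zu bytes, slot0 %#x\n",
                       r.size * elem, r.ring32[0]);
        }

        free(r.ring64);
        return 0;
}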
skb_queue_head_init(&htt->rx_in_ord_compl_q); skb_queue_head_init(&htt->tx_fetch_ind_q); atomic_set(&htt->num_mpdus_ready, 0); @@ -526,9 +625,8 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) err_dma_idx: dma_free_coherent(htt->ar->dev, - (htt->rx_ring.size * - sizeof(htt->rx_ring.paddrs_ring)), - htt->rx_ring.paddrs_ring, + htt->rx_ops->htt_get_rx_ring_size(htt), + vaddr_ring, htt->rx_ring.base_paddr); err_dma_ring: kfree(htt->rx_ring.netbufs_ring); @@ -974,16 +1072,25 @@ static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size) return out; } -static void ath10k_process_rx(struct ath10k *ar, - struct ieee80211_rx_status *rx_status, - struct sk_buff *skb) +static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar, + struct ieee80211_rx_status *rx_status, + struct sk_buff *skb) +{ + struct ieee80211_rx_status *status; + + status = IEEE80211_SKB_RXCB(skb); + *status = *rx_status; + + __skb_queue_tail(&ar->htt.rx_msdus_q, skb); +} + +static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb) { struct ieee80211_rx_status *status; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; char tid[32]; status = IEEE80211_SKB_RXCB(skb); - *status = *rx_status; ath10k_dbg(ar, ATH10K_DBG_DATA, "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", @@ -1517,7 +1624,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, } } -static void ath10k_htt_rx_h_deliver(struct ath10k *ar, +static void ath10k_htt_rx_h_enqueue(struct ath10k *ar, struct sk_buff_head *amsdu, struct ieee80211_rx_status *status) { @@ -1540,7 +1647,7 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar, status->flag |= RX_FLAG_ALLOW_SAME_PN; } - ath10k_process_rx(ar, status, msdu); + ath10k_htt_rx_h_queue_msdu(ar, status, msdu); } } @@ -1652,7 +1759,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) struct ath10k *ar = htt->ar; struct ieee80211_rx_status *rx_status = &htt->rx_status; struct sk_buff_head amsdu; - int ret, num_msdus; + int ret; __skb_queue_head_init(&amsdu); @@ -1674,7 +1781,6 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) return ret; } - num_msdus = skb_queue_len(&amsdu); ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); /* only for ret = 1 indicates chained msdus */ @@ -1683,9 +1789,9 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true); - ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); + ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status); - return num_msdus; + return 0; } static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt, @@ -1893,15 +1999,14 @@ static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status, RX_FLAG_MMIC_STRIPPED; } -static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar, - struct sk_buff_head *list) +static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar, + struct sk_buff_head *list) { struct ath10k_htt *htt = &ar->htt; struct ieee80211_rx_status *status = &htt->rx_status; struct htt_rx_offload_msdu *rx; struct sk_buff *msdu; size_t offset; - int num_msdu = 0; while ((msdu = __skb_dequeue(list))) { /* Offloaded frames don't have Rx descriptor. 
Instead they have @@ -1940,10 +2045,8 @@ static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar, ath10k_htt_rx_h_rx_offload_prot(status, msdu); ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id); - ath10k_process_rx(ar, status, msdu); - num_msdu++; + ath10k_htt_rx_h_queue_msdu(ar, status, msdu); } - return num_msdu; } static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) @@ -1959,7 +2062,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) u8 tid; bool offload; bool frag; - int ret, num_msdus = 0; + int ret; lockdep_assert_held(&htt->rx_ring.lock); @@ -1981,7 +2084,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n", vdev_id, peer_id, tid, offload, frag, msdu_count); - if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) { + if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) { ath10k_warn(ar, "dropping invalid in order rx indication\n"); return -EINVAL; } @@ -1990,7 +2093,13 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) * extracted and processed. */ __skb_queue_head_init(&list); - ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list); + if (ar->hw_params.target_64bit) + ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind, + &list); + else + ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind, + &list); + if (ret < 0) { ath10k_warn(ar, "failed to pop paddr list: %d\n", ret); htt->rx_confused = true; @@ -2001,7 +2110,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) * separately. */ if (offload) - num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list); + ath10k_htt_rx_h_rx_offload(ar, &list); while (!skb_queue_empty(&list)) { __skb_queue_head_init(&amsdu); @@ -2014,11 +2123,10 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) * better to report something than nothing though. This * should still give an idea about rx rate to the user. 
*/ - num_msdus += skb_queue_len(&amsdu); ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); ath10k_htt_rx_h_filter(ar, &amsdu, status); ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false); - ath10k_htt_rx_h_deliver(ar, &amsdu, status); + ath10k_htt_rx_h_enqueue(ar, &amsdu, status); break; case -EAGAIN: /* fall through */ @@ -2030,7 +2138,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) return -EIO; } } - return num_msdus; + return ret; } static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, @@ -2449,6 +2557,62 @@ out: rcu_read_unlock(); } +static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data) +{ + struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data; + struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; + struct ath10k_10_2_peer_tx_stats *tx_stats; + struct ieee80211_sta *sta; + struct ath10k_peer *peer; + u16 log_type = __le16_to_cpu(hdr->log_type); + u32 peer_id = 0, i; + + if (log_type != ATH_PKTLOG_TYPE_TX_STAT) + return; + + tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) + + ATH10K_10_2_TX_STATS_OFFSET); + + if (!tx_stats->tx_ppdu_cnt) + return; + + peer_id = tx_stats->peer_id; + + rcu_read_lock(); + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find_by_id(ar, peer_id); + if (!peer) { + ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n", + peer_id); + goto out; + } + + sta = peer->sta; + for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) { + p_tx_stats->succ_bytes = + __le16_to_cpu(tx_stats->success_bytes[i]); + p_tx_stats->retry_bytes = + __le16_to_cpu(tx_stats->retry_bytes[i]); + p_tx_stats->failed_bytes = + __le16_to_cpu(tx_stats->failed_bytes[i]); + p_tx_stats->ratecode = tx_stats->ratecode[i]; + p_tx_stats->flags = tx_stats->flags[i]; + p_tx_stats->succ_pkts = tx_stats->success_pkts[i]; + p_tx_stats->retry_pkts = tx_stats->retry_pkts[i]; + p_tx_stats->failed_pkts = tx_stats->failed_pkts[i]; + + ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); + } + spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); + + return; + +out: + spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); +} + bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htt *htt = &ar->htt; @@ -2566,6 +2730,10 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) skb->len - offsetof(struct htt_resp, pktlog_msg.payload)); + + if (ath10k_peer_stats_enabled(ar)) + ath10k_fetch_10_2_tx_stats(ar, + resp->pktlog_msg.payload); break; } case HTT_T2H_MSG_TYPE_RX_FLUSH: { @@ -2631,6 +2799,24 @@ void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, } EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler); +static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget) +{ + struct sk_buff *skb; + + while (quota < budget) { + if (skb_queue_empty(&ar->htt.rx_msdus_q)) + break; + + skb = __skb_dequeue(&ar->htt.rx_msdus_q); + if (!skb) + break; + ath10k_process_rx(ar, skb); + quota++; + } + + return quota; +} + int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget) { struct ath10k_htt *htt = &ar->htt; @@ -2638,63 +2824,44 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget) struct sk_buff_head tx_ind_q; struct sk_buff *skb; unsigned long flags; - int quota = 0, done, num_rx_msdus; + int quota = 0, done, ret; bool resched_napi = false; __skb_queue_head_init(&tx_ind_q); - /* Since in-ord-ind can deliver more than 1 A-MSDU in single event, - * process it first to utilize full available quota. 
+ /* Process pending frames before dequeuing more data + * from hardware. */ - while (quota < budget) { - if (skb_queue_empty(&htt->rx_in_ord_compl_q)) - break; - - skb = __skb_dequeue(&htt->rx_in_ord_compl_q); - if (!skb) { - resched_napi = true; - goto exit; - } + quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); + if (quota == budget) { + resched_napi = true; + goto exit; + } + while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) { spin_lock_bh(&htt->rx_ring.lock); - num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb); + ret = ath10k_htt_rx_in_ord_ind(ar, skb); spin_unlock_bh(&htt->rx_ring.lock); - if (num_rx_msdus < 0) { - resched_napi = true; - goto exit; - } dev_kfree_skb_any(skb); - if (num_rx_msdus > 0) - quota += num_rx_msdus; - - if ((quota > ATH10K_NAPI_QUOTA_LIMIT) && - !skb_queue_empty(&htt->rx_in_ord_compl_q)) { + if (ret == -EIO) { resched_napi = true; goto exit; } } - while (quota < budget) { - /* no more data to receive */ - if (!atomic_read(&htt->num_mpdus_ready)) - break; - - num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt); - if (num_rx_msdus < 0) { + while (atomic_read(&htt->num_mpdus_ready)) { + ret = ath10k_htt_rx_handle_amsdu(htt); + if (ret == -EIO) { resched_napi = true; goto exit; } - - quota += num_rx_msdus; atomic_dec(&htt->num_mpdus_ready); - if ((quota > ATH10K_NAPI_QUOTA_LIMIT) && - atomic_read(&htt->num_mpdus_ready)) { - resched_napi = true; - goto exit; - } } + /* Deliver received data after processing data from hardware */ + quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); + /* From NAPI documentation: * The napi poll() function may also process TX completions, in which * case if it processes the entire TX ring then it should count that @@ -2732,3 +2899,29 @@ exit: return done; } EXPORT_SYMBOL(ath10k_htt_txrx_compl_task); + +static const struct ath10k_htt_rx_ops htt_rx_ops_32 = { + .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32, + .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32, + .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32, + .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32, + .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32, +}; + +static const struct ath10k_htt_rx_ops htt_rx_ops_64 = { + .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64, + .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64, + .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64, + .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64, + .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64, +}; + +void ath10k_htt_set_rx_ops(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + + if (ar->hw_params.target_64bit) + htt->rx_ops = &htt_rx_ops_64; + else + htt->rx_ops = &htt_rx_ops_32; +} diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 685faac1368f..d334b7be1fea 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 
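[The txrx completion rework above decouples dequeuing from delivery: completed frames are first staged on rx_msdus_q, then handed up in budget-bounded batches by ath10k_htt_rx_deliver_msdu(), with the poll rescheduled when the quota is exhausted while work remains. A hedged sketch of that quota/budget loop; the queue is modeled by a plain counter and all names are illustrative.]

#include <stdio.h>

#define BUDGET 64

/* Stand-in for rx_msdus_q: pretend 100 frames are queued. */
static int pending = 100;

/* Mirrors the shape of ath10k_htt_rx_deliver_msdu(): deliver frames
 * until the NAPI budget is exhausted or the queue drains. */
static int deliver_msdu(int quota, int budget)
{
        while (quota < budget && pending > 0) {
                pending--;
                quota++;
        }
        return quota;
}

int main(void)
{
        int quota = deliver_msdu(0, BUDGET);
        int resched = (quota == BUDGET && pending > 0);

        /* Spending the whole budget with work left means: stop, report
         * the budget as consumed, and let NAPI poll again later. */
        printf("delivered %d, %d left, resched %d\n", quota, pending, resched);
        return 0;
}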
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -229,50 +229,91 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) idr_remove(&htt->pending_tx, msdu_id); } -static void ath10k_htt_tx_free_cont_txbuf(struct ath10k_htt *htt) +static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; size_t size; - if (!htt->txbuf.vaddr) + if (!htt->txbuf.vaddr_txbuff_32) return; - size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf); - dma_free_coherent(ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr); - htt->txbuf.vaddr = NULL; + size = htt->txbuf.size; + dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32, + htt->txbuf.paddr); + htt->txbuf.vaddr_txbuff_32 = NULL; } -static int ath10k_htt_tx_alloc_cont_txbuf(struct ath10k_htt *htt) +static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; size_t size; - size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf); - htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, &htt->txbuf.paddr, - GFP_KERNEL); - if (!htt->txbuf.vaddr) + size = htt->max_num_pending_tx * + sizeof(struct ath10k_htt_txbuf_32); + + htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size, + &htt->txbuf.paddr, + GFP_KERNEL); + if (!htt->txbuf.vaddr_txbuff_32) return -ENOMEM; + htt->txbuf.size = size; + return 0; } -static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt) +static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; size_t size; - if (!htt->frag_desc.vaddr) + if (!htt->txbuf.vaddr_txbuff_64) return; - size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); + size = htt->txbuf.size; + dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64, + htt->txbuf.paddr); + htt->txbuf.vaddr_txbuff_64 = NULL; +} + +static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + size_t size; + + size = htt->max_num_pending_tx * + sizeof(struct ath10k_htt_txbuf_64); + + htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size, + &htt->txbuf.paddr, + GFP_KERNEL); + if (!htt->txbuf.vaddr_txbuff_64) + return -ENOMEM; + + htt->txbuf.size = size; + + return 0; +} + +static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt) +{ + size_t size; + + if (!htt->frag_desc.vaddr_desc_32) + return; + + size = htt->max_num_pending_tx * + sizeof(struct htt_msdu_ext_desc); dma_free_coherent(htt->ar->dev, size, - htt->frag_desc.vaddr, + htt->frag_desc.vaddr_desc_32, htt->frag_desc.paddr); - htt->frag_desc.vaddr = NULL; + + htt->frag_desc.vaddr_desc_32 = NULL; } -static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt) +static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; size_t size; @@ -280,12 +321,57 @@ static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt) if (!ar->hw_params.continuous_frag_desc) return 0; - size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); - htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size, - &htt->frag_desc.paddr, - GFP_KERNEL); - if (!htt->frag_desc.vaddr) + size = htt->max_num_pending_tx * + sizeof(struct htt_msdu_ext_desc); + htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size, + &htt->frag_desc.paddr, + GFP_KERNEL); + if (!htt->frag_desc.vaddr_desc_32) { + ath10k_err(ar, "failed to alloc fragment desc 
memory\n"); return -ENOMEM; + } + htt->frag_desc.size = size; + + return 0; +} + +static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt) +{ + size_t size; + + if (!htt->frag_desc.vaddr_desc_64) + return; + + size = htt->max_num_pending_tx * + sizeof(struct htt_msdu_ext_desc_64); + + dma_free_coherent(htt->ar->dev, + size, + htt->frag_desc.vaddr_desc_64, + htt->frag_desc.paddr); + + htt->frag_desc.vaddr_desc_64 = NULL; +} + +static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + size_t size; + + if (!ar->hw_params.continuous_frag_desc) + return 0; + + size = htt->max_num_pending_tx * + sizeof(struct htt_msdu_ext_desc_64); + + htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size, + &htt->frag_desc.paddr, + GFP_KERNEL); + if (!htt->frag_desc.vaddr_desc_64) { + ath10k_err(ar, "failed to alloc fragment desc memory\n"); + return -ENOMEM; + } + htt->frag_desc.size = size; return 0; } @@ -357,13 +443,13 @@ static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt) struct ath10k *ar = htt->ar; int ret; - ret = ath10k_htt_tx_alloc_cont_txbuf(htt); + ret = htt->tx_ops->htt_alloc_txbuff(htt); if (ret) { ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret); return ret; } - ret = ath10k_htt_tx_alloc_cont_frag_desc(htt); + ret = htt->tx_ops->htt_alloc_frag_desc(htt); if (ret) { ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret); goto free_txbuf; @@ -387,10 +473,10 @@ free_txq: ath10k_htt_tx_free_txq(htt); free_frag_desc: - ath10k_htt_tx_free_cont_frag_desc(htt); + htt->tx_ops->htt_free_frag_desc(htt); free_txbuf: - ath10k_htt_tx_free_cont_txbuf(htt); + htt->tx_ops->htt_free_txbuff(htt); return ret; } @@ -444,9 +530,9 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt) if (!htt->tx_mem_allocated) return; - ath10k_htt_tx_free_cont_txbuf(htt); + htt->tx_ops->htt_free_txbuff(htt); ath10k_htt_tx_free_txq(htt); - ath10k_htt_tx_free_cont_frag_desc(htt); + htt->tx_ops->htt_free_frag_desc(htt); ath10k_htt_tx_free_txdone_fifo(htt); htt->tx_mem_allocated = false; } @@ -545,12 +631,12 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) return 0; } -int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) +static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; struct sk_buff *skb; struct htt_cmd *cmd; - struct htt_frag_desc_bank_cfg *cfg; + struct htt_frag_desc_bank_cfg32 *cfg; int ret, size; u8 info; @@ -562,7 +648,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) return -EINVAL; } - size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg); + size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32); skb = ath10k_htc_alloc_skb(ar, size); if (!skb) return -ENOMEM; @@ -579,7 +665,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) ar->running_fw->fw_file.fw_features)) info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID; - cfg = &cmd->frag_desc_bank_cfg; + cfg = &cmd->frag_desc_bank_cfg32; cfg->info = info; cfg->num_banks = 1; cfg->desc_size = sizeof(struct htt_msdu_ext_desc); @@ -607,12 +693,112 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) return 0; } -int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) +static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; struct sk_buff *skb; struct htt_cmd *cmd; - struct htt_rx_ring_setup_ring *ring; + struct htt_frag_desc_bank_cfg64 *cfg; + int ret, size; + u8 info; + + if 
(!ar->hw_params.continuous_frag_desc) + return 0; + + if (!htt->frag_desc.paddr) { + ath10k_warn(ar, "invalid frag desc memory\n"); + return -EINVAL; + } + + size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64); + skb = ath10k_htc_alloc_skb(ar, size); + if (!skb) + return -ENOMEM; + + skb_put(skb, size); + cmd = (struct htt_cmd *)skb->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG; + + info = 0; + info |= SM(htt->tx_q_state.type, + HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE); + + if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->running_fw->fw_file.fw_features)) + info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID; + + cfg = &cmd->frag_desc_bank_cfg64; + cfg->info = info; + cfg->num_banks = 1; + cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64); + cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr); + cfg->bank_id[0].bank_min_id = 0; + cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx - + 1); + + cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr); + cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers); + cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids); + cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE; + cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n"); + + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); + if (ret) { + ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n", + ret); + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring) +{ + struct htt_rx_ring_setup_ring32 *ring = + (struct htt_rx_ring_setup_ring32 *)rx_ring; + +#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4) + ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status)); + ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload)); + ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start)); + ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end)); + ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start)); + ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end)); + ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start)); + ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end)); + ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention)); + ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info)); +#undef desc_offset +} + +static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring) +{ + struct htt_rx_ring_setup_ring64 *ring = + (struct htt_rx_ring_setup_ring64 *)rx_ring; + +#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4) + ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status)); + ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload)); + ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start)); + ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end)); + ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start)); + ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end)); + ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start)); + ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end)); + ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention)); + ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info)); +#undef desc_offset +} + +static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + struct sk_buff *skb; + struct htt_cmd *cmd; + struct 
htt_rx_ring_setup_ring32 *ring; const int num_rx_ring = 1; u16 flags; u32 fw_idx; @@ -626,7 +812,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4)); BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0); - len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr) + len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr) + (sizeof(*ring) * num_rx_ring); skb = ath10k_htc_alloc_skb(ar, len); if (!skb) @@ -635,10 +821,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; - ring = &cmd->rx_setup.rings[0]; + ring = &cmd->rx_setup_32.rings[0]; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG; - cmd->rx_setup.hdr.num_rings = 1; + cmd->rx_setup_32.hdr.num_rings = 1; /* FIXME: do we need all of this? */ flags = 0; @@ -669,21 +855,76 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) ring->flags = __cpu_to_le16(flags); ring->fw_idx_init_val = __cpu_to_le16(fw_idx); -#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4) + ath10k_htt_fill_rx_desc_offset_32(ring); + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } - ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status)); - ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload)); - ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start)); - ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end)); - ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start)); - ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end)); - ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start)); - ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end)); - ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention)); - ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info)); + return 0; +} -#undef desc_offset +static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + struct sk_buff *skb; + struct htt_cmd *cmd; + struct htt_rx_ring_setup_ring64 *ring; + const int num_rx_ring = 1; + u16 flags; + u32 fw_idx; + int len; + int ret; + + /* HW expects the buffer to be an integral number of 4-byte + * "words" + */ + BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4)); + BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0); + + len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr) + + (sizeof(*ring) * num_rx_ring); + skb = ath10k_htc_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + + cmd = (struct htt_cmd *)skb->data; + ring = &cmd->rx_setup_64.rings[0]; + + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG; + cmd->rx_setup_64.hdr.num_rings = 1; + + flags = 0; + flags |= HTT_RX_RING_FLAGS_MAC80211_HDR; + flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD; + flags |= HTT_RX_RING_FLAGS_PPDU_START; + flags |= HTT_RX_RING_FLAGS_PPDU_END; + flags |= HTT_RX_RING_FLAGS_MPDU_START; + flags |= HTT_RX_RING_FLAGS_MPDU_END; + flags |= HTT_RX_RING_FLAGS_MSDU_START; + flags |= HTT_RX_RING_FLAGS_MSDU_END; + flags |= HTT_RX_RING_FLAGS_RX_ATTENTION; + flags |= HTT_RX_RING_FLAGS_FRAG_INFO; + flags |= HTT_RX_RING_FLAGS_UNICAST_RX; + flags |= HTT_RX_RING_FLAGS_MULTICAST_RX; + flags |= HTT_RX_RING_FLAGS_CTRL_RX; + flags |= HTT_RX_RING_FLAGS_MGMT_RX; + flags |= HTT_RX_RING_FLAGS_NULL_RX; + flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX; + fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); + + ring->fw_idx_shadow_reg_paddr = 
__cpu_to_le64(htt->rx_ring.alloc_idx.paddr); + ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr); + ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size); + ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE); + ring->flags = __cpu_to_le16(flags); + ring->fw_idx_init_val = __cpu_to_le16(fw_idx); + + ath10k_htt_fill_rx_desc_offset_64(ring); ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); @@ -895,8 +1136,9 @@ err: return res; } -int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, - struct sk_buff *msdu) +static int ath10k_htt_tx_32(struct ath10k_htt *htt, + enum ath10k_hw_txrx_mode txmode, + struct sk_buff *msdu) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; @@ -904,7 +1146,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct ath10k_hif_sg_item sg_items[2]; - struct ath10k_htt_txbuf *txbuf; + struct ath10k_htt_txbuf_32 *txbuf; struct htt_data_tx_desc_frag *frags; bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET); u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); @@ -917,6 +1159,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, u32 frags_paddr = 0; u32 txbuf_paddr; struct htt_msdu_ext_desc *ext_desc = NULL; + struct htt_msdu_ext_desc *ext_desc_t = NULL; spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); @@ -929,9 +1172,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); - txbuf = &htt->txbuf.vaddr[msdu_id]; + txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id; txbuf_paddr = htt->txbuf.paddr + - (sizeof(struct ath10k_htt_txbuf) * msdu_id); + (sizeof(struct ath10k_htt_txbuf_32) * msdu_id); if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || @@ -962,11 +1205,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, /* pass through */ case ATH10K_HW_TXRX_ETHERNET: if (ar->hw_params.continuous_frag_desc) { - memset(&htt->frag_desc.vaddr[msdu_id], 0, + ext_desc_t = htt->frag_desc.vaddr_desc_32; + memset(&ext_desc_t[msdu_id], 0, sizeof(struct htt_msdu_ext_desc)); frags = (struct htt_data_tx_desc_frag *) - &htt->frag_desc.vaddr[msdu_id].frags; - ext_desc = &htt->frag_desc.vaddr[msdu_id]; + &ext_desc_t[msdu_id].frags; + ext_desc = &ext_desc_t[msdu_id]; frags[0].tword_addr.paddr_lo = __cpu_to_le32(skb_cb->paddr); frags[0].tword_addr.paddr_hi = 0; @@ -1055,9 +1299,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid); ath10k_dbg(ar, ATH10K_DBG_HTT, - "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n", - flags0, flags1, msdu->len, msdu_id, frags_paddr, - (u32)skb_cb->paddr, vdev_id, tid, freq); + "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n", + flags0, flags1, msdu->len, msdu_id, &frags_paddr, + &skb_cb->paddr, vdev_id, tid, freq); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", msdu->data, msdu->len); trace_ath10k_tx_hdr(ar, msdu->data, msdu->len); @@ -1093,3 +1337,239 @@ err_free_msdu_id: err: return res; } + +static int ath10k_htt_tx_64(struct ath10k_htt *htt, + enum ath10k_hw_txrx_mode txmode, + struct sk_buff *msdu) +{ + 
struct ath10k *ar = htt->ar; + struct device *dev = ar->dev; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); + struct ath10k_hif_sg_item sg_items[2]; + struct ath10k_htt_txbuf_64 *txbuf; + struct htt_data_tx_desc_frag *frags; + bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET); + u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); + u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth); + int prefetch_len; + int res; + u8 flags0 = 0; + u16 msdu_id, flags1 = 0; + u16 freq = 0; + dma_addr_t frags_paddr = 0; + u32 txbuf_paddr; + struct htt_msdu_ext_desc_64 *ext_desc = NULL; + struct htt_msdu_ext_desc_64 *ext_desc_t = NULL; + + spin_lock_bh(&htt->tx_lock); + res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); + spin_unlock_bh(&htt->tx_lock); + if (res < 0) + goto err; + + msdu_id = res; + + prefetch_len = min(htt->prefetch_len, msdu->len); + prefetch_len = roundup(prefetch_len, 4); + + txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id; + txbuf_paddr = htt->txbuf.paddr + + (sizeof(struct ath10k_htt_txbuf_64) * msdu_id); + + if ((ieee80211_is_action(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control) || + ieee80211_is_disassoc(hdr->frame_control)) && + ieee80211_has_protected(hdr->frame_control)) { + skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) && + txmode == ATH10K_HW_TXRX_RAW && + ieee80211_has_protected(hdr->frame_control)) { + skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + } + + skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, + DMA_TO_DEVICE); + res = dma_mapping_error(dev, skb_cb->paddr); + if (res) { + res = -EIO; + goto err_free_msdu_id; + } + + if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) + freq = ar->scan.roc_freq; + + switch (txmode) { + case ATH10K_HW_TXRX_RAW: + case ATH10K_HW_TXRX_NATIVE_WIFI: + flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; + /* pass through */ + case ATH10K_HW_TXRX_ETHERNET: + if (ar->hw_params.continuous_frag_desc) { + ext_desc_t = htt->frag_desc.vaddr_desc_64; + memset(&ext_desc_t[msdu_id], 0, + sizeof(struct htt_msdu_ext_desc_64)); + frags = (struct htt_data_tx_desc_frag *) + &ext_desc_t[msdu_id].frags; + ext_desc = &ext_desc_t[msdu_id]; + frags[0].tword_addr.paddr_lo = + __cpu_to_le32(skb_cb->paddr); + frags[0].tword_addr.paddr_hi = + __cpu_to_le16(upper_32_bits(skb_cb->paddr)); + frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len); + + frags_paddr = htt->frag_desc.paddr + + (sizeof(struct htt_msdu_ext_desc_64) * msdu_id); + } else { + frags = txbuf->frags; + frags[0].tword_addr.paddr_lo = + __cpu_to_le32(skb_cb->paddr); + frags[0].tword_addr.paddr_hi = + __cpu_to_le16(upper_32_bits(skb_cb->paddr)); + frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len); + frags[1].tword_addr.paddr_lo = 0; + frags[1].tword_addr.paddr_hi = 0; + frags[1].tword_addr.len_16 = 0; + } + flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); + break; + case ATH10K_HW_TXRX_MGMT: + flags0 |= SM(ATH10K_HW_TXRX_MGMT, + HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); + flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; + + frags_paddr = skb_cb->paddr; + break; + } + + /* Normally all commands go through HTC which manages tx credits for + * each endpoint and notifies when tx is completed. + * + * HTT endpoint is creditless so there's no need to care about HTC + * flags. In that case it is trivial to fill the HTC header here. + * + * MSDU transmission is considered completed upon HTT event. 
This + * implies no relevant resources can be freed until after the event is + * received. That's why HTC tx completion handler itself is ignored by + * setting NULL to transfer_context for all sg items. + * + * There is simply no point in pushing HTT TX_FRM through HTC tx path + * as it's a waste of resources. By bypassing HTC it is possible to + * avoid extra memory allocations, compress data structures and thus + * improve performance. + */ + + txbuf->htc_hdr.eid = htt->eid; + txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) + + sizeof(txbuf->cmd_tx) + + prefetch_len); + txbuf->htc_hdr.flags = 0; + + if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) + flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; + + flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); + flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); + if (msdu->ip_summed == CHECKSUM_PARTIAL && + !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; + flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; + if (ar->hw_params.continuous_frag_desc) + ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE; + } + + /* Prevent firmware from sending up tx inspection requests. There's + * nothing ath10k can do with frames requested for inspection so force + * it to simply rely a regular tx completion with discard status. + */ + flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED; + + txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; + txbuf->cmd_tx.flags0 = flags0; + txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1); + txbuf->cmd_tx.len = __cpu_to_le16(msdu->len); + txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); + + /* fill fragment descriptor */ + txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr); + if (ath10k_mac_tx_frm_has_freq(ar)) { + txbuf->cmd_tx.offchan_tx.peerid = + __cpu_to_le16(HTT_INVALID_PEERID); + txbuf->cmd_tx.offchan_tx.freq = + __cpu_to_le16(freq); + } else { + txbuf->cmd_tx.peerid = + __cpu_to_le32(HTT_INVALID_PEERID); + } + + trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid); + ath10k_dbg(ar, ATH10K_DBG_HTT, + "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n", + flags0, flags1, msdu->len, msdu_id, &frags_paddr, + &skb_cb->paddr, vdev_id, tid, freq); + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", + msdu->data, msdu->len); + trace_ath10k_tx_hdr(ar, msdu->data, msdu->len); + trace_ath10k_tx_payload(ar, msdu->data, msdu->len); + + sg_items[0].transfer_id = 0; + sg_items[0].transfer_context = NULL; + sg_items[0].vaddr = &txbuf->htc_hdr; + sg_items[0].paddr = txbuf_paddr + + sizeof(txbuf->frags); + sg_items[0].len = sizeof(txbuf->htc_hdr) + + sizeof(txbuf->cmd_hdr) + + sizeof(txbuf->cmd_tx); + + sg_items[1].transfer_id = 0; + sg_items[1].transfer_context = NULL; + sg_items[1].vaddr = msdu->data; + sg_items[1].paddr = skb_cb->paddr; + sg_items[1].len = prefetch_len; + + res = ath10k_hif_tx_sg(htt->ar, + htt->ar->htc.endpoint[htt->eid].ul_pipe_id, + sg_items, ARRAY_SIZE(sg_items)); + if (res) + goto err_unmap_msdu; + + return 0; + +err_unmap_msdu: + dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); +err_free_msdu_id: + ath10k_htt_tx_free_msdu_id(htt, msdu_id); +err: + return res; +} + +static const struct ath10k_htt_tx_ops htt_tx_ops_32 = { + .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32, + .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32, + .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32, + .htt_free_frag_desc = 
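The two ops tables that follow gather the _32/_64 variants behind a single indirection, and ath10k_htt_set_tx_ops() then picks one table per target. As a rough standalone model of that dispatch only - all names here (my_htt, my_tx_ops, setup_tx_ops) are hypothetical, not the driver's structures:

/* sketch.c - per-target ops-table dispatch in miniature; build: cc -Wall sketch.c */
#include <stdbool.h>
#include <stdio.h>

struct my_htt;

struct my_tx_ops {
	int (*send_rx_ring_cfg)(struct my_htt *htt);
	int (*tx)(struct my_htt *htt, const void *msdu, int len);
};

struct my_htt {
	const struct my_tx_ops *tx_ops;
	bool target_64bit;
};

static int send_rx_ring_cfg_32(struct my_htt *htt)
{
	(void)htt;
	puts("rx ring cfg, 32-bit paddrs");
	return 0;
}

static int tx_32(struct my_htt *htt, const void *msdu, int len)
{
	(void)htt; (void)msdu;
	printf("tx %d bytes, 32-bit descriptors\n", len);
	return 0;
}

static int send_rx_ring_cfg_64(struct my_htt *htt)
{
	(void)htt;
	puts("rx ring cfg, 64-bit paddrs");
	return 0;
}

static int tx_64(struct my_htt *htt, const void *msdu, int len)
{
	(void)htt; (void)msdu;
	printf("tx %d bytes, 64-bit descriptors\n", len);
	return 0;
}

static const struct my_tx_ops tx_ops_32 = {
	.send_rx_ring_cfg = send_rx_ring_cfg_32,
	.tx = tx_32,
};

static const struct my_tx_ops tx_ops_64 = {
	.send_rx_ring_cfg = send_rx_ring_cfg_64,
	.tx = tx_64,
};

/* the one place that knows about the target; callers just go through
 * htt->tx_ops, much as mac.c now calls htt->tx_ops->htt_tx()
 */
static void setup_tx_ops(struct my_htt *htt)
{
	htt->tx_ops = htt->target_64bit ? &tx_ops_64 : &tx_ops_32;
}

int main(void)
{
	struct my_htt htt = { .target_64bit = true };	/* e.g. WCN3990 */
	char frame[64] = { 0 };

	setup_tx_ops(&htt);
	htt.tx_ops->send_rx_ring_cfg(&htt);
	htt.tx_ops->tx(&htt, frame, sizeof(frame));
	return 0;
}

The payoff is that hot paths carry no per-packet target checks: the branch is taken once at attach time and the tables can stay const.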
+ +static const struct ath10k_htt_tx_ops htt_tx_ops_32 = { + .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32, + .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32, + .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32, + .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32, + .htt_tx = ath10k_htt_tx_32, + .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32, + .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32, +}; + +static const struct ath10k_htt_tx_ops htt_tx_ops_64 = { + .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64, + .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64, + .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64, + .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64, + .htt_tx = ath10k_htt_tx_64, + .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64, + .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64, +}; + +void ath10k_htt_set_tx_ops(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + + if (ar->hw_params.target_64bit) + htt->tx_ops = &htt_tx_ops_64; + else + htt->tx_ops = &htt_tx_ops_32; +} diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index 88955bbe20bd..497ac33e0fbf 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -931,3 +931,5 @@ const struct ath10k_hw_ops qca6174_ops = { .set_coverage_class = ath10k_hw_qca988x_set_coverage_class, .enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock, }; + +const struct ath10k_hw_ops wcn3990_ops = {}; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 05f26e5858ad..6203bc65799b 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -128,6 +128,10 @@ enum qca9377_chip_id_rev { #define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin" #define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234 +/* WCN3990 1.0 definitions */ +#define WCN3990_HW_1_0_DEV_VERSION ATH10K_HW_WCN3990 +#define WCN3990_HW_1_0_FW_DIR ATH10K_FW_DIR "/WCN3990/hw3.0" + #define ATH10K_FW_FILE_BASE "firmware" #define ATH10K_FW_API_MAX 6 #define ATH10K_FW_API_MIN 2 @@ -553,6 +557,16 @@ struct ath10k_hw_params { /* Number of ciphers supported (i.e First N) in cipher_suites array */ int n_cipher_suites; + + u32 num_peers; + u32 ast_skid_limit; + u32 num_wds_entries; + + /* Targets that support physical addressing above 32 bits */ + bool target_64bit; + + /* Target rx ring fill level */ + u32 rx_ring_fill_level; }; struct htt_rx_desc; @@ -567,6 +581,7 @@ struct ath10k_hw_ops { extern const struct ath10k_hw_ops qca988x_ops; extern const struct ath10k_hw_ops qca99x0_ops; extern const struct ath10k_hw_ops qca6174_ops; +extern const struct ath10k_hw_ops wcn3990_ops; extern const struct ath10k_hw_clk_params qca6174_clk[]; @@ -663,6 +678,11 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, #define TARGET_TLV_NUM_MSDU_DESC (1024 + 32) #define TARGET_TLV_NUM_WOW_PATTERNS 22 +/* Target specific defines for WMI-HL-1.0 firmware */ +#define TARGET_HL_10_TLV_NUM_PEERS 14 +#define TARGET_HL_10_TLV_AST_SKID_LIMIT 6 +#define TARGET_HL_10_TLV_NUM_WDS_ENTRIES 2 + /* Diagnostic Window */ #define CE_DIAG_PIPE 7 @@ -868,6 +888,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, #define PCIE_INTR_CLR_ADDRESS ar->regs->pcie_intr_clr_address #define SCRATCH_3_ADDRESS ar->regs->scratch_3_address #define CPU_INTR_ADDRESS 0x0010 +#define FW_RAM_CONFIG_ADDRESS 0x0018 #define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 0a947eef348d..ebb3f1b046f3 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -2563,7 +2563,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar, } break; case WMI_VDEV_TYPE_STA: - if (vif->bss_conf.qos) + if (sta->wme) arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; break; case WMI_VDEV_TYPE_IBSS: @@ -3574,7 +3574,9 @@ ath10k_mac_tx_h_get_txpath(struct ath10k *ar, return ATH10K_MAC_TX_HTT; case ATH10K_HW_TXRX_MGMT: if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, - ar->running_fw->fw_file.fw_features)) + ar->running_fw->fw_file.fw_features) || + test_bit(WMI_SERVICE_MGMT_TX_WMI, + ar->wmi.svc_map)) return ATH10K_MAC_TX_WMI_MGMT; else if (ar->htt.target_version_major >= 3) return ATH10K_MAC_TX_HTT; @@ -3595,7 +3597,7 @@ static int ath10k_mac_tx_submit(struct ath10k *ar, switch (txpath) { case ATH10K_MAC_TX_HTT: - ret = ath10k_htt_tx(htt, txmode, skb); + ret = htt->tx_ops->htt_tx(htt, txmode, skb); break; case ATH10K_MAC_TX_HTT_MGMT: ret = ath10k_htt_mgmt_tx(htt, skb); @@ -6201,6 +6203,16 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, "mac vdev %d peer delete %pM sta %pK (sta gone)\n", arvif->vdev_id, sta->addr, sta); + if (sta->tdls) { + ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, + sta, + WMI_TDLS_PEER_STATE_TEARDOWN); + if (ret) + ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n", + sta->addr, + WMI_TDLS_PEER_STATE_TEARDOWN, ret); + } + ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", @@ -7536,6 +7548,16 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, arvif->vdev_id, ret); } + if (ath10k_peer_stats_enabled(ar)) { + ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS; + ret = ath10k_wmi_pdev_pktlog_enable(ar, + ar->pktlog_filter); + if (ret) { + ath10k_warn(ar, "failed to enable pktlog %d\n", ret); + goto err_stop; + } + } + mutex_unlock(&ar->conf_mutex); return 0; @@ -7620,6 +7642,34 @@ static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw, peer->removed = true; } +static void ath10k_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + + if (!ath10k_peer_stats_enabled(ar)) + return; + + sinfo->rx_duration = arsta->rx_duration; + sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION; + + if (!arsta->txrate.legacy && !arsta->txrate.nss) + return; + + if (arsta->txrate.legacy) { + sinfo->txrate.legacy = arsta->txrate.legacy; + } else { + sinfo->txrate.mcs = arsta->txrate.mcs; + sinfo->txrate.nss = arsta->txrate.nss; + sinfo->txrate.bw = arsta->txrate.bw; + } + sinfo->txrate.flags = arsta->txrate.flags; + sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE; +} + static const struct ieee80211_ops ath10k_ops = { .tx = ath10k_mac_op_tx, .wake_tx_queue = ath10k_mac_op_wake_tx_queue, @@ -7661,6 +7711,7 @@ static const struct ieee80211_ops ath10k_ops = { .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx, .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx, .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove, + .sta_statistics = ath10k_sta_statistics, CFG80211_TESTMODE_CMD(ath10k_tm_cmd) @@ -7671,7 +7722,6 @@ static const struct ieee80211_ops ath10k_ops = { #endif #ifdef CONFIG_MAC80211_DEBUGFS .sta_add_debugfs = ath10k_sta_add_debugfs, - .sta_statistics = 
ath10k_sta_statistics, #endif }; @@ -8244,7 +8294,8 @@ int ath10k_mac_register(struct ath10k *ar) if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) || test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) { ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; - ieee80211_hw_set(ar->hw, TDLS_WIDER_BW); + if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map)) + ieee80211_hw_set(ar->hw, TDLS_WIDER_BW); } ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; @@ -8329,15 +8380,6 @@ int ath10k_mac_register(struct ath10k *ar) ath10k_warn(ar, "failed to initialise DFS pattern detector\n"); } - /* Current wake_tx_queue implementation imposes a significant - * performance penalty in some setups. The tx scheduling code needs - * more work anyway so disable the wake_tx_queue unless firmware - * supports the pull-push mechanism. - */ - if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, - ar->running_fw->fw_file.fw_features)) - ar->ops->wake_tx_queue = NULL; - ret = ath10k_mac_init_rd(ar); if (ret) { ath10k_err(ar, "failed to derive regdom: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index 553747bc19ed..81f8d6c0af35 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index ffea348b2190..355db6a0fcf3 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -23,6 +23,7 @@ #include "core.h" #include "debug.h" +#include "coredump.h" #include "targaddrs.h" #include "bmi.h" @@ -51,6 +52,11 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); #define ATH10K_PCI_TARGET_WAIT 3000 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 +/* Maximum number of bytes that can be handled atomically by + * diag read and write. 
+ */ +#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000 + static const struct pci_device_id ath10k_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ @@ -785,7 +791,7 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) ATH10K_SKB_RXCB(skb)->paddr = paddr; spin_lock_bh(&ce->ce_lock); - ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); + ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr); spin_unlock_bh(&ce->ce_lock); if (ret) { dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), @@ -923,7 +929,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, nbytes = min_t(unsigned int, remaining_bytes, DIAG_TRANSFER_LIMIT); - ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data); + ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &ce_data, ce_data); if (ret != 0) goto done; @@ -1089,7 +1095,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); /* Set up to receive directly into Target(!) address */ - ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address); + ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address); if (ret != 0) goto done; @@ -1461,6 +1467,215 @@ static void ath10k_pci_dump_registers(struct ath10k *ar, crash_data->registers[i] = reg_dump_values[i]; } +static int ath10k_pci_dump_memory_section(struct ath10k *ar, + const struct ath10k_mem_region *mem_region, + u8 *buf, size_t buf_len) +{ + const struct ath10k_mem_section *cur_section, *next_section; + unsigned int count, section_size, skip_size; + int ret, i, j; + + if (!mem_region || !buf) + return 0; + + cur_section = &mem_region->section_table.sections[0]; + + if (mem_region->start > cur_section->start) { + ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n", + mem_region->start, cur_section->start); + return 0; + } + + skip_size = cur_section->start - mem_region->start; + + /* fill the gap between the first register section and register + * start address + */ + for (i = 0; i < skip_size; i++) { + *buf = ATH10K_MAGIC_NOT_COPIED; + buf++; + } + + count = 0; + + for (i = 0; cur_section != NULL; i++) { + section_size = cur_section->end - cur_section->start; + + if (section_size <= 0) { + ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n", + cur_section->start, + cur_section->end); + break; + } + + if ((i + 1) == mem_region->section_table.size) { + /* last section */ + next_section = NULL; + skip_size = 0; + } else { + next_section = cur_section + 1; + + if (cur_section->end > next_section->start) { + ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n", + next_section->start, + cur_section->end); + break; + } + + skip_size = next_section->start - cur_section->end; + } + + if (buf_len < (skip_size + section_size)) { + ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len); + break; + } + + buf_len -= skip_size + section_size; + + /* read section to dest memory */ + ret = ath10k_pci_diag_read_mem(ar, cur_section->start, + buf, section_size); + if (ret) { + ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n", + cur_section->start, ret); + break; + } + + buf += section_size; + count += section_size; + + /* fill in the gap between this section and the next */ + for (j = 0; j < skip_size; j++) { + *buf = ATH10K_MAGIC_NOT_COPIED; + buf++; + } + + count += skip_size; + + if 
(!next_section) + /* this was the last section */ + break; + + cur_section = next_section; + } + + return count; +} + +static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config) +{ + u32 val; + + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + + FW_RAM_CONFIG_ADDRESS, config); + + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + FW_RAM_CONFIG_ADDRESS); + if (val != config) { + ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n", + val, config); + return -EIO; + } + + return 0; +} + +static void ath10k_pci_dump_memory(struct ath10k *ar, + struct ath10k_fw_crash_data *crash_data) +{ + const struct ath10k_hw_mem_layout *mem_layout; + const struct ath10k_mem_region *current_region; + struct ath10k_dump_ram_data_hdr *hdr; + u32 count, shift; + size_t buf_len; + int ret, i; + u8 *buf; + + lockdep_assert_held(&ar->data_lock); + + if (!crash_data) + return; + + mem_layout = ath10k_coredump_get_mem_layout(ar); + if (!mem_layout) + return; + + current_region = &mem_layout->region_table.regions[0]; + + buf = crash_data->ramdump_buf; + buf_len = crash_data->ramdump_buf_len; + + memset(buf, 0, buf_len); + + for (i = 0; i < mem_layout->region_table.size; i++) { + count = 0; + + if (current_region->len > buf_len) { + ath10k_warn(ar, "memory region %s size %d is larger than the remaining ramdump buffer size %zu\n", + current_region->name, + current_region->len, + buf_len); + break; + } + + /* To get IRAM dump, the host driver needs to switch target + * ram config from DRAM to IRAM. + */ + if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 || + current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) { + shift = current_region->start >> 20; + + ret = ath10k_pci_set_ram_config(ar, shift); + if (ret) { + ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n", + current_region->name, ret); + break; + } + } + + /* Reserve space for the header. */ + hdr = (void *)buf; + buf += sizeof(*hdr); + buf_len -= sizeof(*hdr); + + if (current_region->section_table.size > 0) { + /* Copy each section individually. */ + count = ath10k_pci_dump_memory_section(ar, + current_region, + buf, + current_region->len); + } else { + /* No individual memory sections defined so we can + * copy the entire memory region. + */ + ret = ath10k_pci_diag_read_mem(ar, + current_region->start, + buf, + current_region->len); + if (ret) { + ath10k_warn(ar, "failed to copy ramdump region %s: %d\n", + current_region->name, ret); + break; + } + + count = current_region->len; + } + + hdr->region_type = cpu_to_le32(current_region->type); + hdr->start = cpu_to_le32(current_region->start); + hdr->length = cpu_to_le32(count); + + if (count == 0) + /* Note: the header remains, just with zero length.
*/ + break; + + buf += count; + buf_len -= count; + + current_region++; + } +} + static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) { struct ath10k_fw_crash_data *crash_data; @@ -1470,7 +1685,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) ar->stats.fw_crash_counter++; - crash_data = ath10k_debug_get_new_fw_crash_data(ar); + crash_data = ath10k_coredump_new(ar); if (crash_data) scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid); @@ -1481,6 +1696,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) ath10k_print_driver_info(ar); ath10k_pci_dump_registers(ar, crash_data); ath10k_ce_dump_registers(ar, crash_data); + ath10k_pci_dump_memory(ar, crash_data); spin_unlock_bh(&ar->data_lock); @@ -1858,7 +2074,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer); if (ret) { - u32 unused_buffer; + dma_addr_t unused_buffer; unsigned int unused_nbytes; unsigned int unused_id; @@ -1871,7 +2087,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, err_resp: if (resp) { - u32 unused_buffer; + dma_addr_t unused_buffer; ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer); dma_unmap_single(ar->dev, resp_paddr, diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 08704fbc11e3..e52fd83156b6 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index 28da14398951..545deb6d7af1 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -210,6 +210,10 @@ struct rx_frag_info { u8 ring1_more_count; u8 ring2_more_count; u8 ring3_more_count; + u8 ring4_more_count; + u8 ring5_more_count; + u8 ring6_more_count; + u8 ring7_more_count; } __packed; /* @@ -471,10 +475,16 @@ struct rx_msdu_start_qca99x0 { __le32 info2; /* %RX_MSDU_START_INFO2_ */ } __packed; +struct rx_msdu_start_wcn3990 { + __le32 info2; /* %RX_MSDU_START_INFO2_ */ + __le32 info3; /* %RX_MSDU_START_INFO3_ */ +} __packed; + struct rx_msdu_start { struct rx_msdu_start_common common; union { struct rx_msdu_start_qca99x0 qca99x0; + struct rx_msdu_start_wcn3990 wcn3990; } __packed; } __packed; @@ -595,10 +605,23 @@ struct rx_msdu_end_qca99x0 { __le32 info2; } __packed; +struct rx_msdu_end_wcn3990 { + __le32 ipv6_crc; + __le32 tcp_seq_no; + __le32 tcp_ack_no; + __le32 info1; + __le32 info2; + __le32 rule_indication_0; + __le32 rule_indication_1; + __le32 rule_indication_2; + __le32 rule_indication_3; +} __packed; + struct rx_msdu_end { struct rx_msdu_end_common common; union { struct rx_msdu_end_qca99x0 qca99x0; + struct rx_msdu_end_wcn3990 wcn3990; } __packed; } __packed; @@ -963,6 +986,12 @@ struct rx_pkt_end { __le32 phy_timestamp_2; } __packed; +struct rx_pkt_end_wcn3990 { + __le32 info0; /* %RX_PKT_END_INFO0_ */ + __le64 phy_timestamp_1; + __le64 phy_timestamp_2; +} __packed; + #define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK 0x00003fff #define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB 0 #define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK 0x1fff8000 @@ -998,6 +1027,12 @@ struct rx_location_info { __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */ } __packed; +struct rx_location_info_wcn3990 { + __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */ + __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */ + __le32 rx_location_info2; /* %RX_LOCATION_INFO2_ */ +} __packed; + enum rx_phy_ppdu_end_info0 { RX_PHY_PPDU_END_INFO0_ERR_RADAR = BIT(2), RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT = BIT(3), @@ -1086,6 +1121,20 @@ struct rx_ppdu_end_qca9984 { __le16 info1; /* %RX_PPDU_END_INFO1_ */ } __packed; +struct rx_ppdu_end_wcn3990 { + struct rx_pkt_end_wcn3990 rx_pkt_end; + struct rx_location_info_wcn3990 rx_location_info; + struct rx_phy_ppdu_end rx_phy_ppdu_end; + __le32 rx_timing_offset; + __le32 reserved_info_0; + __le32 reserved_info_1; + __le32 rx_antenna_info; + __le32 rx_coex_info; + __le32 rx_mpdu_cnt_info; + __le64 phy_timestamp_tx; + __le32 rx_bb_length; +} __packed; + struct rx_ppdu_end { struct rx_ppdu_end_common common; union { @@ -1093,6 +1142,7 @@ struct rx_ppdu_end { struct rx_ppdu_end_qca6174 qca6174; struct rx_ppdu_end_qca99x0 qca99x0; struct rx_ppdu_end_qca9984 qca9984; + struct rx_ppdu_end_wcn3990 wcn3990; } __packed; } __packed; diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c index 2048b1e5262b..af6995de7e00 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.c +++ b/drivers/net/wireless/ath/ath10k/spectral.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Qualcomm Atheros, Inc. + * Copyright (c) 2013-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h index 89b0ad769d4f..13276f4dc12c 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.h +++ b/drivers/net/wireless/ath/ath10k/spectral.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Qualcomm Atheros, Inc. + * Copyright (c) 2013-2015 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -44,7 +44,7 @@ enum ath10k_spectral_mode { SPECTRAL_MANUAL, }; -#ifdef CONFIG_ATH10K_DEBUGFS +#ifdef CONFIG_ATH10K_SPECTRAL int ath10k_spectral_process_fft(struct ath10k *ar, struct wmi_phyerr_ev_arg *phyerr, @@ -85,6 +85,6 @@ static inline void ath10k_spectral_destroy(struct ath10k *ar) { } -#endif /* CONFIG_ATH10K_DEBUGFS */ +#endif /* CONFIG_ATH10K_SPECTRAL */ #endif /* SPECTRAL_H */ diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c index adf4592374b4..e7f57efadae1 100644 --- a/drivers/net/wireless/ath/ath10k/swap.c +++ b/drivers/net/wireless/ath/ath10k/swap.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Qualcomm Atheros, Inc. + * Copyright (c) 2015-2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h index f5dc0476493e..fa602f15fa93 100644 --- a/drivers/net/wireless/ath/ath10k/swap.h +++ b/drivers/net/wireless/ath/ath10k/swap.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Qualcomm Atheros, Inc. + * Copyright (c) 2015-2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index 8bded5da9d0d..c2b5bad0459b 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c index 9d3eb258ac2f..568810b41657 100644 --- a/drivers/net/wireless/ath/ath10k/testmode.c +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h index 191a8f34c8ea..6514d1a14242 100644 --- a/drivers/net/wireless/ath/ath10k/testmode_i.h +++ b/drivers/net/wireless/ath/ath10k/testmode_i.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014,2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c index ef717b631ff8..aa8978a8d751 100644 --- a/drivers/net/wireless/ath/ath10k/thermal.c +++ b/drivers/net/wireless/ath/ath10k/thermal.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h index 3abb97f63b1e..65e2419543f9 100644 --- a/drivers/net/wireless/ath/ath10k/thermal.h +++ b/drivers/net/wireless/ath/ath10k/thermal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h index e0d00cef0bd8..e40edced1d82 100644 --- a/drivers/net/wireless/ath/ath10k/trace.h +++ b/drivers/net/wireless/ath/ath10k/trace.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index d4986f626c35..5b3b021526ab 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h index e7ea1ae1c438..2bf401e436d3 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.h +++ b/drivers/net/wireless/ath/ath10k/txrx.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2014,2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 2fc3f24ff1ca..14093cfdc505 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -377,6 +377,7 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); struct sk_buff *skb; int ret; + u32 mgmt_tx_cmdid; if (!ar->wmi.ops->gen_mgmt_tx) return -EOPNOTSUPP; @@ -385,7 +386,13 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) if (IS_ERR(skb)) return PTR_ERR(skb); - ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid); + if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF, + ar->running_fw->fw_file.fw_features)) + mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_send_cmdid; + else + mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_cmdid; + + ret = ath10k_wmi_cmd_send(ar, skb, mgmt_tx_cmdid); if (ret) return ret; diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 7616c1c4bbd3..ae77a007ae07 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -917,33 +917,69 @@ ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len, return -ENOMEM; } +struct wmi_tlv_svc_rdy_parse { + const struct hal_reg_capabilities *reg; + const struct wmi_tlv_svc_rdy_ev *ev; + const __le32 *svc_bmap; + const struct wlan_host_mem_req *mem_reqs; + bool svc_bmap_done; + bool dbs_hw_mode_done; +}; + +static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_svc_rdy_parse *svc_rdy = data; + + switch (tag) { + case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT: + svc_rdy->ev = ptr; + break; + case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES: + svc_rdy->reg = ptr; + break; + case WMI_TLV_TAG_ARRAY_STRUCT: + svc_rdy->mem_reqs = ptr; + break; + case WMI_TLV_TAG_ARRAY_UINT32: + if (!svc_rdy->svc_bmap_done) { + svc_rdy->svc_bmap_done = true; + svc_rdy->svc_bmap = ptr; + } else if (!svc_rdy->dbs_hw_mode_done) { + svc_rdy->dbs_hw_mode_done = true; + } + break; + default: + break; + } + return 0; +} + static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb, struct wmi_svc_rdy_ev_arg *arg) { - const void **tb; const struct hal_reg_capabilities *reg; const struct wmi_tlv_svc_rdy_ev *ev; const __le32 *svc_bmap; const struct wlan_host_mem_req *mem_reqs; + struct wmi_tlv_svc_rdy_parse svc_rdy = { }; int ret; - tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); - if (IS_ERR(tb)) { - ret = PTR_ERR(tb); + ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len, + ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy); + if (ret) { ath10k_warn(ar, "failed to parse tlv: %d\n", ret); return ret; } - ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]; - reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]; - svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32]; - mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT]; + ev = svc_rdy.ev; + reg = svc_rdy.reg; + svc_bmap = svc_rdy.svc_bmap; + mem_reqs = svc_rdy.mem_reqs; - if (!ev || !reg || !svc_bmap || !mem_reqs) { - kfree(tb); + if (!ev || !reg || !svc_bmap || !mem_reqs) return -EPROTO; - } /* This is an internal ABI compatibility check for WMI TLV so check it * here instead of the generic WMI 
code. @@ -961,7 +997,6 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar, __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 || __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 || __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) { - kfree(tb); return -ENOTSUPP; } @@ -982,12 +1017,10 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar, ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs), ath10k_wmi_tlv_parse_mem_reqs, arg); if (ret) { - kfree(tb); ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret); return ret; } - kfree(tb); return 0; } @@ -1406,7 +1439,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); - cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS); + + cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers); + cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit); + cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries); if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) { cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); @@ -1418,7 +1454,6 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) cfg->num_peer_keys = __cpu_to_le32(2); cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS); - cfg->ast_skid_limit = __cpu_to_le32(0x10); cfg->tx_chain_mask = __cpu_to_le32(0x7); cfg->rx_chain_mask = __cpu_to_le32(0x7); cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64); @@ -1434,7 +1469,6 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) cfg->num_mcast_table_elems = __cpu_to_le32(0); cfg->mcast2ucast_mode = __cpu_to_le32(0); cfg->tx_dbg_log_size = __cpu_to_le32(0x400); - cfg->num_wds_entries = __cpu_to_le32(0x20); cfg->dma_burst_size = __cpu_to_le32(0); cfg->mac_aggr_delim = __cpu_to_le32(0); cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0); @@ -2450,6 +2484,80 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) } static struct sk_buff * +ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) +{ + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); + struct wmi_tlv_mgmt_tx_cmd *cmd; + struct wmi_tlv *tlv; + struct ieee80211_hdr *hdr; + struct sk_buff *skb; + void *ptr; + int len; + u32 buf_len = msdu->len; + struct ath10k_vif *arvif; + dma_addr_t mgmt_frame_dma; + u32 vdev_id; + + if (!cb->vif) + return ERR_PTR(-EINVAL); + + hdr = (struct ieee80211_hdr *)msdu->data; + arvif = (void *)cb->vif->drv_priv; + vdev_id = arvif->vdev_id; + + if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control))) + return ERR_PTR(-EINVAL); + + len = sizeof(*cmd) + 2 * sizeof(*tlv); + + if ((ieee80211_is_action(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control) || + ieee80211_is_disassoc(hdr->frame_control)) && + ieee80211_has_protected(hdr->frame_control)) { + len += IEEE80211_CCMP_MIC_LEN; + buf_len += IEEE80211_CCMP_MIC_LEN; + } + + buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN); + buf_len = round_up(buf_len, 4); + + len += buf_len; + len = round_up(len, 4); + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (void *)skb->data; + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->desc_id = 0; + cmd->chanfreq = 0; + cmd->buf_len = __cpu_to_le32(buf_len); + 
cmd->frame_len = __cpu_to_le32(msdu->len); + mgmt_frame_dma = dma_map_single(arvif->ar->dev, msdu->data, + msdu->len, DMA_TO_DEVICE); + if (dma_mapping_error(arvif->ar->dev, mgmt_frame_dma)) { + dev_kfree_skb(skb); + return ERR_PTR(-ENOMEM); + } + + cmd->paddr = __cpu_to_le64(mgmt_frame_dma); + + ptr += sizeof(*tlv); + ptr += sizeof(*cmd); + + tlv = ptr; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); + tlv->len = __cpu_to_le16(buf_len); + + ptr += sizeof(*tlv); + memcpy(ptr, msdu->data, buf_len); + + return skb; +} + +static struct sk_buff * ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar, enum wmi_force_fw_hang_type type, u32 delay_ms) @@ -3258,6 +3366,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = { .bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID, .prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID, .mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID, + .mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD, .prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID, .addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID, .addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID, @@ -3592,6 +3701,7 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats, .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang, /* .gen_mgmt_tx = not implemented; HTT is used */ + .gen_mgmt_tx = ath10k_wmi_tlv_op_gen_mgmt_tx, .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg, .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable, .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable, diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index 22cf011e839a..da89128e8dd6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -22,6 +22,7 @@ #define WMI_TLV_CMD_UNSUPPORTED 0 #define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0 #define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0 +#define WMI_TLV_MGMT_TX_FRAME_MAX_LEN 64 enum wmi_tlv_grp_id { WMI_TLV_GRP_START = 0x3, @@ -132,6 +133,7 @@ enum wmi_tlv_cmd_id { WMI_TLV_PRB_REQ_FILTER_RX_CMDID, WMI_TLV_MGMT_TX_CMDID, WMI_TLV_PRB_TMPL_CMDID, + WMI_TLV_MGMT_TX_SEND_CMD, WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG), WMI_TLV_ADDBA_SEND_CMDID, WMI_TLV_ADDBA_STATUS_CMDID, @@ -890,6 +892,63 @@ enum wmi_tlv_tag { WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT, WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM, WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR, + WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_CMD, + WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_EVENT, + WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_RESP_EVENT, + WMI_TLV_TAG_STRUCT_OCB_SET_UTC_TIME_CMD, + WMI_TLV_TAG_STRUCT_OCB_START_TIMING_ADVERT_CMD, + WMI_TLV_TAG_STRUCT_OCB_STOP_TIMING_ADVERT_CMD, + WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_CMD, + WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_RESP_EVENT, + WMI_TLV_TAG_STRUCT_DCC_GET_STATS_CMD, + WMI_TLV_TAG_STRUCT_DCC_CHANNEL_STATS_REQUEST, + WMI_TLV_TAG_STRUCT_DCC_GET_STATS_RESP_EVENT, + WMI_TLV_TAG_STRUCT_DCC_CLEAR_STATS_CMD, + WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_CMD, + WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_RESP_EVENT, + WMI_TLV_TAG_STRUCT_DCC_STATS_EVENT, + WMI_TLV_TAG_STRUCT_OCB_CHANNEL, + WMI_TLV_TAG_STRUCT_OCB_SCHEDULE_ELEMENT, + WMI_TLV_TAG_STRUCT_DCC_NDL_STATS_PER_CHANNEL, + WMI_TLV_TAG_STRUCT_DCC_NDL_CHAN, + WMI_TLV_TAG_STRUCT_QOS_PARAMETER, + WMI_TLV_TAG_STRUCT_DCC_NDL_ACTIVE_STATE_CONFIG, + WMI_TLV_TAG_STRUCT_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM, + WMI_TLV_TAG_STRUCT_ROAM_FILTER_FIXED_PARAM, + WMI_TLV_TAG_STRUCT_PASSPOINT_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_PASSPOINT_EVENT_HDR, + WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD, + WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_SSID_MATCH_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_TSF_TSTAMP_ACTION_CMD, + WMI_TLV_TAG_STRUCT_VDEV_TSF_REPORT_EVENT, + WMI_TLV_TAG_STRUCT_GET_FW_MEM_DUMP, + WMI_TLV_TAG_STRUCT_UPDATE_FW_MEM_DUMP, + WMI_TLV_TAG_STRUCT_FW_MEM_DUMP_PARAMS, + WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH, + WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH_COMPLETE, + WMI_TLV_TAG_STRUCT_PEER_SET_RATE_REPORT_CONDITION, + WMI_TLV_TAG_STRUCT_ROAM_SUBNET_CHANGE_CONFIG, + WMI_TLV_TAG_STRUCT_VDEV_SET_IE_CMD, + WMI_TLV_TAG_STRUCT_RSSI_BREACH_MONITOR_CONFIG, + WMI_TLV_TAG_STRUCT_RSSI_BREACH_EVENT, + WMI_TLV_TAG_STRUCT_EVENT_INITIAL_WAKEUP, + WMI_TLV_TAG_STRUCT_SOC_SET_PCL_CMD, + WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_CMD, + WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_EVENT, + WMI_TLV_TAG_STRUCT_SOC_HW_MODE_TRANSITION_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_TXRX_STREAMS, + WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY, + WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT, + WMI_TLV_TAG_STRUCT_IOAC_SOCK_PATTERN_T, + WMI_TLV_TAG_STRUCT_WOW_ENABLE_ICMPV6_NA_FLT_CMD, + WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_CONFIG, + WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_SUPPORTED_EVENT, + WMI_TLV_TAG_STRUCT_PACKET_FILTER_CONFIG, + WMI_TLV_TAG_STRUCT_PACKET_FILTER_ENABLE, + WMI_TLV_TAG_STRUCT_SAP_SET_BLACKLIST_PARAM_CMD, + WMI_TLV_TAG_STRUCT_MGMT_TX_CMD, WMI_TLV_TAG_MAX }; @@ -965,6 +1024,50 @@ enum wmi_tlv_service { WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT, WMI_TLV_SERVICE_MDNS_OFFLOAD, 
WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD, + WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT, + WMI_TLV_SERVICE_OCB, + WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD, + WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT, + WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD, + WMI_TLV_SERVICE_MGMT_TX_HTT, + WMI_TLV_SERVICE_MGMT_TX_WMI, + WMI_TLV_SERVICE_EXT_MSG, + WMI_TLV_SERVICE_MAWC, + WMI_TLV_SERVICE_PEER_ASSOC_CONF, + WMI_TLV_SERVICE_EGAP, + WMI_TLV_SERVICE_STA_PMF_OFFLOAD, + WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY, + WMI_TLV_SERVICE_ENHANCED_PROXY_STA, + WMI_TLV_SERVICE_ATF, + WMI_TLV_SERVICE_COEX_GPIO, + WMI_TLV_SERVICE_AUX_SPECTRAL_INTF, + WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, + WMI_TLV_SERVICE_ENTERPRISE_MESH, + WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT, + WMI_TLV_SERVICE_BPF_OFFLOAD, + WMI_TLV_SERVICE_SYNC_DELETE_CMDS, + WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT, + WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES, + WMI_TLV_SERVICE_NAN_DATA, + WMI_TLV_SERVICE_NAN_RTT, + WMI_TLV_SERVICE_11AX, + WMI_TLV_SERVICE_DEPRECATED_REPLACE, + WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, + WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER, + WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, + WMI_TLV_SERVICE_MESH_11S, + WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT, + WMI_TLV_SERVICE_VDEV_RX_FILTER, + WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT, + WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET, + WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET, + WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER, + WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT, + WMI_TLV_SERVICE_WLAN_STATS_REPORT, + WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT, + WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD, }; #define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ @@ -1121,6 +1224,8 @@ wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len) WMI_SERVICE_MDNS_OFFLOAD, len); SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD, WMI_SERVICE_SAP_AUTH_OFFLOAD, len); + SVCMAP(WMI_TLV_SERVICE_MGMT_TX_WMI, + WMI_SERVICE_MGMT_TX_WMI, len); } #undef SVCMAP @@ -1643,4 +1748,12 @@ struct wmi_tlv_tx_pause_ev { void ath10k_wmi_tlv_attach(struct ath10k *ar); +struct wmi_tlv_mgmt_tx_cmd { + __le32 vdev_id; + __le32 desc_id; + __le32 chanfreq; + __le64 paddr; + __le32 frame_len; + __le32 buf_len; +} __packed; #endif diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index cad2e42dcef6..58dc2189ba49 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -29,6 +29,7 @@ #include "p2p.h" #include "hw.h" #include "hif.h" +#include "txrx.h" #define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9 #define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ) @@ -4456,6 +4457,74 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) __le32_to_cpu(ev->rate_max)); } +static void +ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_tdls_peer_event *ev; + struct ath10k_peer *peer; + struct ath10k_vif *arvif; + int vdev_id; + int peer_status; + int peer_reason; + u8 reason; + + if (skb->len < sizeof(*ev)) { + ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n", + skb->len); + return; + } + + ev = (struct wmi_tdls_peer_event *)skb->data; + vdev_id = __le32_to_cpu(ev->vdev_id); + peer_status = __le32_to_cpu(ev->peer_status); + peer_reason = __le32_to_cpu(ev->peer_reason); + + spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr); + spin_unlock_bh(&ar->data_lock); + + if (!peer) { + ath10k_warn(ar, "failed to find peer entry for %pM\n", + ev->peer_macaddr.addr); + return; + } + + switch (peer_status) { + case WMI_TDLS_SHOULD_TEARDOWN: + switch (peer_reason) { + case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT: + case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE: + case WMI_TDLS_TEARDOWN_REASON_RSSI: + reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE; + break; + default: + reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED; + break; + } + + arvif = ath10k_get_arvif(ar, vdev_id); + if (!arvif) { + ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n", + vdev_id); + return; + } + + ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr, + NL80211_TDLS_TEARDOWN, reason, + GFP_ATOMIC); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "received tdls teardown event for peer %pM reason %u\n", + ev->peer_macaddr.addr, peer_reason); + break; + default: + ath10k_dbg(ar, ATH10K_DBG_WMI, + "received unknown tdls peer event %u\n", + peer_status); + break; + } +} + void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb) { ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); @@ -5477,6 +5546,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_10_4_PDEV_TPC_CONFIG_EVENTID: ath10k_wmi_event_pdev_tpc_config(ar, skb); break; + case WMI_10_4_TDLS_PEER_EVENTID: + ath10k_wmi_handle_tdls_peer_event(ar, skb); + break; default: ath10k_warn(ar, "Unknown eventid: %d\n", id); break; diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index c02b21cff38d..c7b30ed9015d 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. - * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -195,6 +195,8 @@ enum wmi_service { WMI_SERVICE_SMART_LOGGING_SUPPORT, WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, + WMI_SERVICE_MGMT_TX_WMI, + WMI_SERVICE_TDLS_WIDER_BANDWIDTH, /* keep last */ WMI_SERVICE_MAX, @@ -336,6 +338,7 @@ enum wmi_10_4_service { WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA, WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY, + WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH, }; static inline char *wmi_service_name(int service_id) @@ -444,6 +447,7 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_SMART_LOGGING_SUPPORT); SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE); SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY); + SVCSTR(WMI_SERVICE_TDLS_WIDER_BANDWIDTH); default: return NULL; } @@ -740,6 +744,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, len); SVCMAP(WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY, WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH, + WMI_SERVICE_TDLS_WIDER_BANDWIDTH, len); } #undef SVCMAP @@ -797,6 +803,7 @@ struct wmi_cmd_map { u32 bcn_filter_rx_cmdid; u32 prb_req_filter_rx_cmdid; u32 mgmt_tx_cmdid; + u32 mgmt_tx_send_cmdid; u32 prb_tmpl_cmdid; u32 addba_clear_resp_cmdid; u32 addba_send_cmdid; @@ -2922,7 +2929,7 @@ struct wmi_ext_resource_config_10_4_cmd { __le32 max_tdls_concurrent_buffer_sta; }; -/* strucutre describing host memory chunk. */ +/* structure describing host memory chunk. */ struct host_memory_chunk { /* id of the request that is passed up in service ready */ __le32 req_id; @@ -5236,7 +5243,8 @@ enum wmi_10_4_vdev_param { #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3) #define WMI_TXBF_STS_CAP_OFFSET_LSB 4 -#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0 +#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70 +#define WMI_TXBF_CONF_IMPLICIT_BF BIT(7) #define WMI_BF_SOUND_DIM_OFFSET_LSB 8 #define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00 diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index 0d46d6dc7578..c4cbccb29b31 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Qualcomm Atheros, Inc. + * Copyright (c) 2015-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h index 9745b9ddc7f5..6e810105b775 100644 --- a/drivers/net/wireless/ath/ath10k/wow.h +++ b/drivers/net/wireless/ath/ath10k/wow.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Qualcomm Atheros, Inc. + * Copyright (c) 2015,2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index b53eb2b85f02..2ba8cf3f38af 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -2766,7 +2766,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_mgmt *mgmt; bool hidden = false; u8 *ies; - int ies_len; struct wmi_connect_cmd p; int res; int i, ret; @@ -2804,7 +2803,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev, ies = mgmt->u.beacon.variable; if (ies > info->beacon.head + info->beacon.head_len) return -EINVAL; - ies_len = info->beacon.head + info->beacon.head_len - ies; if (info->ssid == NULL) return -EINVAL; diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c index 1379906bf849..8da9506f8c2b 100644 --- a/drivers/net/wireless/ath/ath6kl/txrx.c +++ b/drivers/net/wireless/ath/ath6kl/txrx.c @@ -1001,7 +1001,7 @@ static void aggr_slice_amsdu(struct aggr_info *p_aggr, while (amsdu_len > mac_hdr_len) { hdr = (struct ethhdr *) framep; - payload_8023_len = ntohs(hdr->h_proto); + payload_8023_len = be16_to_cpu(hdr->h_proto); if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN || payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) { diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 783a38f1a626..1f3523019509 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -61,13 +61,12 @@ config ATH9K_DEBUGFS depends on ATH9K && DEBUG_FS select MAC80211_DEBUGFS select ATH9K_COMMON_DEBUG - select RELAY ---help--- Say Y, if you need access to ath9k's statistics for interrupts, rate control, etc. - Also required for changing debug message flags at run time. - As well as access to the FFT/spectral data and TX99. + Also required for changing debug message flags at run time and for + TX99. config ATH9K_STATION_STATISTICS bool "Detailed station statistics" @@ -177,7 +176,6 @@ config ATH9K_HTC_DEBUGFS bool "Atheros ath9k_htc debugging" depends on ATH9K_HTC && DEBUG_FS select ATH9K_COMMON_DEBUG - select RELAY ---help--- Say Y, if you need access to ath9k_htc's statistics. As well as access to the FFT/spectral data. @@ -192,3 +190,11 @@ config ATH9K_HWRNG Say Y, feeds the entropy directly from the WiFi driver to the input pool. + +config ATH9K_COMMON_SPECTRAL + bool "Atheros ath9k/ath9k_htc spectral scan support" + depends on ATH9K_DEBUGFS || ATH9K_HTC_DEBUGFS + select RELAY + default n + ---help--- + Say Y to enable access to the FFT/spectral data via debugfs. 
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile index d804ce7391a0..f71b2ad8275c 100644 --- a/drivers/net/wireless/ath/ath9k/Makefile +++ b/drivers/net/wireless/ath/ath9k/Makefile @@ -62,8 +62,8 @@ ath9k_common-y:= common.o \ common-init.o \ common-beacon.o \ -ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o \ - common-spectral.o +ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o +ath9k_common-$(CONFIG_ATH9K_COMMON_SPECTRAL) += common-spectral.o ath9k_htc-y += htc_hst.o \ hif_usb.o \ diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index c2e210c0a770..f019a20e5a1f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3310,6 +3310,12 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah, if (ar9300_check_eeprom_header(ah, read, cptr)) goto found; + cptr = AR9300_BASE_ADDR_4K; + ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n", + cptr); + if (ar9300_check_eeprom_header(ah, read, cptr)) + goto found; + cptr = AR9300_BASE_ADDR_512; ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n", cptr); @@ -3430,6 +3436,60 @@ static u32 ar9003_dump_modal_eeprom(char *buf, u32 len, u32 size, return len; } +static u32 ar9003_dump_cal_data(struct ath_hw *ah, char *buf, u32 len, u32 size, + bool is_2g) +{ + struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; + struct ar9300_base_eep_hdr *pBase; + struct ar9300_cal_data_per_freq_op_loop *cal_pier; + int cal_pier_nr; + int freq; + int i, j; + + pBase = &eep->baseEepHeader; + + if (is_2g) + cal_pier_nr = AR9300_NUM_2G_CAL_PIERS; + else + cal_pier_nr = AR9300_NUM_5G_CAL_PIERS; + + for (i = 0; i < AR9300_MAX_CHAINS; i++) { + if (!((pBase->txrxMask >> i) & 1)) + continue; + + len += snprintf(buf + len, size - len, "Chain %d\n", i); + + len += snprintf(buf + len, size - len, + "Freq\t ref\tvolt\ttemp\tnf_cal\tnf_pow\trx_temp\n"); + + for (j = 0; j < cal_pier_nr; j++) { + if (is_2g) { + cal_pier = &eep->calPierData2G[i][j]; + freq = 2300 + eep->calFreqPier2G[j]; + } else { + cal_pier = &eep->calPierData5G[i][j]; + freq = 4800 + eep->calFreqPier5G[j] * 5; + } + + len += snprintf(buf + len, size - len, + "%d\t", freq); + + len += snprintf(buf + len, size - len, + "%d\t%d\t%d\t%d\t%d\t%d\n", + cal_pier->refPower, + cal_pier->voltMeas, + cal_pier->tempMeas, + cal_pier->rxTempMeas ? + N2DBM(cal_pier->rxNoisefloorCal) : 0, + cal_pier->rxTempMeas ? 
+ N2DBM(cal_pier->rxNoisefloorPower) : 0, + cal_pier->rxTempMeas); + } + } + + return len; +} + static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, u8 *buf, u32 len, u32 size) { @@ -3441,10 +3501,18 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, "%20s :\n", "2GHz modal Header"); len = ar9003_dump_modal_eeprom(buf, len, size, &eep->modalHeader2G); - len += scnprintf(buf + len, size - len, + + len += scnprintf(buf + len, size - len, "Calibration data\n"); + len = ar9003_dump_cal_data(ah, buf, len, size, true); + + len += snprintf(buf + len, size - len, "%20s :\n", "5GHz modal Header"); len = ar9003_dump_modal_eeprom(buf, len, size, &eep->modalHeader5G); + + len += snprintf(buf + len, size - len, "Calibration data\n"); + len = ar9003_dump_cal_data(ah, buf, len, size, false); + goto out; } @@ -4683,7 +4751,8 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, int ichain, int *pfrequency, int *pcorrection, - int *ptemperature, int *pvoltage) + int *ptemperature, int *pvoltage, + int *pnf_cal, int *pnf_power) { u8 *pCalPier; struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct; @@ -4725,6 +4794,10 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, *pcorrection = pCalPierStruct->refPower; *ptemperature = pCalPierStruct->tempMeas; *pvoltage = pCalPierStruct->voltMeas; + *pnf_cal = pCalPierStruct->rxTempMeas ? + N2DBM(pCalPierStruct->rxNoisefloorCal) : 0; + *pnf_power = pCalPierStruct->rxTempMeas ? + N2DBM(pCalPierStruct->rxNoisefloorPower) : 0; return 0; } @@ -4889,14 +4962,18 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) int mode; int lfrequency[AR9300_MAX_CHAINS], lcorrection[AR9300_MAX_CHAINS], - ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS]; + ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS], + lnf_cal[AR9300_MAX_CHAINS], lnf_pwr[AR9300_MAX_CHAINS]; int hfrequency[AR9300_MAX_CHAINS], hcorrection[AR9300_MAX_CHAINS], - htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS]; + htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS], + hnf_cal[AR9300_MAX_CHAINS], hnf_pwr[AR9300_MAX_CHAINS]; int fdiff; int correction[AR9300_MAX_CHAINS], - voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS]; - int pfrequency, pcorrection, ptemperature, pvoltage; + voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS], + nf_cal[AR9300_MAX_CHAINS], nf_pwr[AR9300_MAX_CHAINS]; + int pfrequency, pcorrection, ptemperature, pvoltage, + pnf_cal, pnf_pwr; struct ath_common *common = ath9k_hw_common(ah); mode = (frequency >= 4000); @@ -4914,7 +4991,8 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) for (ipier = 0; ipier < npier; ipier++) { if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain, &pfrequency, &pcorrection, - &ptemperature, &pvoltage)) { + &ptemperature, &pvoltage, + &pnf_cal, &pnf_pwr)) { fdiff = frequency - pfrequency; /* @@ -4936,6 +5014,8 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) htemperature[ichain] = ptemperature; hvoltage[ichain] = pvoltage; + hnf_cal[ichain] = pnf_cal; + hnf_pwr[ichain] = pnf_pwr; } } if (fdiff >= 0) { @@ -4952,6 +5032,8 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) ltemperature[ichain] = ptemperature; lvoltage[ichain] = pvoltage; + lnf_cal[ichain] = pnf_cal; + lnf_pwr[ichain] = pnf_pwr; } } } @@ -4960,15 +5042,20 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) /* interpolate */ for (ichain = 0; ichain < AR9300_MAX_CHAINS; 
ichain++) { - ath_dbg(common, EEPROM, "ch=%d f=%d low=%d %d h=%d %d\n", + ath_dbg(common, EEPROM, + "ch=%d f=%d low=%d %d h=%d %d n=%d %d p=%d %d\n", ichain, frequency, lfrequency[ichain], lcorrection[ichain], hfrequency[ichain], - hcorrection[ichain]); + hcorrection[ichain], lnf_cal[ichain], + hnf_cal[ichain], lnf_pwr[ichain], + hnf_pwr[ichain]); /* they're the same, so just pick one */ if (hfrequency[ichain] == lfrequency[ichain]) { correction[ichain] = lcorrection[ichain]; voltage[ichain] = lvoltage[ichain]; temperature[ichain] = ltemperature[ichain]; + nf_cal[ichain] = lnf_cal[ichain]; + nf_pwr[ichain] = lnf_pwr[ichain]; } /* the low frequency is good */ else if (frequency - lfrequency[ichain] < 1000) { @@ -4992,12 +5079,26 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) hfrequency[ichain], lvoltage[ichain], hvoltage[ichain]); + + nf_cal[ichain] = interpolate(frequency, + lfrequency[ichain], + hfrequency[ichain], + lnf_cal[ichain], + hnf_cal[ichain]); + + nf_pwr[ichain] = interpolate(frequency, + lfrequency[ichain], + hfrequency[ichain], + lnf_pwr[ichain], + hnf_pwr[ichain]); } /* only low is good, use it */ else { correction[ichain] = lcorrection[ichain]; temperature[ichain] = ltemperature[ichain]; voltage[ichain] = lvoltage[ichain]; + nf_cal[ichain] = lnf_cal[ichain]; + nf_pwr[ichain] = lnf_pwr[ichain]; } } /* only high is good, use it */ @@ -5005,10 +5106,14 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) correction[ichain] = hcorrection[ichain]; temperature[ichain] = htemperature[ichain]; voltage[ichain] = hvoltage[ichain]; + nf_cal[ichain] = hnf_cal[ichain]; + nf_pwr[ichain] = hnf_pwr[ichain]; } else { /* nothing is good, presume 0???? */ correction[ichain] = 0; temperature[ichain] = 0; voltage[ichain] = 0; + nf_cal[ichain] = 0; + nf_pwr[ichain] = 0; } } @@ -5019,6 +5124,16 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) "for frequency=%d, calibration correction = %d %d %d\n", frequency, correction[0], correction[1], correction[2]); + /* Store calibrated noise floor values */ + for (ichain = 0; ichain < AR5416_MAX_CHAINS; ichain++) + if (mode) { + ah->nf_5g.cal[ichain] = nf_cal[ichain]; + ah->nf_5g.pwr[ichain] = nf_pwr[ichain]; + } else { + ah->nf_2g.cal[ichain] = nf_cal[ichain]; + ah->nf_2g.pwr[ichain] = nf_pwr[ichain]; + } + return 0; } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index bd2269c7de6b..e8fda54acfe3 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -62,6 +62,16 @@ */ #define AR9300_PWR_TABLE_OFFSET 0 +/* Noise power data definitions + * units are: 4 x dBm - NOISE_PWR_DATA_OFFSET + * (e.g. 
-25 = (-25/4 - 90) = -96.25 dBm) + * range (for 6 signed bits) is (-32 to 31) + offset => -122dBm to -59dBm + * resolution (2 bits) is 0.25dBm + */ +#define NOISE_PWR_DATA_OFFSET -90 +#define NOISE_PWR_DBM_2_INT(_p) ((((_p) + 3) >> 2) + NOISE_PWR_DATA_OFFSET) +#define N2DBM(_p) NOISE_PWR_DBM_2_INT(_p) + /* byte addressable */ #define AR9300_EEPROM_SIZE (16*1024) diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index 13ab6bc46775..3d9447e21025 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -58,19 +58,25 @@ static struct ath_nf_limits *ath9k_hw_get_nf_limits(struct ath_hw *ah, } static s16 ath9k_hw_get_default_nf(struct ath_hw *ah, - struct ath9k_channel *chan) + struct ath9k_channel *chan, + int chain) { - return ath9k_hw_get_nf_limits(ah, chan)->nominal; + s16 calib_nf = ath9k_hw_get_nf_limits(ah, chan)->cal[chain]; + + if (calib_nf) + return calib_nf; + else + return ath9k_hw_get_nf_limits(ah, chan)->nominal; } s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan, s16 nf) { - s8 noise = ATH_DEFAULT_NOISE_FLOOR; + s8 noise = ath9k_hw_get_default_nf(ah, chan, 0); if (nf) { s8 delta = nf - ATH9K_NF_CAL_NOISE_THRESH - - ath9k_hw_get_default_nf(ah, chan); + ath9k_hw_get_default_nf(ah, chan, 0); if (delta > 0) noise += delta; } @@ -240,7 +246,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) unsigned i, j; u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; struct ath_common *common = ath9k_hw_common(ah); - s16 default_nf = ath9k_hw_get_default_nf(ah, chan); + s16 default_nf = ath9k_hw_get_nf_limits(ah, chan)->nominal; u32 bb_agc_ctl = REG_READ(ah, AR_PHY_AGC_CONTROL); if (ah->caldata) @@ -258,8 +264,13 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) nfval = ah->nf_override; else if (h) nfval = h[i].privNF; - else - nfval = default_nf; + else { + /* Try to get calibrated noise floor value */ + nfval = + ath9k_hw_get_nf_limits(ah, chan)->cal[i]; + if (nfval > -60 || nfval < -127) + nfval = default_nf; + } REG_RMW(ah, ah->nf_regs[i], (((u32) nfval << 1) & 0x1ff), 0x1ff); @@ -429,20 +440,19 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath9k_nfcal_hist *h; - s16 default_nf; - int i, j; + int i, j, k = 0; ah->caldata->channel = chan->channel; ah->caldata->channelFlags = chan->channelFlags; h = ah->caldata->nfCalHist; - default_nf = ath9k_hw_get_default_nf(ah, chan); for (i = 0; i < NUM_NF_READINGS; i++) { h[i].currIndex = 0; - h[i].privNF = default_nf; + h[i].privNF = ath9k_hw_get_default_nf(ah, chan, k); h[i].invalidNFcount = AR_PHY_CCA_FILTERWINDOW_LENGTH; - for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) { - h[i].nfCalBuffer[j] = default_nf; - } + for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) + h[i].nfCalBuffer[j] = h[i].privNF; + if (++k >= AR5416_MAX_CHAINS) + k = 0; } } diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h index 5d1a51d83aa6..303ab470ce34 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.h +++ b/drivers/net/wireless/ath/ath9k/common-spectral.h @@ -151,7 +151,7 @@ static inline u8 spectral_bitmap_weight(u8 *bins) return bins[0] & 0x3f; } -#ifdef CONFIG_ATH9K_COMMON_DEBUG +#ifdef CONFIG_ATH9K_COMMON_SPECTRAL void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, struct dentry *debugfs_phy); void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv); @@ -183,6 +183,6 @@ 
static inline int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, { return 0; } -#endif /* CONFIG_ATH9K_COMMON_DEBUG */ +#endif /* CONFIG_ATH9K_COMMON_SPECTRAL */ #endif /* SPECTRAL_H */ diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c index 40a397fd0e0e..6fee9a464cce 100644 --- a/drivers/net/wireless/ath/ath9k/dfs.c +++ b/drivers/net/wireless/ath/ath9k/dfs.c @@ -123,11 +123,9 @@ static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data, fft = (struct ath9k_dfs_fft_40 *) (data + 2); ath_dbg(common, DFS, "fixing datalen by 2\n"); } - if (IS_CHAN_HT40MINUS(ah->curchan)) { - int temp = is_ctl; - is_ctl = is_ext; - is_ext = temp; - } + if (IS_CHAN_HT40MINUS(ah->curchan)) + swap(is_ctl, is_ext); + for (i = 0; i < FFT_NUM_SAMPLES; i++) max_bin[i] = ath9k_get_max_index_ht40(fft + i, is_ctl, is_ext); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index f808e5833d7e..a82ad739ab80 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1683,6 +1683,10 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw, ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: + if (tid >= ATH9K_HTC_MAX_TID) { + ret = -EINVAL; + break; + } ista = (struct ath9k_htc_sta *) sta->drv_priv; spin_lock_bh(&priv->tx.tx_lock); ista->tid_state[tid] = AGGR_OPERATIONAL; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 8c5c2dd8fa7f..cd0f023ccf77 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -922,6 +922,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, AR_IMR_RXERR | AR_IMR_RXORN | AR_IMR_BCNMISC; + u32 msi_cfg = 0; if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) @@ -929,22 +930,30 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, if (AR_SREV_9300_20_OR_LATER(ah)) { imr_reg |= AR_IMR_RXOK_HP; - if (ah->config.rx_intr_mitigation) + if (ah->config.rx_intr_mitigation) { imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; - else + msi_cfg |= AR_INTCFG_MSI_RXINTM | AR_INTCFG_MSI_RXMINTR; + } else { imr_reg |= AR_IMR_RXOK_LP; - + msi_cfg |= AR_INTCFG_MSI_RXOK; + } } else { - if (ah->config.rx_intr_mitigation) + if (ah->config.rx_intr_mitigation) { imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; - else + msi_cfg |= AR_INTCFG_MSI_RXINTM | AR_INTCFG_MSI_RXMINTR; + } else { imr_reg |= AR_IMR_RXOK; + msi_cfg |= AR_INTCFG_MSI_RXOK; + } } - if (ah->config.tx_intr_mitigation) + if (ah->config.tx_intr_mitigation) { imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR; - else + msi_cfg |= AR_INTCFG_MSI_TXINTM | AR_INTCFG_MSI_TXMINTR; + } else { imr_reg |= AR_IMR_TXOK; + msi_cfg |= AR_INTCFG_MSI_TXOK; + } ENABLE_REGWRITE_BUFFER(ah); @@ -952,6 +961,16 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, ah->imrs2_reg |= AR_IMR_S2_GTT; REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg); + if (ah->msi_enabled) { + ah->msi_reg = REG_READ(ah, AR_PCIE_MSI); + ah->msi_reg |= AR_PCIE_MSI_HW_DBI_WR_EN; + ah->msi_reg &= AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64; + REG_WRITE(ah, AR_INTCFG, msi_cfg); + ath_dbg(ath9k_hw_common(ah), ANY, + "value of AR_INTCFG=0x%X, msi_cfg=0x%X\n", + REG_READ(ah, AR_INTCFG), msi_cfg); + } + if (!AR_SREV_9100(ah)) { REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF); REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default); diff --git a/drivers/net/wireless/ath/ath9k/hw.h 
b/drivers/net/wireless/ath/ath9k/hw.h index 4ac70827d142..9804a24a2dc0 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -754,6 +754,8 @@ struct ath_nf_limits { s16 max; s16 min; s16 nominal; + s16 cal[AR5416_MAX_CHAINS]; + s16 pwr[AR5416_MAX_CHAINS]; }; enum ath_cal_list { @@ -977,6 +979,9 @@ struct ath_hw { bool tpc_enabled; u8 tx_power[Ar5416RateSize]; u8 tx_power_stbc[Ar5416RateSize]; + bool msi_enabled; + u32 msi_mask; + u32 msi_reg; }; struct ath_bus_ops { diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index fa58a32227f5..e479fae5aab9 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -23,6 +23,7 @@ #include <linux/of.h> #include <linux/of_net.h> #include <linux/relay.h> +#include <linux/dmi.h> #include <net/ieee80211_radiotap.h> #include "ath9k.h" @@ -75,6 +76,10 @@ MODULE_PARM_DESC(use_chanctx, "Enable channel context for concurrency"); #endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ +int ath9k_use_msi; +module_param_named(use_msi, ath9k_use_msi, int, 0444); +MODULE_PARM_DESC(use_msi, "Use MSI instead of INTx if possible"); + bool is_ath9k_unloaded; #ifdef CONFIG_MAC80211_LEDS @@ -92,6 +97,56 @@ static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = { }; #endif +static int __init set_use_msi(const struct dmi_system_id *dmi) +{ + ath9k_use_msi = 1; + return 1; +} + +static const struct dmi_system_id ath9k_quirks[] __initconst = { + { + .callback = set_use_msi, + .ident = "Dell Inspiron 24-3460", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 24-3460"), + }, + }, + { + .callback = set_use_msi, + .ident = "Dell Vostro 3262", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3262"), + }, + }, + { + .callback = set_use_msi, + .ident = "Dell Inspiron 3472", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3472"), + }, + }, + { + .callback = set_use_msi, + .ident = "Dell Vostro 15-3572", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15-3572"), + }, + }, + { + .callback = set_use_msi, + .ident = "Dell Inspiron 14-3473", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14-3473"), + }, + }, + {} +}; + static void ath9k_deinit_softc(struct ath_softc *sc); static void ath9k_op_ps_wakeup(struct ath_common *common) @@ -1100,6 +1155,8 @@ static int __init ath9k_init(void) goto err_pci_exit; } + dmi_check_system(ath9k_quirks); + return 0; err_pci_exit: diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 77c94f9e7b61..58d02c19b6d0 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -832,6 +832,43 @@ static void __ath9k_hw_enable_interrupts(struct ath_hw *ah) } ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); + + if (ah->msi_enabled) { + u32 _msi_reg = 0; + u32 i = 0; + u32 msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64; + + ath_dbg(ath9k_hw_common(ah), INTERRUPT, + "Enabling MSI, msi_mask=0x%X\n", ah->msi_mask); + + REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, ah->msi_mask); + REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, ah->msi_mask); + ath_dbg(ath9k_hw_common(ah), INTERRUPT, + "AR_INTR_PRIO_ASYNC_ENABLE=0x%X, AR_INTR_PRIO_ASYNC_MASK=0x%X\n", + REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE), + 
REG_READ(ah, AR_INTR_PRIO_ASYNC_MASK)); + + if (ah->msi_reg == 0) + ah->msi_reg = REG_READ(ah, AR_PCIE_MSI); + + ath_dbg(ath9k_hw_common(ah), INTERRUPT, + "AR_PCIE_MSI=0x%X, ah->msi_reg = 0x%X\n", + AR_PCIE_MSI, ah->msi_reg); + + i = 0; + do { + REG_WRITE(ah, AR_PCIE_MSI, + (ah->msi_reg | AR_PCIE_MSI_ENABLE) + & msi_pend_addr_mask); + _msi_reg = REG_READ(ah, AR_PCIE_MSI); + i++; + } while ((_msi_reg & AR_PCIE_MSI_ENABLE) == 0 && i < 200); + + if (i >= 200) + ath_err(ath9k_hw_common(ah), + "%s: _msi_reg = 0x%X\n", + __func__, _msi_reg); + } } void ath9k_hw_resume_interrupts(struct ath_hw *ah) @@ -878,12 +915,21 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah) if (!(ints & ATH9K_INT_GLOBAL)) ath9k_hw_disable_interrupts(ah); + if (ah->msi_enabled) { + ath_dbg(common, INTERRUPT, "Clearing AR_INTR_PRIO_ASYNC_ENABLE\n"); + + REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0); + REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE); + } + ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints); mask = ints & ATH9K_INT_COMMON; mask2 = 0; + ah->msi_mask = 0; if (ints & ATH9K_INT_TX) { + ah->msi_mask |= AR_INTR_PRIO_TX; if (ah->config.tx_intr_mitigation) mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM; else { @@ -898,6 +944,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah) mask |= AR_IMR_TXEOL; } if (ints & ATH9K_INT_RX) { + ah->msi_mask |= AR_INTR_PRIO_RXLP | AR_INTR_PRIO_RXHP; if (AR_SREV_9300_20_OR_LATER(ah)) { mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP; if (ah->config.rx_intr_mitigation) { diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 223606311261..645f0fbd9179 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -22,6 +22,8 @@ #include <linux/module.h> #include "ath9k.h" +extern int ath9k_use_msi; + static const struct pci_device_id ath_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */ { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */ @@ -889,6 +891,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) u32 val; int ret = 0; char hw_name[64]; + int msi_enabled = 0; if (pcim_enable_device(pdev)) return -EIO; @@ -960,7 +963,20 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) sc->mem = pcim_iomap_table(pdev)[0]; sc->driver_data = id->driver_data; - ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc); + if (ath9k_use_msi) { + if (pci_enable_msi(pdev) == 0) { + msi_enabled = 1; + dev_err(&pdev->dev, "Using MSI\n"); + } else { + dev_err(&pdev->dev, "Using INTx\n"); + } + } + + if (!msi_enabled) + ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc); + else + ret = request_irq(pdev->irq, ath_isr, 0, "ath9k", sc); + if (ret) { dev_err(&pdev->dev, "request_irq failed\n"); goto err_irq; @@ -974,6 +990,9 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_init; } + sc->sc_ah->msi_enabled = msi_enabled; + sc->sc_ah->msi_reg = 0; + ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name)); wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", hw_name, (unsigned long)sc->mem, pdev->irq); diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 2197aee2bb72..a8ac42c96d71 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -826,9 +826,9 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, sc->rx.discard_next = false; /* - * Discard zero-length packets. 
+ * Discard zero-length packets and packets smaller than an ACK */ - if (!rx_stats->rs_datalen) { + if (rx_stats->rs_datalen < 10) { RX_STAT_INC(rx_len_err); goto corrupt; } diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 80ff69f99229..653e79611830 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -146,6 +146,14 @@ #define AR_MACMISC_MISC_OBS_BUS_MSB_S 15 #define AR_MACMISC_MISC_OBS_BUS_1 1 +#define AR_INTCFG 0x005C +#define AR_INTCFG_MSI_RXOK 0x00000000 +#define AR_INTCFG_MSI_RXINTM 0x00000004 +#define AR_INTCFG_MSI_RXMINTR 0x00000006 +#define AR_INTCFG_MSI_TXOK 0x00000000 +#define AR_INTCFG_MSI_TXINTM 0x00000010 +#define AR_INTCFG_MSI_TXMINTR 0x00000018 + #define AR_DATABUF_SIZE 0x0060 #define AR_DATABUF_SIZE_MASK 0x00000FFF @@ -1256,6 +1264,13 @@ enum { #define AR_PCIE_MSI (AR_SREV_9340(ah) ? 0x40d8 : \ (AR_SREV_9300_20_OR_LATER(ah) ? 0x40a4 : 0x4094)) #define AR_PCIE_MSI_ENABLE 0x00000001 +#define AR_PCIE_MSI_HW_DBI_WR_EN 0x02000000 +#define AR_PCIE_MSI_HW_INT_PENDING_ADDR 0xFFA0C1FF /* bits 8..11: value must be 0x5060 */ +#define AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64 0xFFA0C9FF /* bits 8..11: value must be 0x5064 */ + +#define AR_INTR_PRIO_TX 0x00000001 +#define AR_INTR_PRIO_RXLP 0x00000002 +#define AR_INTR_PRIO_RXHP 0x00000004 #define AR_INTR_PRIO_SYNC_ENABLE (AR_SREV_9340(ah) ? 0x4088 : 0x40c4) #define AR_INTR_PRIO_ASYNC_MASK (AR_SREV_9340(ah) ? 0x408c : 0x40c8) diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index d5c810a8cc52..a3f1f7d042a4 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -236,6 +236,14 @@ static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn return 0; } +static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch) +{ + size_t size; + + size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc); + dma_free_coherent(dev, size,wcn_ch->cpu_addr, wcn_ch->dma_addr); +} + static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch, struct wcn36xx_dxe_mem_pool *pool) { @@ -722,7 +730,11 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) /***************************************/ /* Init descriptors for TX LOW channel */ /***************************************/ - wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch); + ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch); + if (ret) { + dev_err(wcn->dev, "Error allocating descriptor\n"); + return ret; + } wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool); /* Write channel head to a NEXT register */ @@ -740,7 +752,12 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) /***************************************/ /* Init descriptors for TX HIGH channel */ /***************************************/ - wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch); + ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch); + if (ret) { + dev_err(wcn->dev, "Error allocating descriptor\n"); + goto out_err_txh_ch; + } + wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool); /* Write channel head to a NEXT register */ @@ -760,7 +777,12 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) /***************************************/ /* Init descriptors for RX LOW channel */ /***************************************/ - wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch); + ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch); + if (ret) { + dev_err(wcn->dev, "Error allocating descriptor\n"); + goto 
out_err_rxl_ch; + } + /* For RX we need to preallocated buffers */ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch); @@ -790,7 +812,11 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) /***************************************/ /* Init descriptors for RX HIGH channel */ /***************************************/ - wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch); + ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch); + if (ret) { + dev_err(wcn->dev, "Error allocating descriptor\n"); + goto out_err_rxh_ch; + } /* For RX we need to prealocat buffers */ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch); @@ -819,11 +845,19 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) ret = wcn36xx_dxe_request_irqs(wcn); if (ret < 0) - goto out_err; + goto out_err_irq; return 0; -out_err: +out_err_irq: + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch); +out_err_rxh_ch: + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch); +out_err_rxl_ch: + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch); +out_err_txh_ch: + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch); + return ret; } diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index b765c647319d..182963522941 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -348,6 +348,13 @@ enum wcn36xx_hal_host_msg_type { WCN36XX_HAL_DHCP_START_IND = 189, WCN36XX_HAL_DHCP_STOP_IND = 190, + /* Scan Offload(hw) APIs */ + WCN36XX_HAL_START_SCAN_OFFLOAD_REQ = 204, + WCN36XX_HAL_START_SCAN_OFFLOAD_RSP = 205, + WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ = 206, + WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP = 207, + WCN36XX_HAL_SCAN_OFFLOAD_IND = 210, + WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233, WCN36XX_HAL_PRINT_REG_INFO_IND = 259, @@ -1115,6 +1122,101 @@ struct wcn36xx_hal_finish_scan_rsp_msg { } __packed; +enum wcn36xx_hal_scan_type { + WCN36XX_HAL_SCAN_TYPE_PASSIVE = 0x00, + WCN36XX_HAL_SCAN_TYPE_ACTIVE = WCN36XX_HAL_MAX_ENUM_SIZE +}; + +struct wcn36xx_hal_mac_ssid { + u8 length; + u8 ssid[32]; +} __packed; + +struct wcn36xx_hal_start_scan_offload_req_msg { + struct wcn36xx_hal_msg_header header; + + /* BSSIDs hot list */ + u8 num_bssid; + u8 bssids[4][ETH_ALEN]; + + /* Directed probe-requests will be sent for listed SSIDs (max 10)*/ + u8 num_ssid; + struct wcn36xx_hal_mac_ssid ssids[10]; + + /* Report AP with hidden ssid */ + u8 scan_hidden; + + /* Self MAC address */ + u8 mac[ETH_ALEN]; + + /* BSS type */ + enum wcn36xx_hal_bss_type bss_type; + + /* Scan type */ + enum wcn36xx_hal_scan_type scan_type; + + /* Minimum scanning time on each channel (ms) */ + u32 min_ch_time; + + /* Maximum scanning time on each channel */ + u32 max_ch_time; + + /* Is a p2p search */ + u8 p2p_search; + + /* Channels to scan */ + u8 num_channel; + u8 channels[80]; + + /* IE field */ + u16 ie_len; + u8 ie[0]; +} __packed; + +struct wcn36xx_hal_start_scan_offload_rsp_msg { + struct wcn36xx_hal_msg_header header; + + /* success or failure */ + u32 status; +} __packed; + +enum wcn36xx_hal_scan_offload_ind_type { + /* Scan has been started */ + WCN36XX_HAL_SCAN_IND_STARTED = 0x01, + /* Scan has been completed */ + WCN36XX_HAL_SCAN_IND_COMPLETED = 0x02, + /* Moved to foreign channel */ + WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL = 0x08, + /* scan request has been dequeued */ + WCN36XX_HAL_SCAN_IND_DEQUEUED = 0x10, + /* preempted by other high priority scan */ + WCN36XX_HAL_SCAN_IND_PREEMPTED = 0x20, + /* scan start failed */ + WCN36XX_HAL_SCAN_IND_FAILED = 0x40, + /*scan restarted */ + WCN36XX_HAL_SCAN_IND_RESTARTED = 0x80, 
+ WCN36XX_HAL_SCAN_IND_MAX = WCN36XX_HAL_MAX_ENUM_SIZE +}; + +struct wcn36xx_hal_scan_offload_ind { + struct wcn36xx_hal_msg_header header; + + u32 type; + u32 channel_mhz; + u32 scan_id; +} __packed; + +struct wcn36xx_hal_stop_scan_offload_req_msg { + struct wcn36xx_hal_msg_header header; +} __packed; + +struct wcn36xx_hal_stop_scan_offload_rsp_msg { + struct wcn36xx_hal_msg_header header; + + /* success or failure */ + u32 status; +} __packed; + enum wcn36xx_hal_rate_index { HW_RATE_INDEX_1MBPS = 0x82, HW_RATE_INDEX_2MBPS = 0x84, @@ -1507,11 +1609,6 @@ struct wcn36xx_hal_edca_param_record { u16 txop_limit; } __packed; -struct wcn36xx_hal_mac_ssid { - u8 length; - u8 ssid[32]; -} __packed; - /* Concurrency role. These are generic IDs that identify the various roles * in the software system. */ enum wcn36xx_hal_con_mode { diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 987f1252a3cf..ab5be6d2c691 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -641,7 +641,6 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_scan_request *hw_req) { struct wcn36xx *wcn = hw->priv; - mutex_lock(&wcn->scan_lock); if (wcn->scan_req) { mutex_unlock(&wcn->scan_lock); @@ -650,11 +649,16 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw, wcn->scan_aborted = false; wcn->scan_req = &hw_req->req; + mutex_unlock(&wcn->scan_lock); - schedule_work(&wcn->scan_work); + if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) { + /* legacy manual/sw scan */ + schedule_work(&wcn->scan_work); + return 0; + } - return 0; + return wcn36xx_smd_start_hw_scan(wcn, vif, &hw_req->req); } static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw, @@ -662,6 +666,12 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw, { struct wcn36xx *wcn = hw->priv; + if (!wcn36xx_smd_stop_hw_scan(wcn)) { + struct cfg80211_scan_info scan_info = { .aborted = true }; + + ieee80211_scan_completed(wcn->hw, &scan_info); + } + mutex_lock(&wcn->scan_lock); wcn->scan_aborted = true; mutex_unlock(&wcn->scan_lock); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 9c6590d5348a..2a4871ca9c72 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -73,6 +73,8 @@ static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = { WCN36XX_CFG_VAL(TX_PWR_CTRL_ENABLE, 1), WCN36XX_CFG_VAL(ENABLE_CLOSE_LOOP, 1), WCN36XX_CFG_VAL(ENABLE_LPWR_IMG_TRANSITION, 0), + WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_BT, 120000), + WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_WLAN, 30000), WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10), WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0), }; @@ -613,6 +615,85 @@ out: return ret; } +int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif, + struct cfg80211_scan_request *req) +{ + struct wcn36xx_hal_start_scan_offload_req_msg msg_body; + int ret, i; + + mutex_lock(&wcn->hal_mutex); + INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_OFFLOAD_REQ); + + msg_body.scan_type = WCN36XX_HAL_SCAN_TYPE_ACTIVE; + msg_body.min_ch_time = 30; + msg_body.max_ch_time = 100; + msg_body.scan_hidden = 1; + memcpy(msg_body.mac, vif->addr, ETH_ALEN); + msg_body.p2p_search = vif->p2p; + + msg_body.num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body.ssids)); + for (i = 0; i < msg_body.num_ssid; i++) { + msg_body.ssids[i].length = min_t(u8, req->ssids[i].ssid_len, + sizeof(msg_body.ssids[i].ssid)); + memcpy(msg_body.ssids[i].ssid, req->ssids[i].ssid, + 
msg_body.ssids[i].length); + } + + msg_body.num_channel = min_t(u8, req->n_channels, + sizeof(msg_body.channels)); + for (i = 0; i < msg_body.num_channel; i++) + msg_body.channels[i] = req->channels[i]->hw_value; + + PREPARE_HAL_BUF(wcn->hal_buf, msg_body); + + wcn36xx_dbg(WCN36XX_DBG_HAL, + "hal start hw-scan (channels: %u; ssids: %u; p2p: %s)\n", + msg_body.num_channel, msg_body.num_ssid, + msg_body.p2p_search ? "yes" : "no"); + + ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len); + if (ret) { + wcn36xx_err("Sending hal_start_scan_offload failed\n"); + goto out; + } + ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len); + if (ret) { + wcn36xx_err("hal_start_scan_offload response failed err=%d\n", + ret); + goto out; + } +out: + mutex_unlock(&wcn->hal_mutex); + return ret; +} + +int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn) +{ + struct wcn36xx_hal_stop_scan_offload_req_msg msg_body; + int ret; + + mutex_lock(&wcn->hal_mutex); + INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ); + PREPARE_HAL_BUF(wcn->hal_buf, msg_body); + + wcn36xx_dbg(WCN36XX_DBG_HAL, "hal stop hw-scan\n"); + + ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len); + if (ret) { + wcn36xx_err("Sending hal_stop_scan_offload failed\n"); + goto out; + } + ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len); + if (ret) { + wcn36xx_err("hal_stop_scan_offload response failed err=%d\n", + ret); + goto out; + } +out: + mutex_unlock(&wcn->hal_mutex); + return ret; +} + static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len) { struct wcn36xx_hal_switch_channel_rsp_msg *rsp; @@ -2039,6 +2120,40 @@ static int wcn36xx_smd_tx_compl_ind(struct wcn36xx *wcn, void *buf, size_t len) return 0; } +static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len) +{ + struct wcn36xx_hal_scan_offload_ind *rsp = buf; + struct cfg80211_scan_info scan_info = {}; + + if (len != sizeof(*rsp)) { + wcn36xx_warn("Corrupted delete scan indication\n"); + return -EIO; + } + + wcn36xx_dbg(WCN36XX_DBG_HAL, "scan indication (type %x)", rsp->type); + + switch (rsp->type) { + case WCN36XX_HAL_SCAN_IND_FAILED: + scan_info.aborted = true; + case WCN36XX_HAL_SCAN_IND_COMPLETED: + mutex_lock(&wcn->scan_lock); + wcn->scan_req = NULL; + mutex_unlock(&wcn->scan_lock); + ieee80211_scan_completed(wcn->hw, &scan_info); + break; + case WCN36XX_HAL_SCAN_IND_STARTED: + case WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL: + case WCN36XX_HAL_SCAN_IND_DEQUEUED: + case WCN36XX_HAL_SCAN_IND_PREEMPTED: + case WCN36XX_HAL_SCAN_IND_RESTARTED: + break; + default: + wcn36xx_warn("Unknown scan indication type %x\n", rsp->type); + } + + return 0; +} + static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn, void *buf, size_t len) @@ -2250,6 +2365,8 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev, case WCN36XX_HAL_CH_SWITCH_RSP: case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP: case WCN36XX_HAL_8023_MULTICAST_LIST_RSP: + case WCN36XX_HAL_START_SCAN_OFFLOAD_RSP: + case WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP: memcpy(wcn->hal_buf, buf, len); wcn->hal_rsp_len = len; complete(&wcn->hal_rsp_compl); @@ -2262,6 +2379,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev, case WCN36XX_HAL_MISSED_BEACON_IND: case WCN36XX_HAL_DELETE_STA_CONTEXT_IND: case WCN36XX_HAL_PRINT_REG_INFO_IND: + case WCN36XX_HAL_SCAN_OFFLOAD_IND: msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC); if (!msg_ind) { wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n", @@ -2298,6 +2416,8 @@ static void wcn36xx_ind_smd_work(struct 
work_struct *work) hal_ind_msg = list_first_entry(&wcn->hal_ind_queue, struct wcn36xx_hal_ind_msg, list); + list_del(wcn->hal_ind_queue.next); + spin_unlock_irqrestore(&wcn->hal_ind_lock, flags); msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg; @@ -2326,12 +2446,14 @@ static void wcn36xx_ind_smd_work(struct work_struct *work) hal_ind_msg->msg, hal_ind_msg->msg_len); break; + case WCN36XX_HAL_SCAN_OFFLOAD_IND: + wcn36xx_smd_hw_scan_ind(wcn, hal_ind_msg->msg, + hal_ind_msg->msg_len); + break; default: wcn36xx_err("SMD_EVENT (%d) not supported\n", msg_header->msg_type); } - list_del(wcn->hal_ind_queue.next); - spin_unlock_irqrestore(&wcn->hal_ind_lock, flags); kfree(hal_ind_msg); } int wcn36xx_smd_open(struct wcn36xx *wcn) diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index 013fc9546f56..8076edf40ac8 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -65,6 +65,9 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel); int wcn36xx_smd_finish_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode); int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count); +int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif, + struct cfg80211_scan_request *req); +int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn); int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif); int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr); int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index); diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h index c131b5e1292f..d32c1f4e533a 100644 --- a/drivers/net/wireless/ath/wil6210/boot_loader.h +++ b/drivers/net/wireless/ath/wil6210/boot_loader.h @@ -1,4 +1,5 @@ /* Copyright (c) 2015 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -39,7 +40,8 @@ struct bl_dedicated_registers_v1 { /* valid only for version 2 and above */ __le32 bl_assert_code; /* 0x880A58 BL Assert code */ __le32 bl_assert_blink; /* 0x880A5C BL Assert Branch */ - __le32 bl_reserved[22]; /* 0x880A60 - 0x880AB4 */ + __le32 bl_shutdown_handshake; /* 0x880A60 BL cleaner shutdown */ + __le32 bl_reserved[21]; /* 0x880A64 - 0x880AB4 */ __le32 bl_magic_number; /* 0x880AB8 BL Magic number */ } __packed; @@ -58,4 +60,9 @@ struct bl_dedicated_registers_v0 { u8 mac_address[6]; /* 0x880A4c BL mac address */ } __packed; +/* bits for bl_shutdown_handshake */ +#define BL_SHUTDOWN_HS_GRTD BIT(0) +#define BL_SHUTDOWN_HS_RTD BIT(1) +#define BL_SHUTDOWN_HS_PROT_VER(x) WIL_GET_BITS(x, 28, 31) + #endif /* BOOT_LOADER_EXPORT_H_ */ diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 85d5c04618eb..768f63f38341 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -901,7 +901,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, u64 *cookie) { const u8 *buf = params->buf; - size_t len = params->len; + size_t len = params->len, total; struct wil6210_priv *wil = wiphy_to_wil(wiphy); int rc; bool tx_status = false; @@ -926,7 +926,11 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, if (len < sizeof(struct ieee80211_hdr_3addr)) return -EINVAL; - cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL); + total = sizeof(*cmd) + len; + if (total < len) + return -EINVAL; + + cmd = kmalloc(total, GFP_KERNEL); if (!cmd) { rc = -ENOMEM; goto out; @@ -936,7 +940,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, cmd->len = cpu_to_le16(len); memcpy(cmd->payload, buf, len); - rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len, + rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, total, WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); if (rc == 0) tx_status = !evt.evt.status; @@ -952,9 +956,8 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy, struct cfg80211_chan_def *chandef) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct wireless_dev *wdev = wil_to_wdev(wil); - wdev->preset_chandef = *chandef; + wil->monitor_chandef = *chandef; return 0; } @@ -1727,9 +1730,12 @@ static int wil_cfg80211_suspend(struct wiphy *wiphy, wil_dbg_pm(wil, "suspending\n"); - wil_p2p_stop_discovery(wil); - + mutex_lock(&wil->mutex); + mutex_lock(&wil->p2p_wdev_mutex); + wil_p2p_stop_radio_operations(wil); wil_abort_scan(wil, true); + mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->mutex); out: return rc; @@ -1744,6 +1750,69 @@ static int wil_cfg80211_resume(struct wiphy *wiphy) return 0; } +static int +wil_cfg80211_sched_scan_start(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_sched_scan_request *request) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + int i, rc; + + wil_dbg_misc(wil, + "sched scan start: n_ssids %d, ie_len %zu, flags 0x%x\n", + request->n_ssids, request->ie_len, request->flags); + for (i = 0; i < request->n_ssids; i++) { + wil_dbg_misc(wil, "SSID[%d]:", i); + wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1, + request->ssids[i].ssid, + request->ssids[i].ssid_len, true); + } + wil_dbg_misc(wil, "channels:"); + for (i = 0; i < request->n_channels; i++) + wil_dbg_misc(wil, " %d%s", 
request->channels[i]->hw_value, + i == request->n_channels - 1 ? "\n" : ""); + wil_dbg_misc(wil, "n_match_sets %d, min_rssi_thold %d, delay %d\n", + request->n_match_sets, request->min_rssi_thold, + request->delay); + for (i = 0; i < request->n_match_sets; i++) { + struct cfg80211_match_set *ms = &request->match_sets[i]; + + wil_dbg_misc(wil, "MATCHSET[%d]: rssi_thold %d\n", + i, ms->rssi_thold); + wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1, + ms->ssid.ssid, + ms->ssid.ssid_len, true); + } + wil_dbg_misc(wil, "n_scan_plans %d\n", request->n_scan_plans); + for (i = 0; i < request->n_scan_plans; i++) { + struct cfg80211_sched_scan_plan *sp = &request->scan_plans[i]; + + wil_dbg_misc(wil, "SCAN PLAN[%d]: interval %d iterations %d\n", + i, sp->interval, sp->iterations); + } + + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie); + if (rc) + return rc; + return wmi_start_sched_scan(wil, request); +} + +static int +wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, + u64 reqid) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + int rc; + + rc = wmi_stop_sched_scan(wil); + /* device would return error if it thinks PNO is already stopped. + * ignore the return code so user space and driver gets back in-sync + */ + wil_dbg_misc(wil, "sched scan stopped (%d)\n", rc); + + return 0; +} + static const struct cfg80211_ops wil_cfg80211_ops = { .add_virtual_intf = wil_cfg80211_add_iface, .del_virtual_intf = wil_cfg80211_del_iface, @@ -1777,6 +1846,8 @@ static const struct cfg80211_ops wil_cfg80211_ops = { .set_power_mgmt = wil_cfg80211_set_power_mgmt, .suspend = wil_cfg80211_suspend, .resume = wil_cfg80211_resume, + .sched_scan_start = wil_cfg80211_sched_scan_start, + .sched_scan_stop = wil_cfg80211_sched_scan_stop, }; static void wil_wiphy_init(struct wiphy *wiphy) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index e58dc6dc1f9c..4a4888246e8c 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -242,12 +242,19 @@ static void wil_print_ring(struct seq_file *s, const char *prefix, static int wil_mbox_debugfs_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; + int ret; + + ret = wil_pm_runtime_get(wil); + if (ret < 0) + return ret; wil_print_ring(s, "tx", wil->csr + HOST_MBOX + offsetof(struct wil6210_mbox_ctl, tx)); wil_print_ring(s, "rx", wil->csr + HOST_MBOX + offsetof(struct wil6210_mbox_ctl, rx)); + wil_pm_runtime_put(wil); + return 0; } @@ -265,15 +272,37 @@ static const struct file_operations fops_mbox = { static int wil_debugfs_iomem_x32_set(void *data, u64 val) { - writel(val, (void __iomem *)data); + struct wil_debugfs_iomem_data *d = (struct + wil_debugfs_iomem_data *)data; + struct wil6210_priv *wil = d->wil; + int ret; + + ret = wil_pm_runtime_get(wil); + if (ret < 0) + return ret; + + writel(val, (void __iomem *)d->offset); wmb(); /* make sure write propagated to HW */ + wil_pm_runtime_put(wil); + return 0; } static int wil_debugfs_iomem_x32_get(void *data, u64 *val) { - *val = readl((void __iomem *)data); + struct wil_debugfs_iomem_data *d = (struct + wil_debugfs_iomem_data *)data; + struct wil6210_priv *wil = d->wil; + int ret; + + ret = wil_pm_runtime_get(wil); + if (ret < 0) + return ret; + + *val = readl((void __iomem *)d->offset); + + wil_pm_runtime_put(wil); return 0; } @@ -284,10 +313,21 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get, static struct dentry 
*wil_debugfs_create_iomem_x32(const char *name, umode_t mode, struct dentry *parent, - void *value) + void *value, + struct wil6210_priv *wil) { - return debugfs_create_file(name, mode, parent, value, - &fops_iomem_x32); + struct dentry *file; + struct wil_debugfs_iomem_data *data = &wil->dbg_data.data_arr[ + wil->dbg_data.iomem_data_count]; + + data->wil = wil; + data->offset = value; + + file = debugfs_create_file(name, mode, parent, data, &fops_iomem_x32); + if (!IS_ERR_OR_NULL(file)) + wil->dbg_data.iomem_data_count++; + + return file; } static int wil_debugfs_ulong_set(void *data, u64 val) @@ -346,7 +386,8 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil, case doff_io32: f = wil_debugfs_create_iomem_x32(tbl[i].name, tbl[i].mode, dbg, - base + tbl[i].off); + base + tbl[i].off, + wil); break; case doff_u8: f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg, @@ -475,13 +516,22 @@ static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil, static int wil_memread_debugfs_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; - void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr)); + void __iomem *a; + int ret; + + ret = wil_pm_runtime_get(wil); + if (ret < 0) + return ret; + + a = wmi_buffer(wil, cpu_to_le32(mem_addr)); if (a) seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a)); else seq_printf(s, "[0x%08x] = INVALID\n", mem_addr); + wil_pm_runtime_put(wil); + return 0; } @@ -502,10 +552,12 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, { enum { max_count = 4096 }; struct wil_blob_wrapper *wil_blob = file->private_data; + struct wil6210_priv *wil = wil_blob->wil; loff_t pos = *ppos; size_t available = wil_blob->blob.size; void *buf; size_t ret; + int rc; if (test_bit(wil_status_suspending, wil_blob->wil->status) || test_bit(wil_status_suspended, wil_blob->wil->status)) @@ -526,10 +578,19 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, if (!buf) return -ENOMEM; + rc = wil_pm_runtime_get(wil); + if (rc < 0) { + kfree(buf); + return rc; + } + wil_memcpy_fromio_32(buf, (const void __iomem *) wil_blob->blob.data + pos, count); ret = copy_to_user(user_buf, buf, count); + + wil_pm_runtime_put(wil); + kfree(buf); if (ret == count) return -EFAULT; @@ -808,7 +869,6 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, params.buf = frame; params.len = len; - params.chan = wdev->preset_chandef.chan; rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL); @@ -1571,8 +1631,6 @@ static ssize_t wil_write_suspend_stats(struct file *file, struct wil6210_priv *wil = file->private_data; memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats)); - wil->suspend_stats.min_suspend_time = ULONG_MAX; - wil->suspend_stats.collection_start = ktime_get(); return len; } @@ -1582,33 +1640,41 @@ static ssize_t wil_read_suspend_stats(struct file *file, size_t count, loff_t *ppos) { struct wil6210_priv *wil = file->private_data; - static char text[400]; - int n; - unsigned long long stats_collection_time = - ktime_to_us(ktime_sub(ktime_get(), - wil->suspend_stats.collection_start)); + char *text; + int n, ret, text_size = 500; - n = snprintf(text, sizeof(text), - "Suspend statistics:\n" + text = kmalloc(text_size, GFP_KERNEL); + if (!text) + return -ENOMEM; + + n = snprintf(text, text_size, + "Radio on suspend statistics:\n" + "successful suspends:%ld failed suspends:%ld\n" + "successful resumes:%ld failed resumes:%ld\n" + "rejected by device:%ld\n" + "Radio off suspend
statistics:\n" "successful suspends:%ld failed suspends:%ld\n" "successful resumes:%ld failed resumes:%ld\n" - "rejected by host:%ld rejected by device:%ld\n" - "total suspend time:%lld min suspend time:%lld\n" - "max suspend time:%lld stats collection time: %lld\n", - wil->suspend_stats.successful_suspends, - wil->suspend_stats.failed_suspends, - wil->suspend_stats.successful_resumes, - wil->suspend_stats.failed_resumes, - wil->suspend_stats.rejected_by_host, + "General statistics:\n" + "rejected by host:%ld\n", + wil->suspend_stats.r_on.successful_suspends, + wil->suspend_stats.r_on.failed_suspends, + wil->suspend_stats.r_on.successful_resumes, + wil->suspend_stats.r_on.failed_resumes, wil->suspend_stats.rejected_by_device, - wil->suspend_stats.total_suspend_time, - wil->suspend_stats.min_suspend_time, - wil->suspend_stats.max_suspend_time, - stats_collection_time); + wil->suspend_stats.r_off.successful_suspends, + wil->suspend_stats.r_off.failed_suspends, + wil->suspend_stats.r_off.successful_resumes, + wil->suspend_stats.r_off.failed_resumes, + wil->suspend_stats.rejected_by_host); + + n = min_t(int, n, text_size); - n = min_t(int, n, sizeof(text)); + ret = simple_read_from_buffer(user_buf, count, ppos, text, n); - return simple_read_from_buffer(user_buf, count, ppos, text, n); + kfree(text); + + return ret; } static const struct file_operations fops_suspend_stats = { @@ -1736,14 +1802,31 @@ static const struct dbg_off dbg_statics[] = { {}, }; +static const int dbg_off_count = 4 * (ARRAY_SIZE(isr_off) - 1) + + ARRAY_SIZE(dbg_wil_regs) - 1 + + ARRAY_SIZE(pseudo_isr_off) - 1 + + ARRAY_SIZE(lgc_itr_cnt_off) - 1 + + ARRAY_SIZE(tx_itr_cnt_off) - 1 + + ARRAY_SIZE(rx_itr_cnt_off) - 1; + int wil6210_debugfs_init(struct wil6210_priv *wil) { struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME, wil_to_wiphy(wil)->debugfsdir); - if (IS_ERR_OR_NULL(dbg)) return -ENODEV; + wil->dbg_data.data_arr = kcalloc(dbg_off_count, + sizeof(struct wil_debugfs_iomem_data), + GFP_KERNEL); + if (!wil->dbg_data.data_arr) { + debugfs_remove_recursive(dbg); + wil->debug = NULL; + return -ENOMEM; + } + + wil->dbg_data.iomem_data_count = 0; + wil_pmc_init(wil); wil6210_debugfs_init_files(wil, dbg); @@ -1758,8 +1841,6 @@ int wil6210_debugfs_init(struct wil6210_priv *wil) wil6210_debugfs_create_ITR_CNT(wil, dbg); - wil->suspend_stats.collection_start = ktime_get(); - return 0; } @@ -1768,6 +1849,8 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil) debugfs_remove_recursive(wil->debug); wil->debug = NULL; + kfree(wil->dbg_data.data_arr); + /* free pmc memory without sending command to fw, as it will * be reset on the way down anyway */ diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c index adcfef4dabf7..66200f616a37 100644 --- a/drivers/net/wireless/ath/wil6210/ethtool.c +++ b/drivers/net/wireless/ath/wil6210/ethtool.c @@ -47,9 +47,14 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev, struct wil6210_priv *wil = ndev_to_wil(ndev); u32 tx_itr_en, tx_itr_val = 0; u32 rx_itr_en, rx_itr_val = 0; + int ret; wil_dbg_misc(wil, "ethtoolops_get_coalesce\n"); + ret = wil_pm_runtime_get(wil); + if (ret < 0) + return ret; + tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL); if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN) tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH); @@ -58,6 +63,8 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev, if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN) rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH); + 
wil_pm_runtime_put(wil); + cp->tx_coalesce_usecs = tx_itr_val; cp->rx_coalesce_usecs = rx_itr_val; return 0; @@ -67,6 +74,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *cp) { struct wil6210_priv *wil = ndev_to_wil(ndev); + int ret; wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n", cp->rx_coalesce_usecs, cp->tx_coalesce_usecs); @@ -86,8 +94,15 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev, wil->tx_max_burst_duration = cp->tx_coalesce_usecs; wil->rx_max_burst_duration = cp->rx_coalesce_usecs; + + ret = wil_pm_runtime_get(wil); + if (ret < 0) + return ret; + wil_configure_interrupt_moderation(wil); + wil_pm_runtime_put(wil); + return 0; out_bad: diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h index 2f2b910501ba..2c7b24f61587 100644 --- a/drivers/net/wireless/ath/wil6210/fw.h +++ b/drivers/net/wireless/ath/wil6210/fw.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2014,2016 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -58,15 +59,30 @@ struct wil_fw_record_comment { /* type == wil_fw_type_comment */ u8 data[0]; /* free-form data [data_size], see above */ } __packed; +/* Comment header - common for all comment record types */ +struct wil_fw_record_comment_hdr { + __le32 magic; +}; + /* FW capabilities encoded inside a comment record */ #define WIL_FW_CAPABILITIES_MAGIC (0xabcddcba) struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */ /* identifies capabilities record */ - __le32 magic; + struct wil_fw_record_comment_hdr hdr; /* capabilities (variable size), see enum wmi_fw_capability */ u8 capabilities[0]; }; +/* brd file info encoded inside a comment record */ +#define WIL_BRD_FILE_MAGIC (0xabcddcbb) +struct wil_fw_record_brd_file { /* type == wil_fw_type_comment */ + /* identifies brd file record */ + struct wil_fw_record_comment_hdr hdr; + __le32 version; + __le32 base_addr; + __le32 max_size_bytes; +} __packed; + /* perform action * data_size = @head.size - offsetof(struct wil_fw_record_action, data) */ diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c index e01acac88825..914c0106e94b 100644 --- a/drivers/net/wireless/ath/wil6210/fw_inc.c +++ b/drivers/net/wireless/ath/wil6210/fw_inc.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
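The coalescing hunks above, like the debugfs memread/ioblob hunks before them, all apply the same discipline: take a runtime-PM reference before touching RGF registers and drop it afterwards, since MMIO reads from a runtime-suspended PCIe device return garbage (typically all-ones) or fault the bus. A minimal kernel-style sketch of the pattern; read_some_rgf() is a hypothetical stand-in for whatever registers a given handler touches, not a function from this patch:

static int guarded_rgf_read(struct wil6210_priv *wil, u32 *out)
{
	int rc = wil_pm_runtime_get(wil);	/* resume the device if suspended */

	if (rc < 0)
		return rc;			/* could not power up, bail out */

	*out = read_some_rgf(wil);		/* hypothetical register access */

	wil_pm_runtime_put(wil);		/* drop the reference, re-arm autosuspend */
	return 0;
}
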
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -26,14 +27,17 @@ prefix_type, rowsize, \ groupsize, buf, len, ascii) -#define FW_ADDR_CHECK(ioaddr, val, msg) do { \ - ioaddr = wmi_buffer(wil, val); \ - if (!ioaddr) { \ - wil_err_fw(wil, "bad " msg ": 0x%08x\n", \ - le32_to_cpu(val)); \ - return -EINVAL; \ - } \ - } while (0) +static bool wil_fw_addr_check(struct wil6210_priv *wil, + void __iomem **ioaddr, __le32 val, + u32 size, const char *msg) +{ + *ioaddr = wmi_buffer_block(wil, val, size); + if (!(*ioaddr)) { + wil_err_fw(wil, "bad %s: 0x%08x\n", msg, le32_to_cpu(val)); + return false; + } + return true; +} /** * wil_fw_verify - verify firmware file validity @@ -124,14 +128,6 @@ static int fw_ignore_section(struct wil6210_priv *wil, const void *data, return 0; } -static int fw_handle_comment(struct wil6210_priv *wil, const void *data, - size_t size) -{ - wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, data, size, true); - - return 0; -} - static int fw_handle_capabilities(struct wil6210_priv *wil, const void *data, size_t size) @@ -139,9 +135,11 @@ fw_handle_capabilities(struct wil6210_priv *wil, const void *data, const struct wil_fw_record_capabilities *rec = data; size_t capa_size; - if (size < sizeof(*rec) || - le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC) + if (size < sizeof(*rec)) { + wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, + data, size, true); return 0; + } capa_size = size - offsetof(struct wil_fw_record_capabilities, capabilities); @@ -153,8 +151,56 @@ fw_handle_capabilities(struct wil6210_priv *wil, const void *data, return 0; } -static int fw_handle_data(struct wil6210_priv *wil, const void *data, - size_t size) +static int +fw_handle_brd_file(struct wil6210_priv *wil, const void *data, + size_t size) +{ + const struct wil_fw_record_brd_file *rec = data; + + if (size < sizeof(*rec)) { + wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, + data, size, true); + return 0; + } + + wil->brd_file_addr = le32_to_cpu(rec->base_addr); + wil->brd_file_max_size = le32_to_cpu(rec->max_size_bytes); + + wil_dbg_fw(wil, "brd_file_addr 0x%x, brd_file_max_size %d\n", + wil->brd_file_addr, wil->brd_file_max_size); + + return 0; +} + +static int +fw_handle_comment(struct wil6210_priv *wil, const void *data, + size_t size) +{ + const struct wil_fw_record_comment_hdr *hdr = data; + u32 magic; + int rc = 0; + + if (size < sizeof(*hdr)) + return 0; + + magic = le32_to_cpu(hdr->magic); + + switch (magic) { + case WIL_FW_CAPABILITIES_MAGIC: + wil_dbg_fw(wil, "magic is WIL_FW_CAPABILITIES_MAGIC\n"); + rc = fw_handle_capabilities(wil, data, size); + break; + case WIL_BRD_FILE_MAGIC: + wil_dbg_fw(wil, "magic is WIL_BRD_FILE_MAGIC\n"); + rc = fw_handle_brd_file(wil, data, size); + break; + } + + return rc; +} + +static int __fw_handle_data(struct wil6210_priv *wil, const void *data, + size_t size, __le32 addr) { const struct wil_fw_record_data *d = data; void __iomem *dst; @@ -165,15 +211,23 @@ static int fw_handle_data(struct wil6210_priv *wil, const void *data, return -EINVAL; } - FW_ADDR_CHECK(dst, d->addr, "address"); - wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(d->addr), - s); + if (!wil_fw_addr_check(wil, &dst, addr, s, "address")) + return -EINVAL; + wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(addr), s); wil_memcpy_toio_32(dst, d->data, s); wmb(); /* finish before processing next record */ return 0; } +static int fw_handle_data(struct wil6210_priv 
*wil, const void *data, + size_t size) +{ + const struct wil_fw_record_data *d = data; + + return __fw_handle_data(wil, data, size, d->addr); +} + static int fw_handle_fill(struct wil6210_priv *wil, const void *data, size_t size) { @@ -197,7 +251,8 @@ static int fw_handle_fill(struct wil6210_priv *wil, const void *data, return -EINVAL; } - FW_ADDR_CHECK(dst, d->addr, "address"); + if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address")) + return -EINVAL; v = le32_to_cpu(d->value); wil_dbg_fw(wil, "fill [0x%08x] <== 0x%08x, %zu bytes\n", @@ -253,7 +308,8 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data, u32 v = le32_to_cpu(block[i].value); u32 x, y; - FW_ADDR_CHECK(dst, block[i].addr, "address"); + if (!wil_fw_addr_check(wil, &dst, block[i].addr, 0, "address")) + return -EINVAL; x = readl(dst); y = (x & m) | (v & ~m); @@ -319,10 +375,15 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data, wil_dbg_fw(wil, "gw write record [%3d] blocks, cmd 0x%08x\n", n, gw_cmd); - FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr"); - FW_ADDR_CHECK(gwa_val, d->gateway_value_addr, "gateway_value_addr"); - FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr"); - FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address"); + if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0, + "gateway_addr_addr") || + !wil_fw_addr_check(wil, &gwa_val, d->gateway_value_addr, 0, + "gateway_value_addr") || + !wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0, + "gateway_cmd_addr") || + !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0, + "gateway_ctrl_address")) + return -EINVAL; wil_dbg_fw(wil, "gw addresses: addr 0x%08x val 0x%08x" " cmd 0x%08x ctl 0x%08x\n", @@ -378,12 +439,19 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data, wil_dbg_fw(wil, "gw4 write record [%3d] blocks, cmd 0x%08x\n", n, gw_cmd); - FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr"); + if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0, + "gateway_addr_addr")) + return -EINVAL; for (k = 0; k < ARRAY_SIZE(block->value); k++) - FW_ADDR_CHECK(gwa_val[k], d->gateway_value_addr[k], - "gateway_value_addr"); - FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr"); - FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address"); + if (!wil_fw_addr_check(wil, &gwa_val[k], + d->gateway_value_addr[k], + 0, "gateway_value_addr")) + return -EINVAL; + if (!wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0, + "gateway_cmd_addr") || + !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0, + "gateway_ctrl_address")) + return -EINVAL; wil_dbg_fw(wil, "gw4 addresses: addr 0x%08x cmd 0x%08x ctl 0x%08x\n", le32_to_cpu(d->gateway_addr_addr), @@ -422,7 +490,7 @@ static const struct { int (*parse_handler)(struct wil6210_priv *wil, const void *data, size_t size); } wil_fw_handlers[] = { - {wil_fw_type_comment, fw_handle_comment, fw_handle_capabilities}, + {wil_fw_type_comment, fw_handle_comment, fw_handle_comment}, {wil_fw_type_data, fw_handle_data, fw_ignore_section}, {wil_fw_type_fill, fw_handle_fill, fw_ignore_section}, /* wil_fw_type_action */ @@ -517,7 +585,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name, rc = request_firmware(&fw, name, wil_to_dev(wil)); if (rc) { - wil_err_fw(wil, "Failed to load firmware %s\n", name); + wil_err_fw(wil, "Failed to load firmware %s rc %d\n", name, rc); return rc; } wil_dbg_fw(wil, "Loading <%s>, %zu 
bytes\n", name, fw->size); @@ -539,6 +607,100 @@ out: } /** + * wil_brd_process - process section from BRD file + * + * Return error code + */ +static int wil_brd_process(struct wil6210_priv *wil, const void *data, + size_t size) +{ + int rc = 0; + const struct wil_fw_record_head *hdr = data; + size_t s, hdr_sz; + u16 type; + + /* Assuming the board file includes only one header record and one data + * record. Each record starts with wil_fw_record_head. + */ + if (size < sizeof(*hdr)) + return -EINVAL; + s = sizeof(*hdr) + le32_to_cpu(hdr->size); + if (s > size) + return -EINVAL; + + /* Skip the header record and handle the data record */ + hdr = (const void *)hdr + s; + size -= s; + if (size < sizeof(*hdr)) + return -EINVAL; + hdr_sz = le32_to_cpu(hdr->size); + + if (wil->brd_file_max_size && hdr_sz > wil->brd_file_max_size) + return -EINVAL; + if (sizeof(*hdr) + hdr_sz > size) + return -EINVAL; + if (hdr_sz % 4) { + wil_err_fw(wil, "unaligned record size: %zu\n", + hdr_sz); + return -EINVAL; + } + type = le16_to_cpu(hdr->type); + if (type != wil_fw_type_data) { + wil_err_fw(wil, "invalid record type for board file: %d\n", + type); + return -EINVAL; + } + if (hdr_sz < sizeof(struct wil_fw_record_data)) { + wil_err_fw(wil, "data record too short: %zu\n", hdr_sz); + return -EINVAL; + } + + wil_dbg_fw(wil, "using addr from fw file: [0x%08x]\n", + wil->brd_file_addr); + + rc = __fw_handle_data(wil, &hdr[1], hdr_sz, + cpu_to_le32(wil->brd_file_addr)); + + return rc; +} + +/** + * wil_request_board - Request board file + * + * Request board image from the file + * board file address and max size are read from FW file + * during initialization. + * brd file shall include one header and one data section. + * + * Return error code + */ +int wil_request_board(struct wil6210_priv *wil, const char *name) +{ + int rc, dlen; + const struct firmware *brd; + + rc = request_firmware(&brd, name, wil_to_dev(wil)); + if (rc) { + wil_err_fw(wil, "Failed to load brd %s\n", name); + return rc; + } + wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, brd->size); + + /* Verify the header */ + dlen = wil_fw_verify(wil, brd->data, brd->size); + if (dlen < 0) { + rc = dlen; + goto out; + } + /* Process the data record */ + rc = wil_brd_process(wil, brd->data, dlen); + +out: + release_firmware(brd); + return rc; +} + +/** * wil_fw_verify_file_exists - checks if firmware file exist * * @wil: driver context diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 59def4f3fcf3..1835187ea075 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -358,6 +359,25 @@ static void wil_cache_mbox_regs(struct wil6210_priv *wil) wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx); } +static bool wil_validate_mbox_regs(struct wil6210_priv *wil) +{ + size_t min_size = sizeof(struct wil6210_mbox_hdr) + + sizeof(struct wmi_cmd_hdr); + + if (wil->mbox_ctl.rx.entry_size < min_size) { + wil_err(wil, "rx mbox entry too small (%d)\n", + wil->mbox_ctl.rx.entry_size); + return false; + } + if (wil->mbox_ctl.tx.entry_size < min_size) { + wil_err(wil, "tx mbox entry too small (%d)\n", + wil->mbox_ctl.tx.entry_size); + return false; + } + + return true; +} + static irqreturn_t wil6210_irq_misc(int irq, void *cookie) { struct wil6210_priv *wil = cookie; @@ -376,8 +396,9 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie) wil6210_mask_irq_misc(wil, false); if (isr & ISR_MISC_FW_ERROR) { - u32 fw_assert_code = wil_r(wil, RGF_FW_ASSERT_CODE); - u32 ucode_assert_code = wil_r(wil, RGF_UCODE_ASSERT_CODE); + u32 fw_assert_code = wil_r(wil, wil->rgf_fw_assert_code_addr); + u32 ucode_assert_code = + wil_r(wil, wil->rgf_ucode_assert_code_addr); wil_err(wil, "Firmware error detected, assert codes FW 0x%08x, UCODE 0x%08x\n", @@ -393,7 +414,8 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie) if (isr & ISR_MISC_FW_READY) { wil_dbg_irq(wil, "IRQ: FW ready\n"); wil_cache_mbox_regs(wil); - set_bit(wil_status_mbox_ready, wil->status); + if (wil_validate_mbox_regs(wil)) + set_bit(wil_status_mbox_ready, wil->status); /** * Actual FW ready indicated by the * WMI_FW_READY_EVENTID @@ -545,7 +567,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie) if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff))) return IRQ_NONE; - /* FIXME: IRQ mask debug */ + /* IRQ mask debug */ if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause))) return IRQ_NONE; diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 885924abf61c..0c61a6c13991 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
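wil_validate_mbox_regs() above exists because entry_size is read back from device memory: a corrupt or malicious value smaller than a mailbox header plus a WMI command header would make every subsequent ring access under-sized. The bound itself is plain struct arithmetic; a standalone model, where the two structs are illustrative stand-ins rather than the driver's real layouts:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mbox_hdr { uint32_t seq; uint16_t ctx; uint16_t len; };		/* stand-in */
struct wmi_cmd_hdr { uint8_t mid; uint8_t rsvd; uint16_t id; uint32_t ts; }; /* stand-in */

static bool mbox_entry_size_ok(unsigned int entry_size)
{
	return entry_size >= sizeof(struct mbox_hdr) + sizeof(struct wmi_cmd_hdr);
}

int main(void)
{
	printf("minimum legal entry: %zu bytes\n",
	       sizeof(struct mbox_hdr) + sizeof(struct wmi_cmd_hdr));
	printf("8-byte entry ok?  %d\n", mbox_entry_size_ok(8));	/* 0 */
	printf("64-byte entry ok? %d\n", mbox_entry_size_ok(64));	/* 1 */
	return 0;
}
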
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -579,7 +580,6 @@ int wil_priv_init(struct wil6210_priv *wil) wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST | WMI_WAKEUP_TRIGGER_BCAST; memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats)); - wil->suspend_stats.min_suspend_time = ULONG_MAX; wil->vring_idle_trsh = 16; return 0; @@ -638,6 +638,98 @@ void wil_priv_deinit(struct wil6210_priv *wil) destroy_workqueue(wil->wmi_wq); } +static void wil_shutdown_bl(struct wil6210_priv *wil) +{ + u32 val; + + wil_s(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v1, + bl_shutdown_handshake), BL_SHUTDOWN_HS_GRTD); + + usleep_range(100, 150); + + val = wil_r(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v1, + bl_shutdown_handshake)); + if (val & BL_SHUTDOWN_HS_RTD) { + wil_dbg_misc(wil, "BL is ready for halt\n"); + return; + } + + wil_err(wil, "BL did not report ready for halt\n"); +} + +/* this format is used by ARC embedded CPU for instruction memory */ +static inline u32 ARC_me_imm32(u32 d) +{ + return ((d & 0xffff0000) >> 16) | ((d & 0x0000ffff) << 16); +} + +/* defines access to interrupt vectors for wil_freeze_bl */ +#define ARC_IRQ_VECTOR_OFFSET(N) ((N) * 8) +/* ARC long jump instruction */ +#define ARC_JAL_INST (0x20200f80) + +static void wil_freeze_bl(struct wil6210_priv *wil) +{ + u32 jal, upc, saved; + u32 ivt3 = ARC_IRQ_VECTOR_OFFSET(3); + + jal = wil_r(wil, wil->iccm_base + ivt3); + if (jal != ARC_me_imm32(ARC_JAL_INST)) { + wil_dbg_misc(wil, "invalid IVT entry found, skipping\n"); + return; + } + + /* prevent the target from entering deep sleep + * and disabling memory access + */ + saved = wil_r(wil, RGF_USER_USAGE_8); + wil_w(wil, RGF_USER_USAGE_8, saved | BIT_USER_PREVENT_DEEP_SLEEP); + usleep_range(20, 25); /* let the BL process the bit */ + + /* redirect to endless loop in the INT_L1 context and let it trap */ + wil_w(wil, wil->iccm_base + ivt3 + 4, ARC_me_imm32(ivt3)); + usleep_range(20, 25); /* let the BL get into the trap */ + + /* verify the BL is frozen */ + upc = wil_r(wil, RGF_USER_CPU_PC); + if (upc < ivt3 || (upc > (ivt3 + 8))) + wil_dbg_misc(wil, "BL freeze failed, PC=0x%08X\n", upc); + + wil_w(wil, RGF_USER_USAGE_8, saved); +} + +static void wil_bl_prepare_halt(struct wil6210_priv *wil) +{ + u32 tmp, ver; + + /* before halting device CPU driver must make sure BL is not accessing + * host memory. This is done differently depending on BL version: + * 1. For very old BL versions the procedure is skipped + * (not supported). + * 2. For old BL version we use a special trick to freeze the BL + * 3. For new BL versions we shutdown the BL using handshake procedure. 
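wil_freeze_bl() above works by patching the boot loader's interrupt vector table in ICCM, and ARC cores store 32-bit instruction immediates "middle-endian" — the two 16-bit halves swapped relative to data order — which is what ARC_me_imm32() compensates for. A standalone demo of the swap, using the long-jump opcode value from ARC_JAL_INST:

#include <stdint.h>
#include <stdio.h>

/* same transform as ARC_me_imm32() in main.c: swap the 16-bit halves */
static uint32_t arc_me_imm32(uint32_t d)
{
	return ((d & 0xffff0000u) >> 16) | ((d & 0x0000ffffu) << 16);
}

int main(void)
{
	uint32_t jal = 0x20200f80u;	/* ARC_JAL_INST */

	printf("data order 0x%08x -> instruction order 0x%08x\n",
	       jal, arc_me_imm32(jal));
	/* the swap is an involution: applying it twice restores the value */
	printf("round trip: 0x%08x\n", arc_me_imm32(arc_me_imm32(jal)));
	return 0;
}
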
+ */ + tmp = wil_r(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v0, + boot_loader_struct_version)); + if (!tmp) { + wil_dbg_misc(wil, "old BL, skipping halt preparation\n"); + return; + } + + tmp = wil_r(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v1, + bl_shutdown_handshake)); + ver = BL_SHUTDOWN_HS_PROT_VER(tmp); + + if (ver > 0) + wil_shutdown_bl(wil); + else + wil_freeze_bl(wil); +} + static inline void wil_halt_cpu(struct wil6210_priv *wil) { wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST); @@ -671,7 +763,7 @@ static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode) } } -static int wil_target_reset(struct wil6210_priv *wil) +static int wil_target_reset(struct wil6210_priv *wil, int no_flash) { int delay = 0; u32 x, x1 = 0; @@ -685,9 +777,16 @@ static int wil_target_reset(struct wil6210_priv *wil) wil_halt_cpu(wil); - /* clear all boot loader "ready" bits */ - wil_w(wil, RGF_USER_BL + - offsetof(struct bl_dedicated_registers_v0, boot_loader_ready), 0); + if (!no_flash) { + /* clear all boot loader "ready" bits */ + wil_w(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v0, + boot_loader_ready), 0); + /* this should be safe to write even with old BLs */ + wil_w(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v1, + bl_shutdown_handshake), 0); + } /* Clear Fw Download notification */ wil_c(wil, RGF_USER_USAGE_6, BIT(0)); @@ -728,21 +827,33 @@ static int wil_target_reset(struct wil6210_priv *wil) wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); /* wait until device ready. typical time is 20..80 msec */ - do { - msleep(RST_DELAY); - x = wil_r(wil, RGF_USER_BL + - offsetof(struct bl_dedicated_registers_v0, - boot_loader_ready)); - if (x1 != x) { - wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x); - x1 = x; - } - if (delay++ > RST_COUNT) { - wil_err(wil, "Reset not completed, bl.ready 0x%08x\n", - x); - return -ETIME; - } - } while (x != BL_READY); + if (no_flash) + do { + msleep(RST_DELAY); + x = wil_r(wil, USER_EXT_USER_PMU_3); + if (delay++ > RST_COUNT) { + wil_err(wil, "Reset not completed, PMU_3 0x%08x\n", + x); + return -ETIME; + } + } while ((x & BIT_PMU_DEVICE_RDY) == 0); + else + do { + msleep(RST_DELAY); + x = wil_r(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v0, + boot_loader_ready)); + if (x1 != x) { + wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", + x1, x); + x1 = x; + } + if (delay++ > RST_COUNT) { + wil_err(wil, "Reset not completed, bl.ready 0x%08x\n", + x); + return -ETIME; + } + } while (x != BL_READY); wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); @@ -750,6 +861,21 @@ static int wil_target_reset(struct wil6210_priv *wil) wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN | BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC); + if (no_flash) { + /* Reset OTP HW vectors to fit 40MHz */ + wil_w(wil, RGF_USER_XPM_IFC_RD_TIME1, 0x60001); + wil_w(wil, RGF_USER_XPM_IFC_RD_TIME2, 0x20027); + wil_w(wil, RGF_USER_XPM_IFC_RD_TIME3, 0x1); + wil_w(wil, RGF_USER_XPM_IFC_RD_TIME4, 0x20027); + wil_w(wil, RGF_USER_XPM_IFC_RD_TIME5, 0x30003); + wil_w(wil, RGF_USER_XPM_IFC_RD_TIME6, 0x20002); + wil_w(wil, RGF_USER_XPM_IFC_RD_TIME7, 0x60001); + wil_w(wil, RGF_USER_XPM_IFC_RD_TIME8, 0x60001); + wil_w(wil, RGF_USER_XPM_IFC_RD_TIME9, 0x60001); + wil_w(wil, RGF_USER_XPM_IFC_RD_TIME10, 0x60001); + wil_w(wil, RGF_USER_XPM_RD_DOUT_SAMPLE_TIME, 0x57); + } + wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY); return 0; } @@ -760,6 +886,8 @@ static void wil_collect_fw_info(struct 
wil6210_priv *wil) u8 retry_short; int rc; + wil_refresh_fw_capabilities(wil); + rc = wmi_get_mgmt_retry(wil, &retry_short); if (!rc) { wiphy->retry_short = retry_short; @@ -767,6 +895,43 @@ static void wil_collect_fw_info(struct wil6210_priv *wil) } } +void wil_refresh_fw_capabilities(struct wil6210_priv *wil) +{ + struct wiphy *wiphy = wil_to_wiphy(wil); + int features; + + wil->keep_radio_on_during_sleep = + test_bit(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND, + wil->platform_capa) && + test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities); + + wil_info(wil, "keep_radio_on_during_sleep (%d)\n", + wil->keep_radio_on_during_sleep); + + if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities)) + wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + else + wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; + + if (test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) { + wiphy->max_sched_scan_reqs = 1; + wiphy->max_sched_scan_ssids = WMI_MAX_PNO_SSID_NUM; + wiphy->max_match_sets = WMI_MAX_PNO_SSID_NUM; + wiphy->max_sched_scan_ie_len = WMI_MAX_IE_LEN; + wiphy->max_sched_scan_plans = WMI_MAX_PLANS_NUM; + } + + if (wil->platform_ops.set_features) { + features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL, + wil->fw_capabilities) && + test_bit(WIL_PLATFORM_CAPA_EXT_CLK, + wil->platform_capa)) ? + BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0; + + wil->platform_ops.set_features(wil->platform_handle, features); + } +} + void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) { le32_to_cpus(&r->base); @@ -868,6 +1033,27 @@ static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err) } } +static int wil_get_otp_info(struct wil6210_priv *wil) +{ + struct net_device *ndev = wil_to_ndev(wil); + struct wiphy *wiphy = wil_to_wiphy(wil); + u8 mac[8]; + + wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(RGF_OTP_MAC), + sizeof(mac)); + if (!is_valid_ether_addr(mac)) { + wil_err(wil, "Invalid MAC %pM\n", mac); + return -EINVAL; + } + + ether_addr_copy(ndev->perm_addr, mac); + ether_addr_copy(wiphy->perm_addr, mac); + if (!is_valid_ether_addr(ndev->dev_addr)) + ether_addr_copy(ndev->dev_addr, mac); + + return 0; +} + static int wil_wait_for_fw_ready(struct wil6210_priv *wil) { ulong to = msecs_to_jiffies(1000); @@ -960,6 +1146,8 @@ static void wil_pre_fw_config(struct wil6210_priv *wil) int wil_reset(struct wil6210_priv *wil, bool load_fw) { int rc; + unsigned long status_flags = BIT(wil_status_resetting); + int no_flash; wil_dbg_misc(wil, "reset\n"); @@ -980,6 +1168,16 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) if (wil->hw_version == HW_VER_UNKNOWN) return -ENODEV; + if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa)) { + wil_dbg_misc(wil, "Notify FW to set T_POWER_ON=0\n"); + wil_s(wil, RGF_USER_USAGE_8, BIT_USER_SUPPORT_T_POWER_ON_0); + } + + if (test_bit(WIL_PLATFORM_CAPA_EXT_CLK, wil->platform_capa)) { + wil_dbg_misc(wil, "Notify FW on ext clock configuration\n"); + wil_s(wil, RGF_USER_USAGE_8, BIT_USER_EXT_CLK); + } + if (wil->platform_ops.notify) { rc = wil->platform_ops.notify(wil->platform_handle, WIL_PLATFORM_EVT_PRE_RESET); @@ -989,6 +1187,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) } set_bit(wil_status_resetting, wil->status); + if (test_bit(wil_status_collecting_dumps, wil->status)) { + /* Device collects crash dump, cancel the reset. + * following crash dump collection, reset would take place. 
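wil_get_otp_info() above only trusts the OTP-programmed MAC after is_valid_ether_addr() passes; that helper rejects the all-zero address and any address with the multicast bit set (LSB of the first octet), the two signatures of blank or mis-burned OTP. Its check, modeled in plain C:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* models the kernel's is_valid_ether_addr(): not multicast, not all-zero */
static bool valid_ether_addr(const uint8_t a[6])
{
	static const uint8_t zero[6];

	return !(a[0] & 0x01) && memcmp(a, zero, 6) != 0;
}

int main(void)
{
	uint8_t blank[6] = { 0 };			  /* unprogrammed OTP */
	uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 1, 2, 3 }; /* multicast bit set */
	uint8_t good[6]  = { 0x04, 0xce, 0x14, 1, 2, 3 };

	printf("blank: %d  multicast: %d  good: %d\n",
	       valid_ether_addr(blank), valid_ether_addr(mcast),
	       valid_ether_addr(good));
	return 0;
}
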
+ */ + wil_dbg_misc(wil, "reject reset while collecting crash dump\n"); + rc = -EBUSY; + goto out; + } cancel_work_sync(&wil->disconnect_worker); wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); @@ -1003,7 +1209,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) /* prevent NAPI from being scheduled and prevent wmi commands */ mutex_lock(&wil->wmi_mutex); - bitmap_zero(wil->status, wil_status_last); + if (test_bit(wil_status_suspending, wil->status)) + status_flags |= BIT(wil_status_suspending); + bitmap_and(wil->status, wil->status, &status_flags, + wil_status_last); + wil_dbg_misc(wil, "wil->status (0x%lx)\n", *wil->status); mutex_unlock(&wil->wmi_mutex); wil_mask_irq(wil); @@ -1013,37 +1223,53 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) flush_workqueue(wil->wq_service); flush_workqueue(wil->wmi_wq); - wil_bl_crash_info(wil, false); + no_flash = test_bit(hw_capa_no_flash, wil->hw_capa); + if (!no_flash) + wil_bl_crash_info(wil, false); wil_disable_irq(wil); - rc = wil_target_reset(wil); + rc = wil_target_reset(wil, no_flash); wil6210_clear_irq(wil); wil_enable_irq(wil); wil_rx_fini(wil); if (rc) { - wil_bl_crash_info(wil, true); - return rc; + if (!no_flash) + wil_bl_crash_info(wil, true); + goto out; } - rc = wil_get_bl_info(wil); - if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */ - rc = 0; + if (no_flash) { + rc = wil_get_otp_info(wil); + } else { + rc = wil_get_bl_info(wil); + if (rc == -EAGAIN && !load_fw) + /* ignore RF error if not going up */ + rc = 0; + } if (rc) - return rc; + goto out; wil_set_oob_mode(wil, oob_mode); if (load_fw) { wil_info(wil, "Use firmware <%s> + board <%s>\n", wil->wil_fw_name, WIL_BOARD_FILE_NAME); + if (!no_flash) + wil_bl_prepare_halt(wil); + wil_halt_cpu(wil); memset(wil->fw_version, 0, sizeof(wil->fw_version)); /* Loading f/w from the file */ rc = wil_request_firmware(wil, wil->wil_fw_name, true); if (rc) - return rc; - rc = wil_request_firmware(wil, WIL_BOARD_FILE_NAME, true); + goto out; + if (wil->brd_file_addr) + rc = wil_request_board(wil, WIL_BOARD_FILE_NAME); + else + rc = wil_request_firmware(wil, + WIL_BOARD_FILE_NAME, + true); if (rc) - return rc; + goto out; wil_pre_fw_config(wil); wil_release_cpu(wil); @@ -1055,6 +1281,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) reinit_completion(&wil->wmi_call); reinit_completion(&wil->halp.comp); + clear_bit(wil_status_resetting, wil->status); + if (load_fw) { wil_configure_interrupt_moderation(wil); wil_unmask_irq(wil); @@ -1071,11 +1299,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) return rc; } + wil_collect_fw_info(wil); + if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT) wil_ps_update(wil, wil->ps_profile); - wil_collect_fw_info(wil); - if (wil->platform_ops.notify) { rc = wil->platform_ops.notify(wil->platform_handle, WIL_PLATFORM_EVT_FW_RDY); @@ -1088,6 +1316,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) } return rc; + +out: + clear_bit(wil_status_resetting, wil->status); + return rc; } void wil_fw_error_recovery(struct wil6210_priv *wil) @@ -1193,9 +1425,7 @@ int __wil_down(struct wil6210_priv *wil) wil_abort_scan(wil, false); mutex_unlock(&wil->p2p_wdev_mutex); - wil_reset(wil, false); - - return 0; + return wil_reset(wil, false); } int wil_down(struct wil6210_priv *wil) diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 4a6ab2d0fdf1..7ba4e0af8f57 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ 
b/drivers/net/wireless/ath/wil6210/netdev.c @@ -21,6 +21,7 @@ static int wil_open(struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); + int rc; wil_dbg_misc(wil, "open\n"); @@ -30,16 +31,29 @@ static int wil_open(struct net_device *ndev) return -EINVAL; } - return wil_up(wil); + rc = wil_pm_runtime_get(wil); + if (rc < 0) + return rc; + + rc = wil_up(wil); + if (rc) + wil_pm_runtime_put(wil); + + return rc; } static int wil_stop(struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); + int rc; wil_dbg_misc(wil, "stop\n"); - return wil_down(wil); + rc = wil_down(wil); + if (!rc) + wil_pm_runtime_put(wil); + + return rc; } static const struct net_device_ops wil_netdev_ops = { @@ -136,7 +150,7 @@ void *wil_if_alloc(struct device *dev) wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */ /* default monitor channel */ ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels; - cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT); + cfg80211_chandef_create(&wil->monitor_chandef, ch, NL80211_CHAN_NO_HT); ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup); if (!ndev) { diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 6a3ab4bf916d..809092a49192 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -21,6 +22,7 @@ #include <linux/suspend.h> #include "wil6210.h" #include <linux/rtnetlink.h> +#include <linux/pm_runtime.h> static bool use_msi = true; module_param(use_msi, bool, 0444); @@ -30,29 +32,30 @@ static bool ftm_mode; module_param(ftm_mode, bool, 0444); MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false"); -#ifdef CONFIG_PM -#ifdef CONFIG_PM_SLEEP static int wil6210_pm_notify(struct notifier_block *notify_block, unsigned long mode, void *unused); -#endif /* CONFIG_PM_SLEEP */ -#endif /* CONFIG_PM */ static -void wil_set_capabilities(struct wil6210_priv *wil) +int wil_set_capabilities(struct wil6210_priv *wil) { const char *wil_fw_name; u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID); u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) & RGF_USER_REVISION_ID_MASK); + int platform_capa; + struct fw_map *iccm_section, *sct; - bitmap_zero(wil->hw_capabilities, hw_capability_last); + bitmap_zero(wil->hw_capa, hw_capa_last); bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX); + bitmap_zero(wil->platform_capa, WIL_PLATFORM_CAPA_MAX); wil->wil_fw_name = ftm_mode ? 
WIL_FW_NAME_FTM_DEFAULT : WIL_FW_NAME_DEFAULT; wil->chip_revision = chip_revision; switch (jtag_id) { case JTAG_DEV_ID_SPARROW: + memcpy(fw_mapping, sparrow_fw_mapping, + sizeof(sparrow_fw_mapping)); switch (chip_revision) { case REVISION_ID_SPARROW_D0: wil->hw_name = "Sparrow D0"; @@ -62,6 +65,12 @@ void wil_set_capabilities(struct wil6210_priv *wil) if (wil_fw_verify_file_exists(wil, wil_fw_name)) wil->wil_fw_name = wil_fw_name; + sct = wil_find_fw_mapping("mac_rgf_ext"); + if (!sct) { + wil_err(wil, "mac_rgf_ext section not found in fw_mapping\n"); + return -EINVAL; + } + memcpy(sct, &sparrow_d0_mac_rgf_ext, sizeof(*sct)); break; case REVISION_ID_SPARROW_B0: wil->hw_name = "Sparrow B0"; @@ -72,21 +81,50 @@ void wil_set_capabilities(struct wil6210_priv *wil) wil->hw_version = HW_VER_UNKNOWN; break; } + wil->rgf_fw_assert_code_addr = SPARROW_RGF_FW_ASSERT_CODE; + wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE; + break; + case JTAG_DEV_ID_TALYN: + wil->hw_name = "Talyn"; + wil->hw_version = HW_VER_TALYN; + memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping)); + wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE; + wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE; + if (wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1) & + BIT_NO_FLASH_INDICATION) + set_bit(hw_capa_no_flash, wil->hw_capa); break; default: wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n", jtag_id, chip_revision); wil->hw_name = "Unknown"; wil->hw_version = HW_VER_UNKNOWN; + return -EINVAL; + } + + iccm_section = wil_find_fw_mapping("fw_code"); + if (!iccm_section) { + wil_err(wil, "fw_code section not found in fw_mapping\n"); + return -EINVAL; } + wil->iccm_base = iccm_section->host; - wil_info(wil, "Board hardware is %s\n", wil->hw_name); + wil_info(wil, "Board hardware is %s, flash %sexist\n", wil->hw_name, + test_bit(hw_capa_no_flash, wil->hw_capa) ? 
"doesn't " : ""); + + /* Get platform capabilities */ + if (wil->platform_ops.get_capa) { + platform_capa = + wil->platform_ops.get_capa(wil->platform_handle); + memcpy(wil->platform_capa, &platform_capa, + min(sizeof(wil->platform_capa), sizeof(platform_capa))); + } /* extract FW capabilities from file without loading the FW */ wil_request_firmware(wil, wil->wil_fw_name, false); + wil_refresh_fw_capabilities(wil); - if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities)) - wil_to_wiphy(wil)->signal_type = CFG80211_SIGNAL_TYPE_MBM; + return 0; } void wil_disable_irq(struct wil6210_priv *wil) @@ -209,6 +247,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) .fw_recovery = wil_platform_rop_fw_recovery, }; u32 bar_size = pci_resource_len(pdev, 0); + int dma_addr_size[] = {48, 40, 32}; /* keep descending order */ + int i; /* check HW */ dev_info(&pdev->dev, WIL_NAME @@ -244,21 +284,23 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) } /* rollback to err_plat */ - /* device supports 48bit addresses */ - rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); - if (rc) { - dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc); - rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + /* device supports >32bit addresses */ + for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) { + rc = dma_set_mask_and_coherent(dev, + DMA_BIT_MASK(dma_addr_size[i])); if (rc) { - dev_err(dev, - "dma_set_mask_and_coherent(32) failed: %d\n", - rc); - goto err_plat; + dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n", + dma_addr_size[i], rc); + continue; } - } else { - wil->use_extended_dma_addr = 1; + dev_info(dev, "using dma mask %d", dma_addr_size[i]); + wil->dma_addr_size = dma_addr_size[i]; + break; } + if (wil->dma_addr_size == 0) + goto err_plat; + rc = pci_enable_device(pdev); if (rc && pdev->msi_enabled == 0) { wil_err(wil, @@ -293,18 +335,13 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* rollback to err_iounmap */ wil_info(wil, "CSR at %pR -> 0x%p\n", &pdev->resource[0], wil->csr); - wil_set_capabilities(wil); + rc = wil_set_capabilities(wil); + if (rc) { + wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc); + goto err_iounmap; + } wil6210_clear_irq(wil); - wil->keep_radio_on_during_sleep = - wil->platform_ops.keep_radio_on_during_sleep && - wil->platform_ops.keep_radio_on_during_sleep( - wil->platform_handle) && - test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities); - - wil_info(wil, "keep_radio_on_during_sleep (%d)\n", - wil->keep_radio_on_during_sleep); - /* FW should raise IRQ when ready */ rc = wil_if_pcie_enable(wil); if (rc) { @@ -319,20 +356,19 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto bus_disable; } -#ifdef CONFIG_PM -#ifdef CONFIG_PM_SLEEP - wil->pm_notify.notifier_call = wil6210_pm_notify; + if (IS_ENABLED(CONFIG_PM)) + wil->pm_notify.notifier_call = wil6210_pm_notify; + rc = register_pm_notifier(&wil->pm_notify); if (rc) /* Do not fail the driver initialization, as suspend can * be prevented in a later phase if needed */ wil_err(wil, "register_pm_notifier failed: %d\n", rc); -#endif /* CONFIG_PM_SLEEP */ -#endif /* CONFIG_PM */ wil6210_debugfs_init(wil); + wil_pm_runtime_allow(wil); return 0; @@ -359,11 +395,9 @@ static void wil_pcie_remove(struct pci_dev *pdev) wil_dbg_misc(wil, "pcie_remove\n"); -#ifdef CONFIG_PM -#ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&wil->pm_notify); -#endif /* CONFIG_PM_SLEEP 
*/ -#endif /* CONFIG_PM */ + + wil_pm_runtime_forbid(wil); wil6210_debugfs_remove(wil); rtnl_lock(); @@ -381,18 +415,19 @@ static void wil_pcie_remove(struct pci_dev *pdev) static const struct pci_device_id wil6210_pcie_ids[] = { { PCI_DEVICE(0x1ae9, 0x0310) }, { PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */ + { PCI_DEVICE(0x17cb, 0x1201) }, /* Talyn */ { /* end: all zeroes */ }, }; MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids); -#ifdef CONFIG_PM -#ifdef CONFIG_PM_SLEEP - static int wil6210_suspend(struct device *dev, bool is_runtime) { int rc = 0; struct pci_dev *pdev = to_pci_dev(dev); struct wil6210_priv *wil = pci_get_drvdata(pdev); + struct net_device *ndev = wil_to_ndev(wil); + bool keep_radio_on = ndev->flags & IFF_UP && + wil->keep_radio_on_during_sleep; wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system"); @@ -400,16 +435,18 @@ static int wil6210_suspend(struct device *dev, bool is_runtime) if (rc) goto out; - rc = wil_suspend(wil, is_runtime); + rc = wil_suspend(wil, is_runtime, keep_radio_on); if (!rc) { - wil->suspend_stats.successful_suspends++; - - /* If platform device supports keep_radio_on_during_sleep - * it will control PCIe master + /* In case radio stays on, platform device will control + * PCIe master */ - if (!wil->keep_radio_on_during_sleep) + if (!keep_radio_on) { /* disable bus mastering */ pci_clear_master(pdev); + wil->suspend_stats.r_off.successful_suspends++; + } else { + wil->suspend_stats.r_on.successful_suspends++; + } } out: return rc; @@ -420,23 +457,32 @@ static int wil6210_resume(struct device *dev, bool is_runtime) int rc = 0; struct pci_dev *pdev = to_pci_dev(dev); struct wil6210_priv *wil = pci_get_drvdata(pdev); + struct net_device *ndev = wil_to_ndev(wil); + bool keep_radio_on = ndev->flags & IFF_UP && + wil->keep_radio_on_during_sleep; wil_dbg_pm(wil, "resume: %s\n", is_runtime ? 
"runtime" : "system"); - /* If platform device supports keep_radio_on_during_sleep it will - * control PCIe master + /* In case radio stays on, platform device will control + * PCIe master */ - if (!wil->keep_radio_on_during_sleep) + if (!keep_radio_on) /* allow master */ pci_set_master(pdev); - rc = wil_resume(wil, is_runtime); + rc = wil_resume(wil, is_runtime, keep_radio_on); if (rc) { wil_err(wil, "device failed to resume (%d)\n", rc); - wil->suspend_stats.failed_resumes++; - if (!wil->keep_radio_on_during_sleep) + if (!keep_radio_on) { pci_clear_master(pdev); + wil->suspend_stats.r_off.failed_resumes++; + } else { + wil->suspend_stats.r_on.failed_resumes++; + } } else { - wil->suspend_stats.successful_resumes++; + if (keep_radio_on) + wil->suspend_stats.r_on.successful_resumes++; + else + wil->suspend_stats.r_off.successful_resumes++; } return rc; @@ -481,21 +527,49 @@ static int wil6210_pm_notify(struct notifier_block *notify_block, return rc; } -static int wil6210_pm_suspend(struct device *dev) +static int __maybe_unused wil6210_pm_suspend(struct device *dev) { return wil6210_suspend(dev, false); } -static int wil6210_pm_resume(struct device *dev) +static int __maybe_unused wil6210_pm_resume(struct device *dev) { return wil6210_resume(dev, false); } -#endif /* CONFIG_PM_SLEEP */ -#endif /* CONFIG_PM */ +static int __maybe_unused wil6210_pm_runtime_idle(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct wil6210_priv *wil = pci_get_drvdata(pdev); + + wil_dbg_pm(wil, "Runtime idle\n"); + + return wil_can_suspend(wil, true); +} + +static int __maybe_unused wil6210_pm_runtime_resume(struct device *dev) +{ + return wil6210_resume(dev, true); +} + +static int __maybe_unused wil6210_pm_runtime_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct wil6210_priv *wil = pci_get_drvdata(pdev); + + if (test_bit(wil_status_suspended, wil->status)) { + wil_dbg_pm(wil, "trying to suspend while suspended\n"); + return 1; + } + + return wil6210_suspend(dev, true); +} static const struct dev_pm_ops wil6210_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume) + SET_RUNTIME_PM_OPS(wil6210_pm_runtime_suspend, + wil6210_pm_runtime_resume, + wil6210_pm_runtime_idle) }; static struct pci_driver wil6210_driver = { diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index 8f5d1b447aaa..0a96518a566f 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -16,15 +16,30 @@ #include "wil6210.h" #include <linux/jiffies.h> +#include <linux/pm_runtime.h> + +#define WIL6210_AUTOSUSPEND_DELAY_MS (1000) int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) { int rc = 0; struct wireless_dev *wdev = wil->wdev; struct net_device *ndev = wil_to_ndev(wil); + bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY, + wil->fw_capabilities); wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system"); + if (wmi_only || debug_fw) { + wil_dbg_pm(wil, "Deny any suspend - %s mode\n", + wmi_only ? 
"wmi_only" : "debug_fw"); + rc = -EBUSY; + goto out; + } + if (is_runtime && !wil->platform_ops.suspend) { + rc = -EBUSY; + goto out; + } if (!(ndev->flags & IFF_UP)) { /* can always sleep when down */ wil_dbg_pm(wil, "Interface is down\n"); @@ -44,6 +59,10 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) /* interface is running */ switch (wdev->iftype) { case NL80211_IFTYPE_MONITOR: + wil_dbg_pm(wil, "Sniffer\n"); + rc = -EBUSY; + goto out; + /* for STA-like interface, don't runtime suspend */ case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: if (test_bit(wil_status_fwconnecting, wil->status)) { @@ -51,6 +70,12 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) rc = -EBUSY; goto out; } + /* Runtime pm not supported in case the interface is up */ + if (is_runtime) { + wil_dbg_pm(wil, "STA-like interface\n"); + rc = -EBUSY; + goto out; + } break; /* AP-like interface - can't suspend */ default: @@ -120,6 +145,13 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil) /* Prevent handling of new tx and wmi commands */ set_bit(wil_status_suspending, wil->status); + if (test_bit(wil_status_collecting_dumps, wil->status)) { + /* Device collects crash dump, cancel the suspend */ + wil_dbg_pm(wil, "reject suspend while collecting crash dump\n"); + clear_bit(wil_status_suspending, wil->status); + wil->suspend_stats.rejected_by_host++; + return -EBUSY; + } wil_update_net_queues_bh(wil, NULL, true); if (!wil_is_tx_idle(wil)) { @@ -158,7 +190,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil) break; wil_err(wil, "TO waiting for idle RX, suspend failed\n"); - wil->suspend_stats.failed_suspends++; + wil->suspend_stats.r_on.failed_suspends++; goto resume_after_fail; } wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n"); @@ -174,7 +206,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil) */ if (!wil_is_wmi_idle(wil)) { wil_err(wil, "suspend failed due to pending WMI events\n"); - wil->suspend_stats.failed_suspends++; + wil->suspend_stats.r_on.failed_suspends++; goto resume_after_fail; } @@ -188,7 +220,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil) if (rc) { wil_err(wil, "platform device failed to suspend (%d)\n", rc); - wil->suspend_stats.failed_suspends++; + wil->suspend_stats.r_on.failed_suspends++; wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); wil_unmask_irq(wil); goto resume_after_fail; @@ -230,11 +262,21 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil) wil_dbg_pm(wil, "suspend radio off\n"); + set_bit(wil_status_suspending, wil->status); + if (test_bit(wil_status_collecting_dumps, wil->status)) { + /* Device collects crash dump, cancel the suspend */ + wil_dbg_pm(wil, "reject suspend while collecting crash dump\n"); + clear_bit(wil_status_suspending, wil->status); + wil->suspend_stats.rejected_by_host++; + return -EBUSY; + } + /* if netif up, hardware is alive, shut it down */ if (ndev->flags & IFF_UP) { rc = wil_down(wil); if (rc) { wil_err(wil, "wil_down : %d\n", rc); + wil->suspend_stats.r_off.failed_suspends++; goto out; } } @@ -247,6 +289,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil) rc = wil->platform_ops.suspend(wil->platform_handle, false); if (rc) { wil_enable_irq(wil); + wil->suspend_stats.r_off.failed_suspends++; goto out; } } @@ -254,6 +297,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil) set_bit(wil_status_suspended, wil->status); out: + clear_bit(wil_status_suspending, wil->status); wil_dbg_pm(wil, "suspend radio off: 
%d\n", rc); return rc; @@ -279,12 +323,9 @@ static int wil_resume_radio_off(struct wil6210_priv *wil) return rc; } -int wil_suspend(struct wil6210_priv *wil, bool is_runtime) +int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on) { int rc = 0; - struct net_device *ndev = wil_to_ndev(wil); - bool keep_radio_on = ndev->flags & IFF_UP && - wil->keep_radio_on_during_sleep; wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system"); @@ -301,19 +342,12 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime) wil_dbg_pm(wil, "suspend: %s => %d\n", is_runtime ? "runtime" : "system", rc); - if (!rc) - wil->suspend_stats.suspend_start_time = ktime_get(); - return rc; } -int wil_resume(struct wil6210_priv *wil, bool is_runtime) +int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on) { int rc = 0; - struct net_device *ndev = wil_to_ndev(wil); - bool keep_radio_on = ndev->flags & IFF_UP && - wil->keep_radio_on_during_sleep; - unsigned long long suspend_time_usec = 0; wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system"); @@ -331,20 +365,49 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime) else rc = wil_resume_radio_off(wil); - if (rc) - goto out; - - suspend_time_usec = - ktime_to_us(ktime_sub(ktime_get(), - wil->suspend_stats.suspend_start_time)); - wil->suspend_stats.total_suspend_time += suspend_time_usec; - if (suspend_time_usec < wil->suspend_stats.min_suspend_time) - wil->suspend_stats.min_suspend_time = suspend_time_usec; - if (suspend_time_usec > wil->suspend_stats.max_suspend_time) - wil->suspend_stats.max_suspend_time = suspend_time_usec; - out: - wil_dbg_pm(wil, "resume: %s => %d, suspend time %lld usec\n", - is_runtime ? "runtime" : "system", rc, suspend_time_usec); + wil_dbg_pm(wil, "resume: %s => %d\n", is_runtime ? "runtime" : "system", + rc); return rc; } + +void wil_pm_runtime_allow(struct wil6210_priv *wil) +{ + struct device *dev = wil_to_dev(wil); + + pm_runtime_put_noidle(dev); + pm_runtime_set_autosuspend_delay(dev, WIL6210_AUTOSUSPEND_DELAY_MS); + pm_runtime_use_autosuspend(dev); + pm_runtime_allow(dev); +} + +void wil_pm_runtime_forbid(struct wil6210_priv *wil) +{ + struct device *dev = wil_to_dev(wil); + + pm_runtime_forbid(dev); + pm_runtime_get_noresume(dev); +} + +int wil_pm_runtime_get(struct wil6210_priv *wil) +{ + int rc; + struct device *dev = wil_to_dev(wil); + + rc = pm_runtime_get_sync(dev); + if (rc < 0) { + wil_err(wil, "pm_runtime_get_sync() failed, rc = %d\n", rc); + pm_runtime_put_noidle(dev); + return rc; + } + + return 0; +} + +void wil_pm_runtime_put(struct wil6210_priv *wil) +{ + struct device *dev = wil_to_dev(wil); + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); +} diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c index 2e301b6b32a9..4ea27b0bd278 100644 --- a/drivers/net/wireless/ath/wil6210/pmc.c +++ b/drivers/net/wireless/ath/wil6210/pmc.c @@ -111,14 +111,14 @@ void wil_pmc_alloc(struct wil6210_priv *wil, * * HW has limitation that all vrings addresses must share the same * upper 16 msb bits part of 48 bits address. To workaround that, - * if we are using 48 bit addresses switch to 32 bit allocation - * before allocating vring memory. + * if we are using more than 32 bit addresses switch to 32 bit + * allocation before allocating vring memory. 
* * There's no check for the return value of dma_set_mask_and_coherent, * since we assume if we were able to set the mask during * initialization in this system it will not fail if we set it again */ - if (wil->use_extended_dma_addr) + if (wil->dma_addr_size > 32) dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); pmc->pring_va = dma_alloc_coherent(dev, @@ -126,8 +126,9 @@ void wil_pmc_alloc(struct wil6210_priv *wil, &pmc->pring_pa, GFP_KERNEL); - if (wil->use_extended_dma_addr) - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (wil->dma_addr_size > 32) + dma_set_mask_and_coherent(dev, + DMA_BIT_MASK(wil->dma_addr_size)); wil_dbg_misc(wil, "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n", diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 389c718cd257..16b8a4e5201f 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -178,14 +178,14 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) * * HW has limitation that all vrings addresses must share the same * upper 16 msb bits part of 48 bits address. To workaround that, - * if we are using 48 bit addresses switch to 32 bit allocation - * before allocating vring memory. + * if we are using more than 32 bit addresses switch to 32 bit + * allocation before allocating vring memory. * * There's no check for the return value of dma_set_mask_and_coherent, * since we assume if we were able to set the mask during * initialization in this system it will not fail if we set it again */ - if (wil->use_extended_dma_addr) + if (wil->dma_addr_size > 32) dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL); @@ -195,8 +195,9 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) return -ENOMEM; } - if (wil->use_extended_dma_addr) - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (wil->dma_addr_size > 32) + dma_set_mask_and_coherent(dev, + DMA_BIT_MASK(wil->dma_addr_size)); /* initially, all descriptors are SW owned * For Tx and Rx, ownership bit is at the same location, thus @@ -347,7 +348,6 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring, static void wil_rx_add_radiotap_header(struct wil6210_priv *wil, struct sk_buff *skb) { - struct wireless_dev *wdev = wil->wdev; struct wil6210_rtap { struct ieee80211_radiotap_header rthdr; /* fields should be in the order of bits in rthdr.it_present */ @@ -374,7 +374,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil, int rtap_len = sizeof(struct wil6210_rtap); int phy_length = 0; /* phy info header size, bytes */ static char phy_data[128]; - struct ieee80211_channel *ch = wdev->preset_chandef.chan; + struct ieee80211_channel *ch = wil->monitor_chandef.chan; if (rtap_include_phy_info) { rtap_len = sizeof(*rtap_vendor) + sizeof(*d); diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 1e340d04bd70..0df2aada6659 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
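The pmc.c and txrx.c hunks above repeat the same workaround: the hardware requires all vring addresses to share the upper 16 bits of the 48-bit address space, so the driver narrows the DMA mask to 32 bits just for the ring allocation (forcing those upper bits to zero) and then restores the mask negotiated at probe time. The trick, factored into one hypothetical helper for illustration — this function is not part of the patch:

#include <linux/dma-mapping.h>

static void *wil_alloc_ring_low32(struct device *dev, size_t sz,
				  dma_addr_t *pa, u32 dma_addr_size)
{
	void *va;

	if (dma_addr_size > 32)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	va = dma_alloc_coherent(dev, sz, pa, GFP_KERNEL);	/* ring below 4GB */

	if (dma_addr_size > 32)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_addr_size));

	return va;
}

Narrowing the mask only around the allocation leaves ordinary skb mappings free to use the full 40/48-bit range that wil_pcie_probe() negotiated.
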
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -82,18 +83,18 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1) */ #define WIL_MAX_MPDU_OVERHEAD (62) -struct wil_suspend_stats { +struct wil_suspend_count_stats { unsigned long successful_suspends; - unsigned long failed_suspends; unsigned long successful_resumes; + unsigned long failed_suspends; unsigned long failed_resumes; - unsigned long rejected_by_device; +}; + +struct wil_suspend_stats { + struct wil_suspend_count_stats r_off; + struct wil_suspend_count_stats r_on; + unsigned long rejected_by_device; /* only radio on */ unsigned long rejected_by_host; - unsigned long long total_suspend_time; - unsigned long long min_suspend_time; - unsigned long long max_suspend_time; - ktime_t collection_start; - ktime_t suspend_start_time; }; /* Calculate MAC buffer size for the firmware. It includes all overhead, @@ -161,10 +162,15 @@ struct RGF_ICR { #define RGF_USER_USAGE_6 (0x880018) #define BIT_USER_OOB_MODE BIT(31) #define BIT_USER_OOB_R2_MODE BIT(30) +#define RGF_USER_USAGE_8 (0x880020) + #define BIT_USER_PREVENT_DEEP_SLEEP BIT(0) + #define BIT_USER_SUPPORT_T_POWER_ON_0 BIT(1) + #define BIT_USER_EXT_CLK BIT(2) #define RGF_USER_HW_MACHINE_STATE (0x8801dc) #define HW_MACHINE_BOOT_DONE (0x3fffffd) #define RGF_USER_USER_CPU_0 (0x8801e0) #define BIT_USER_USER_CPU_MAN_RST BIT(1) /* user_cpu_man_rst */ +#define RGF_USER_CPU_PC (0x8801e8) #define RGF_USER_MAC_CPU_0 (0x8801fc) #define BIT_USER_MAC_CPU_MAN_RST BIT(1) /* mac_cpu_man_rst */ #define RGF_USER_USER_SCRATCH_PAD (0x8802bc) @@ -190,6 +196,19 @@ struct RGF_ICR { #define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1 (0x880c2c) #define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */ #define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2) +#define RGF_USER_OTP_HW_RD_MACHINE_1 (0x880ce0) + #define BIT_NO_FLASH_INDICATION BIT(8) +#define RGF_USER_XPM_IFC_RD_TIME1 (0x880cec) +#define RGF_USER_XPM_IFC_RD_TIME2 (0x880cf0) +#define RGF_USER_XPM_IFC_RD_TIME3 (0x880cf4) +#define RGF_USER_XPM_IFC_RD_TIME4 (0x880cf8) +#define RGF_USER_XPM_IFC_RD_TIME5 (0x880cfc) +#define RGF_USER_XPM_IFC_RD_TIME6 (0x880d00) +#define RGF_USER_XPM_IFC_RD_TIME7 (0x880d04) +#define RGF_USER_XPM_IFC_RD_TIME8 (0x880d08) +#define RGF_USER_XPM_IFC_RD_TIME9 (0x880d0c) +#define RGF_USER_XPM_IFC_RD_TIME10 (0x880d10) +#define RGF_USER_XPM_RD_DOUT_SAMPLE_TIME (0x880d64) #define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */ #define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0) @@ -280,22 +299,33 @@ struct RGF_ICR { #define RGF_CAF_PLL_LOCK_STATUS (0x88afec) #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0) +#define USER_EXT_USER_PMU_3 (0x88d00c) + #define BIT_PMU_DEVICE_RDY BIT(0) + #define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */ #define JTAG_DEV_ID_SPARROW (0x2632072f) + #define JTAG_DEV_ID_TALYN (0x7e0e1) #define RGF_USER_REVISION_ID (0x88afe4) #define RGF_USER_REVISION_ID_MASK (3) #define REVISION_ID_SPARROW_B0 (0x0) #define REVISION_ID_SPARROW_D0 (0x3) +#define RGF_OTP_MAC (0x8a0620) + /* crash codes for FW/Ucode stored here */ -#define RGF_FW_ASSERT_CODE (0x91f020) -#define RGF_UCODE_ASSERT_CODE (0x91f028) + +/* ASSERT RGFs */ +#define SPARROW_RGF_FW_ASSERT_CODE (0x91f020) +#define SPARROW_RGF_UCODE_ASSERT_CODE (0x91f028) +#define TALYN_RGF_FW_ASSERT_CODE (0xa37020) +#define TALYN_RGF_UCODE_ASSERT_CODE (0xa37028) enum { HW_VER_UNKNOWN, HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */ HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */ + HW_VER_TALYN, /* 
JTAG_DEV_ID_TALYN */ }; /* popular locations */ @@ -311,6 +341,10 @@ enum { #define WIL_DATA_COMPLETION_TO_MS 200 /* Hardware definitions end */ +#define SPARROW_FW_MAPPING_TABLE_SIZE 10 +#define TALYN_FW_MAPPING_TABLE_SIZE 13 +#define MAX_FW_MAPPING_TABLE_SIZE 13 + struct fw_map { u32 from; /* linker address - from, inclusive */ u32 to; /* linker address - to, exclusive */ @@ -320,7 +354,10 @@ struct fw_map { }; /* array size should be in sync with actual definition in the wmi.c */ -extern const struct fw_map fw_mapping[10]; +extern const struct fw_map sparrow_fw_mapping[SPARROW_FW_MAPPING_TABLE_SIZE]; +extern const struct fw_map sparrow_d0_mac_rgf_ext; +extern const struct fw_map talyn_fw_mapping[TALYN_FW_MAPPING_TABLE_SIZE]; +extern struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE]; /** * mk_cidxtid - construct @cidxtid field @@ -435,12 +472,13 @@ enum { /* for wil6210_priv.status */ wil_status_fwconnected, wil_status_dontscan, wil_status_mbox_ready, /* MBOX structures ready */ - wil_status_irqen, /* FIXME: interrupts enabled - for debug */ + wil_status_irqen, /* interrupts enabled - for debug */ wil_status_napi_en, /* NAPI enabled protected by wil->mutex */ wil_status_resetting, /* reset in progress */ wil_status_suspending, /* suspend in progress */ wil_status_suspended, /* suspend completed, device is suspended */ wil_status_resuming, /* resume in progress */ + wil_status_collecting_dumps, /* crashdump collection in progress */ wil_status_last /* keep last */ }; @@ -565,7 +603,8 @@ enum { }; enum { - hw_capability_last + hw_capa_no_flash, + hw_capa_last }; struct wil_probe_client_req { @@ -616,6 +655,16 @@ struct blink_on_off_time { u32 off_ms; }; +struct wil_debugfs_iomem_data { + void *offset; + struct wil6210_priv *wil; +}; + +struct wil_debugfs_data { + struct wil_debugfs_iomem_data *data_arr; + int iomem_data_count; +}; + extern struct blink_on_off_time led_blink_time[WIL_LED_TIME_LAST]; extern u8 led_id; extern u8 led_polarity; @@ -631,14 +680,19 @@ struct wil6210_priv { u8 chip_revision; const char *hw_name; const char *wil_fw_name; - DECLARE_BITMAP(hw_capabilities, hw_capability_last); + char *board_file; + u32 brd_file_addr; + u32 brd_file_max_size; + DECLARE_BITMAP(hw_capa, hw_capa_last); DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX); + DECLARE_BITMAP(platform_capa, WIL_PLATFORM_CAPA_MAX); u8 n_mids; /* number of additional MIDs as reported by FW */ u32 recovery_count; /* num of FW recovery attempts in a short time */ u32 recovery_state; /* FW recovery state machine */ unsigned long last_fw_recovery; /* jiffies of last fw recovery */ wait_queue_head_t wq; /* for all wait_event() use */ /* profile */ + struct cfg80211_chan_def monitor_chandef; u32 monitor_flags; u32 privacy; /* secure connection? 
*/ u8 hidden_ssid; /* relevant in AP mode */ @@ -694,7 +748,7 @@ struct wil6210_priv { struct wil_sta_info sta[WIL6210_MAX_CID]; int bcast_vring; u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */ - bool use_extended_dma_addr; /* indicates whether we are using 48 bits */ + u32 dma_addr_size; /* indicates dma addr size */ /* scan */ struct cfg80211_scan_request *scan_request; @@ -703,11 +757,12 @@ struct wil6210_priv { atomic_t isr_count_rx, isr_count_tx; /* debugfs */ struct dentry *debug; - struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)]; + struct wil_blob_wrapper blobs[MAX_FW_MAPPING_TABLE_SIZE]; u8 discovery_mode; u8 abft_len; u8 wakeup_trigger; struct wil_suspend_stats suspend_stats; + struct wil_debugfs_data dbg_data; void *platform_handle; struct wil_platform_ops platform_ops; @@ -731,16 +786,16 @@ struct wil6210_priv { int fw_calib_result; -#ifdef CONFIG_PM -#ifdef CONFIG_PM_SLEEP struct notifier_block pm_notify; -#endif /* CONFIG_PM_SLEEP */ -#endif /* CONFIG_PM */ bool suspend_resp_rcvd; bool suspend_resp_comp; u32 bus_request_kbps; u32 bus_request_kbps_pre_suspend; + + u32 rgf_fw_assert_code_addr; + u32 rgf_ucode_assert_code_addr; + u32 iccm_base; }; #define wil_to_wiphy(i) (i->wdev->wiphy) @@ -861,10 +916,13 @@ int wil_up(struct wil6210_priv *wil); int __wil_up(struct wil6210_priv *wil); int wil_down(struct wil6210_priv *wil); int __wil_down(struct wil6210_priv *wil); +void wil_refresh_fw_capabilities(struct wil6210_priv *wil); void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r); int wil_find_cid(struct wil6210_priv *wil, const u8 *mac); void wil_set_ethtoolops(struct net_device *ndev); +struct fw_map *wil_find_fw_mapping(const char *section); +void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr, u32 size); void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr); void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr); int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr, @@ -997,11 +1055,17 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type); int wil_request_firmware(struct wil6210_priv *wil, const char *name, bool load); +int wil_request_board(struct wil6210_priv *wil, const char *name); bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name); +void wil_pm_runtime_allow(struct wil6210_priv *wil); +void wil_pm_runtime_forbid(struct wil6210_priv *wil); +int wil_pm_runtime_get(struct wil6210_priv *wil); +void wil_pm_runtime_put(struct wil6210_priv *wil); + int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime); -int wil_suspend(struct wil6210_priv *wil, bool is_runtime); -int wil_resume(struct wil6210_priv *wil, bool is_runtime); +int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on); +int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on); bool wil_is_wmi_idle(struct wil6210_priv *wil); int wmi_resume(struct wil6210_priv *wil); int wmi_suspend(struct wil6210_priv *wil); @@ -1016,4 +1080,8 @@ void wil_halp_unvote(struct wil6210_priv *wil); void wil6210_set_halp(struct wil6210_priv *wil); void wil6210_clear_halp(struct wil6210_priv *wil); +int wmi_start_sched_scan(struct wil6210_priv *wil, + struct cfg80211_sched_scan_request *request); +int wmi_stop_sched_scan(struct wil6210_priv *wil); + #endif /* __WIL6210_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c index e53cf0cf7031..1ed330674d9b 100644 --- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c +++ 
b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c @@ -72,6 +72,15 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size) return -EINVAL; } + set_bit(wil_status_collecting_dumps, wil->status); + if (test_bit(wil_status_suspending, wil->status) || + test_bit(wil_status_suspended, wil->status) || + test_bit(wil_status_resetting, wil->status)) { + wil_err(wil, "cannot collect fw dump during suspend/reset\n"); + clear_bit(wil_status_collecting_dumps, wil->status); + return -EINVAL; + } + /* copy to crash dump area */ for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) { map = &fw_mapping[i]; @@ -91,6 +100,8 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size) (const void __iomem * __force)data, len); } + clear_bit(wil_status_collecting_dumps, wil->status); + return 0; } diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h index 5d9e4bfcb045..177026e5323b 100644 --- a/drivers/net/wireless/ath/wil6210/wil_platform.h +++ b/drivers/net/wireless/ath/wil6210/wil_platform.h @@ -27,6 +27,18 @@ enum wil_platform_event { WIL_PLATFORM_EVT_POST_SUSPEND = 4, }; +enum wil_platform_features { + WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL = 0, + WIL_PLATFORM_FEATURE_MAX, +}; + +enum wil_platform_capa { + WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND = 0, + WIL_PLATFORM_CAPA_T_PWR_ON_0 = 1, + WIL_PLATFORM_CAPA_EXT_CLK = 2, + WIL_PLATFORM_CAPA_MAX, +}; + /** * struct wil_platform_ops - wil platform module calls from this * driver to platform driver @@ -37,7 +49,8 @@ struct wil_platform_ops { int (*resume)(void *handle, bool device_powered_on); void (*uninit)(void *handle); int (*notify)(void *handle, enum wil_platform_event evt); - bool (*keep_radio_on_during_sleep)(void *handle); + int (*get_capa)(void *handle); + void (*set_features)(void *handle, int features); }; /** diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index ffdd2fa401b1..b31e2514f8c2 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -38,6 +39,7 @@ MODULE_PARM_DESC(led_id, " 60G device led enablement. Set the led ID (0-2) to enable"); #define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200 +#define WIL_WMI_CALL_GENERAL_TO_MS 100 /** * WMI event receiving - theory of operations @@ -69,23 +71,23 @@ MODULE_PARM_DESC(led_id, * On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing * AHB addresses starting from 0x880000 * - * Internally, firmware uses addresses that allows faster access but + * Internally, firmware uses addresses that allow faster access but * are invisible from the host. To read from these addresses, alternative * AHB address must be used. - * - * Memory mapping - * Linker address PCI/Host address - * 0x880000 .. 0xa80000 2Mb BAR0 - * 0x800000 .. 0x807000 0x900000 .. 0x907000 28k DCCM - * 0x840000 .. 0x857000 0x908000 .. 0x91f000 92k PERIPH */ /** - * @fw_mapping provides memory remapping table + * @sparrow_fw_mapping provides memory remapping table for sparrow * * array size should be in sync with the declaration in the wil6210.h + * + * Sparrow memory mapping: + * Linker address PCI/Host address + * 0x880000 .. 0xa80000 2Mb BAR0 + * 0x800000 .. 0x808000 0x900000 .. 
0x908000 32k DCCM + * 0x840000 .. 0x860000 0x908000 .. 0x928000 128k PERIPH */ -const struct fw_map fw_mapping[] = { +const struct fw_map sparrow_fw_mapping[] = { /* FW code RAM 256k */ {0x000000, 0x040000, 0x8c0000, "fw_code", true}, /* FW data RAM 32k */ @@ -111,6 +113,59 @@ const struct fw_map fw_mapping[] = { {0x800000, 0x804000, 0x940000, "uc_data", false}, }; +/** + * @sparrow_d0_mac_rgf_ext - mac_rgf_ext section for Sparrow D0 + * it is a bit larger to support extra features + */ +const struct fw_map sparrow_d0_mac_rgf_ext = { + 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true +}; + +/** + * @talyn_fw_mapping provides memory remapping table for Talyn + * + * array size should be in sync with the declaration in the wil6210.h + * + * Talyn memory mapping: + * Linker address PCI/Host address + * 0x880000 .. 0xc80000 4Mb BAR0 + * 0x800000 .. 0x820000 0xa00000 .. 0xa20000 128k DCCM + * 0x840000 .. 0x858000 0xa20000 .. 0xa38000 96k PERIPH + */ +const struct fw_map talyn_fw_mapping[] = { + /* FW code RAM 1M */ + {0x000000, 0x100000, 0x900000, "fw_code", true}, + /* FW data RAM 128k */ + {0x800000, 0x820000, 0xa00000, "fw_data", true}, + /* periph. data RAM 96k */ + {0x840000, 0x858000, 0xa20000, "fw_peri", true}, + /* various RGF 40k */ + {0x880000, 0x88a000, 0x880000, "rgf", true}, + /* AGC table 4k */ + {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true}, + /* Pcie_ext_rgf 4k */ + {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true}, + /* mac_ext_rgf 1344b */ + {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true}, + /* ext USER RGF 4k */ + {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true}, + /* OTP 4k */ + {0x8a0000, 0x8a1000, 0x8a0000, "otp", true}, + /* DMA EXT RGF 64k */ + {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true}, + /* upper area 1536k */ + {0x900000, 0xa80000, 0x900000, "upper", true}, + /* UCODE areas - accessible by debugfs blobs but not by + * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas! + */ + /* ucode code RAM 256k */ + {0x000000, 0x040000, 0xa38000, "uc_code", false}, + /* ucode data RAM 32k */ + {0x800000, 0x808000, 0xa78000, "uc_data", false}, +}; + +struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE]; + struct blink_on_off_time led_blink_time[] = { {WIL_LED_BLINK_ON_SLOW_MS, WIL_LED_BLINK_OFF_SLOW_MS}, {WIL_LED_BLINK_ON_MED_MS, WIL_LED_BLINK_OFF_MED_MS}, @@ -138,15 +193,35 @@ static u32 wmi_addr_remap(u32 x) } /** + * find fw_mapping entry by section name + * @section - section name + * + * Return pointer to section or NULL if not found + */ +struct fw_map *wil_find_fw_mapping(const char *section) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) + if (fw_mapping[i].name && + !strcmp(section, fw_mapping[i].name)) + return &fw_mapping[i]; + + return NULL; +} + +/** * Check address validity for WMI buffer; remap if needed * @ptr - internal (linker) fw/ucode address + * @size - if non zero, validate the block does not + * exceed the device memory (bar) * * Valid buffer should be DWORD aligned * * return address for accessing buffer from the host; * if buffer is not valid, return NULL. 
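The validity check described in the comment above has two distinct failure modes worth spelling out: a block that runs past the end of BAR0, and a size large enough that off + size wraps around 32 bits. A standalone sketch of the same idiom (plain C; the constants are illustrative, not taken from the driver):

	#include <stdint.h>
	#include <stdio.h>

	/* Sketch of the wmi_buffer_block() bounds test: reject a block that
	 * ends beyond the BAR as well as one whose end address wraps past 0. */
	static int block_fits(uint32_t off, uint32_t size, uint32_t bar_size)
	{
		if (off > bar_size - 4)		/* at least one DWORD must fit */
			return 0;
		if (size && (off + size > bar_size || off + size < off))
			return 0;	/* too large, or arithmetic wrapped */
		return 1;
	}

	int main(void)
	{
		/* 2 MB BAR, as on Sparrow */
		printf("%d\n", block_fits(0x1000, 0x100, 0x200000));	   /* 1 */
		printf("%d\n", block_fits(0x1000, 0xffffffffu, 0x200000)); /* 0 */
		return 0;
	}

The off + size < off clause is the part the first comparison cannot catch on its own: with a huge size the sum wraps to a small value and would otherwise pass the range test.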
*/ -void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_) +void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr_, u32 size) { u32 off; u32 ptr = le32_to_cpu(ptr_); @@ -161,10 +236,17 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_) off = HOSTADDR(ptr); if (off > wil->bar_size - 4) return NULL; + if (size && ((off + size > wil->bar_size) || (off + size < off))) + return NULL; return wil->csr + off; } +void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_) +{ + return wmi_buffer_block(wil, ptr_, 0); +} + /** * Check address validity */ @@ -198,6 +280,242 @@ int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr, return 0; } +static const char *cmdid2name(u16 cmdid) +{ + switch (cmdid) { + case WMI_NOTIFY_REQ_CMDID: + return "WMI_NOTIFY_REQ_CMD"; + case WMI_START_SCAN_CMDID: + return "WMI_START_SCAN_CMD"; + case WMI_CONNECT_CMDID: + return "WMI_CONNECT_CMD"; + case WMI_DISCONNECT_CMDID: + return "WMI_DISCONNECT_CMD"; + case WMI_SW_TX_REQ_CMDID: + return "WMI_SW_TX_REQ_CMD"; + case WMI_GET_RF_SECTOR_PARAMS_CMDID: + return "WMI_GET_RF_SECTOR_PARAMS_CMD"; + case WMI_SET_RF_SECTOR_PARAMS_CMDID: + return "WMI_SET_RF_SECTOR_PARAMS_CMD"; + case WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID: + return "WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD"; + case WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID: + return "WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD"; + case WMI_BRP_SET_ANT_LIMIT_CMDID: + return "WMI_BRP_SET_ANT_LIMIT_CMD"; + case WMI_TOF_SESSION_START_CMDID: + return "WMI_TOF_SESSION_START_CMD"; + case WMI_AOA_MEAS_CMDID: + return "WMI_AOA_MEAS_CMD"; + case WMI_PMC_CMDID: + return "WMI_PMC_CMD"; + case WMI_TOF_GET_TX_RX_OFFSET_CMDID: + return "WMI_TOF_GET_TX_RX_OFFSET_CMD"; + case WMI_TOF_SET_TX_RX_OFFSET_CMDID: + return "WMI_TOF_SET_TX_RX_OFFSET_CMD"; + case WMI_VRING_CFG_CMDID: + return "WMI_VRING_CFG_CMD"; + case WMI_BCAST_VRING_CFG_CMDID: + return "WMI_BCAST_VRING_CFG_CMD"; + case WMI_TRAFFIC_SUSPEND_CMDID: + return "WMI_TRAFFIC_SUSPEND_CMD"; + case WMI_TRAFFIC_RESUME_CMDID: + return "WMI_TRAFFIC_RESUME_CMD"; + case WMI_ECHO_CMDID: + return "WMI_ECHO_CMD"; + case WMI_SET_MAC_ADDRESS_CMDID: + return "WMI_SET_MAC_ADDRESS_CMD"; + case WMI_LED_CFG_CMDID: + return "WMI_LED_CFG_CMD"; + case WMI_PCP_START_CMDID: + return "WMI_PCP_START_CMD"; + case WMI_PCP_STOP_CMDID: + return "WMI_PCP_STOP_CMD"; + case WMI_SET_SSID_CMDID: + return "WMI_SET_SSID_CMD"; + case WMI_GET_SSID_CMDID: + return "WMI_GET_SSID_CMD"; + case WMI_SET_PCP_CHANNEL_CMDID: + return "WMI_SET_PCP_CHANNEL_CMD"; + case WMI_GET_PCP_CHANNEL_CMDID: + return "WMI_GET_PCP_CHANNEL_CMD"; + case WMI_P2P_CFG_CMDID: + return "WMI_P2P_CFG_CMD"; + case WMI_START_LISTEN_CMDID: + return "WMI_START_LISTEN_CMD"; + case WMI_START_SEARCH_CMDID: + return "WMI_START_SEARCH_CMD"; + case WMI_DISCOVERY_STOP_CMDID: + return "WMI_DISCOVERY_STOP_CMD"; + case WMI_DELETE_CIPHER_KEY_CMDID: + return "WMI_DELETE_CIPHER_KEY_CMD"; + case WMI_ADD_CIPHER_KEY_CMDID: + return "WMI_ADD_CIPHER_KEY_CMD"; + case WMI_SET_APPIE_CMDID: + return "WMI_SET_APPIE_CMD"; + case WMI_CFG_RX_CHAIN_CMDID: + return "WMI_CFG_RX_CHAIN_CMD"; + case WMI_TEMP_SENSE_CMDID: + return "WMI_TEMP_SENSE_CMD"; + case WMI_DEL_STA_CMDID: + return "WMI_DEL_STA_CMD"; + case WMI_DISCONNECT_STA_CMDID: + return "WMI_DISCONNECT_STA_CMD"; + case WMI_VRING_BA_EN_CMDID: + return "WMI_VRING_BA_EN_CMD"; + case WMI_VRING_BA_DIS_CMDID: + return "WMI_VRING_BA_DIS_CMD"; + case WMI_RCP_DELBA_CMDID: + return "WMI_RCP_DELBA_CMD"; + case WMI_RCP_ADDBA_RESP_CMDID: + return "WMI_RCP_ADDBA_RESP_CMD"; + 
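The remap tables above are now per-chip (sparrow_fw_mapping, talyn_fw_mapping) while fw_mapping[] itself became a mutable array of the maximum size. How it gets populated is not part of this hunk, so the following is only a plausible sketch; the function name and the selection flag are illustrative:

	#include <stdbool.h>
	#include <string.h>

	/* Illustrative only: copy the chip-specific table into the runtime
	 * fw_mapping[] at device-init time. Unused tail entries stay zeroed,
	 * which is why wil_find_fw_mapping() tests fw_mapping[i].name first. */
	static void select_fw_mapping_sketch(bool is_talyn)
	{
		memset(fw_mapping, 0, sizeof(fw_mapping));
		if (is_talyn)
			memcpy(fw_mapping, talyn_fw_mapping,
			       sizeof(talyn_fw_mapping));
		else
			memcpy(fw_mapping, sparrow_fw_mapping,
			       sizeof(sparrow_fw_mapping));
	}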
case WMI_PS_DEV_PROFILE_CFG_CMDID: + return "WMI_PS_DEV_PROFILE_CFG_CMD"; + case WMI_SET_MGMT_RETRY_LIMIT_CMDID: + return "WMI_SET_MGMT_RETRY_LIMIT_CMD"; + case WMI_GET_MGMT_RETRY_LIMIT_CMDID: + return "WMI_GET_MGMT_RETRY_LIMIT_CMD"; + case WMI_ABORT_SCAN_CMDID: + return "WMI_ABORT_SCAN_CMD"; + case WMI_NEW_STA_CMDID: + return "WMI_NEW_STA_CMD"; + case WMI_SET_THERMAL_THROTTLING_CFG_CMDID: + return "WMI_SET_THERMAL_THROTTLING_CFG_CMD"; + case WMI_GET_THERMAL_THROTTLING_CFG_CMDID: + return "WMI_GET_THERMAL_THROTTLING_CFG_CMD"; + case WMI_LINK_MAINTAIN_CFG_WRITE_CMDID: + return "WMI_LINK_MAINTAIN_CFG_WRITE_CMD"; + case WMI_LO_POWER_CALIB_FROM_OTP_CMDID: + return "WMI_LO_POWER_CALIB_FROM_OTP_CMD"; + case WMI_START_SCHED_SCAN_CMDID: + return "WMI_START_SCHED_SCAN_CMD"; + case WMI_STOP_SCHED_SCAN_CMDID: + return "WMI_STOP_SCHED_SCAN_CMD"; + default: + return "Untracked CMD"; + } +} + +static const char *eventid2name(u16 eventid) +{ + switch (eventid) { + case WMI_NOTIFY_REQ_DONE_EVENTID: + return "WMI_NOTIFY_REQ_DONE_EVENT"; + case WMI_DISCONNECT_EVENTID: + return "WMI_DISCONNECT_EVENT"; + case WMI_SW_TX_COMPLETE_EVENTID: + return "WMI_SW_TX_COMPLETE_EVENT"; + case WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID: + return "WMI_GET_RF_SECTOR_PARAMS_DONE_EVENT"; + case WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID: + return "WMI_SET_RF_SECTOR_PARAMS_DONE_EVENT"; + case WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID: + return "WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT"; + case WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID: + return "WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT"; + case WMI_BRP_SET_ANT_LIMIT_EVENTID: + return "WMI_BRP_SET_ANT_LIMIT_EVENT"; + case WMI_FW_READY_EVENTID: + return "WMI_FW_READY_EVENT"; + case WMI_TRAFFIC_RESUME_EVENTID: + return "WMI_TRAFFIC_RESUME_EVENT"; + case WMI_TOF_GET_TX_RX_OFFSET_EVENTID: + return "WMI_TOF_GET_TX_RX_OFFSET_EVENT"; + case WMI_TOF_SET_TX_RX_OFFSET_EVENTID: + return "WMI_TOF_SET_TX_RX_OFFSET_EVENT"; + case WMI_VRING_CFG_DONE_EVENTID: + return "WMI_VRING_CFG_DONE_EVENT"; + case WMI_READY_EVENTID: + return "WMI_READY_EVENT"; + case WMI_RX_MGMT_PACKET_EVENTID: + return "WMI_RX_MGMT_PACKET_EVENT"; + case WMI_TX_MGMT_PACKET_EVENTID: + return "WMI_TX_MGMT_PACKET_EVENT"; + case WMI_SCAN_COMPLETE_EVENTID: + return "WMI_SCAN_COMPLETE_EVENT"; + case WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID: + return "WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT"; + case WMI_CONNECT_EVENTID: + return "WMI_CONNECT_EVENT"; + case WMI_EAPOL_RX_EVENTID: + return "WMI_EAPOL_RX_EVENT"; + case WMI_BA_STATUS_EVENTID: + return "WMI_BA_STATUS_EVENT"; + case WMI_RCP_ADDBA_REQ_EVENTID: + return "WMI_RCP_ADDBA_REQ_EVENT"; + case WMI_DELBA_EVENTID: + return "WMI_DELBA_EVENT"; + case WMI_VRING_EN_EVENTID: + return "WMI_VRING_EN_EVENT"; + case WMI_DATA_PORT_OPEN_EVENTID: + return "WMI_DATA_PORT_OPEN_EVENT"; + case WMI_AOA_MEAS_EVENTID: + return "WMI_AOA_MEAS_EVENT"; + case WMI_TOF_SESSION_END_EVENTID: + return "WMI_TOF_SESSION_END_EVENT"; + case WMI_TOF_GET_CAPABILITIES_EVENTID: + return "WMI_TOF_GET_CAPABILITIES_EVENT"; + case WMI_TOF_SET_LCR_EVENTID: + return "WMI_TOF_SET_LCR_EVENT"; + case WMI_TOF_SET_LCI_EVENTID: + return "WMI_TOF_SET_LCI_EVENT"; + case WMI_TOF_FTM_PER_DEST_RES_EVENTID: + return "WMI_TOF_FTM_PER_DEST_RES_EVENT"; + case WMI_TOF_CHANNEL_INFO_EVENTID: + return "WMI_TOF_CHANNEL_INFO_EVENT"; + case WMI_TRAFFIC_SUSPEND_EVENTID: + return "WMI_TRAFFIC_SUSPEND_EVENT"; + case WMI_ECHO_RSP_EVENTID: + return "WMI_ECHO_RSP_EVENT"; + case WMI_LED_CFG_DONE_EVENTID: + return "WMI_LED_CFG_DONE_EVENT"; + case 
WMI_PCP_STARTED_EVENTID: + return "WMI_PCP_STARTED_EVENT"; + case WMI_PCP_STOPPED_EVENTID: + return "WMI_PCP_STOPPED_EVENT"; + case WMI_GET_SSID_EVENTID: + return "WMI_GET_SSID_EVENT"; + case WMI_GET_PCP_CHANNEL_EVENTID: + return "WMI_GET_PCP_CHANNEL_EVENT"; + case WMI_P2P_CFG_DONE_EVENTID: + return "WMI_P2P_CFG_DONE_EVENT"; + case WMI_LISTEN_STARTED_EVENTID: + return "WMI_LISTEN_STARTED_EVENT"; + case WMI_SEARCH_STARTED_EVENTID: + return "WMI_SEARCH_STARTED_EVENT"; + case WMI_DISCOVERY_STOPPED_EVENTID: + return "WMI_DISCOVERY_STOPPED_EVENT"; + case WMI_CFG_RX_CHAIN_DONE_EVENTID: + return "WMI_CFG_RX_CHAIN_DONE_EVENT"; + case WMI_TEMP_SENSE_DONE_EVENTID: + return "WMI_TEMP_SENSE_DONE_EVENT"; + case WMI_RCP_ADDBA_RESP_SENT_EVENTID: + return "WMI_RCP_ADDBA_RESP_SENT_EVENT"; + case WMI_PS_DEV_PROFILE_CFG_EVENTID: + return "WMI_PS_DEV_PROFILE_CFG_EVENT"; + case WMI_SET_MGMT_RETRY_LIMIT_EVENTID: + return "WMI_SET_MGMT_RETRY_LIMIT_EVENT"; + case WMI_GET_MGMT_RETRY_LIMIT_EVENTID: + return "WMI_GET_MGMT_RETRY_LIMIT_EVENT"; + case WMI_SET_THERMAL_THROTTLING_CFG_EVENTID: + return "WMI_SET_THERMAL_THROTTLING_CFG_EVENT"; + case WMI_GET_THERMAL_THROTTLING_CFG_EVENTID: + return "WMI_GET_THERMAL_THROTTLING_CFG_EVENT"; + case WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID: + return "WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENT"; + case WMI_LO_POWER_CALIB_FROM_OTP_EVENTID: + return "WMI_LO_POWER_CALIB_FROM_OTP_EVENT"; + case WMI_START_SCHED_SCAN_EVENTID: + return "WMI_START_SCHED_SCAN_EVENT"; + case WMI_STOP_SCHED_SCAN_EVENTID: + return "WMI_STOP_SCHED_SCAN_EVENT"; + case WMI_SCHED_SCAN_RESULT_EVENTID: + return "WMI_SCHED_SCAN_RESULT_EVENT"; + default: + return "Untracked EVENT"; + } +} + static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) { struct { @@ -222,7 +540,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) uint retry; int rc = 0; - if (sizeof(cmd) + len > r->entry_size) { + if (len > r->entry_size - sizeof(cmd)) { wil_err(wil, "WMI size too large: %d bytes, max is %d\n", (int)(sizeof(cmd) + len), r->entry_size); return -ERANGE; @@ -294,7 +612,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) } cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq); /* set command */ - wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len); + wil_dbg_wmi(wil, "sending %s (0x%04x) [%d]\n", + cmdid2name(cmdid), cmdid, len); wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd, sizeof(cmd), true); wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf, @@ -566,8 +885,6 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) } } - /* FIXME FW can transmit only ucast frames to peer */ - /* FIXME real ring_id instead of hard coded 0 */ ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid); wil->sta[evt->cid].status = wil_sta_conn_pending; @@ -830,6 +1147,75 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) spin_unlock_bh(&sta->tid_rx_lock); } +static void +wmi_evt_sched_scan_result(struct wil6210_priv *wil, int id, void *d, int len) +{ + struct wmi_sched_scan_result_event *data = d; + struct wiphy *wiphy = wil_to_wiphy(wil); + struct ieee80211_mgmt *rx_mgmt_frame = + (struct ieee80211_mgmt *)data->payload; + int flen = len - offsetof(struct wmi_sched_scan_result_event, payload); + int ch_no; + u32 freq; + struct ieee80211_channel *channel; + s32 signal; + __le16 fc; + u32 d_len; + struct cfg80211_bss *bss; + + if (flen < 0) { + wil_err(wil, "sched scan result event too short, len %d\n", + len); + return; + 
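wmi_evt_sched_scan_result() above validates the event twice before touching the payload: flen (what the transport delivered beyond the fixed header) must be non-negative, and it must equal the length the firmware recorded inside info.len. A minimal standalone rendering of that shape — the struct here is illustrative, not the driver's:

	#include <stddef.h>
	#include <stdio.h>

	struct evt {			/* stands in for the result event */
		unsigned int claimed_len;	/* like info.len */
		unsigned char payload[];
	};

	/* total_len is what the bus handed us; claimed is what the event says. */
	static int evt_len_ok(size_t total_len, unsigned int claimed)
	{
		if (total_len < offsetof(struct evt, payload))
			return 0;   /* shorter than the fixed header: flen < 0 */
		return claimed == total_len - offsetof(struct evt, payload);
	}

	int main(void)
	{
		printf("%d\n", evt_len_ok(offsetof(struct evt, payload) + 16, 16)); /* 1 */
		printf("%d\n", evt_len_ok(2, 16));				    /* 0 */
		return 0;
	}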
} + + d_len = le32_to_cpu(data->info.len); + if (d_len != flen) { + wil_err(wil, + "sched scan result length mismatch, d_len %d should be %d\n", + d_len, flen); + return; + } + + fc = rx_mgmt_frame->frame_control; + if (!ieee80211_is_probe_resp(fc)) { + wil_err(wil, "sched scan result invalid frame, fc 0x%04x\n", + fc); + return; + } + + ch_no = data->info.channel + 1; + freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ); + channel = ieee80211_get_channel(wiphy, freq); + if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities)) + signal = 100 * data->info.rssi; + else + signal = data->info.sqi; + + wil_dbg_wmi(wil, "sched scan result: channel %d MCS %d RSSI %d\n", + data->info.channel, data->info.mcs, data->info.rssi); + wil_dbg_wmi(wil, "len %d qid %d mid %d cid %d\n", + d_len, data->info.qid, data->info.mid, data->info.cid); + wil_hex_dump_wmi("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame, + d_len, true); + + if (!channel) { + wil_err(wil, "Frame on unsupported channel\n"); + return; + } + + bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame, + d_len, signal, GFP_KERNEL); + if (bss) { + wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid); + cfg80211_put_bss(wiphy, bss); + } else { + wil_err(wil, "cfg80211_inform_bss_frame() failed\n"); + } + + cfg80211_sched_scan_results(wiphy, 0); +} + /** * Some events are ignored for purpose; and need not be interpreted as * "unhandled events" @@ -857,6 +1243,7 @@ static const struct { {WMI_DELBA_EVENTID, wmi_evt_delba}, {WMI_VRING_EN_EVENTID, wmi_evt_vring_en}, {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore}, + {WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result}, }; /* @@ -963,8 +1350,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil) } spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); - wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n", - id, wmi->mid, tstamp); + wil_dbg_wmi(wil, "recv %s (0x%04x) MID %d @%d msec\n", + eventid2name(id), id, wmi->mid, tstamp); trace_wil6210_wmi_event(wmi, &wmi[1], len - sizeof(*wmi)); } @@ -1380,8 +1767,14 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie) }; int rc; u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len; - struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL); + struct wmi_set_appie_cmd *cmd; + + if (len < ie_len) { + rc = -EINVAL; + goto out; + } + cmd = kzalloc(len, GFP_KERNEL); if (!cmd) { rc = -ENOMEM; goto out; @@ -1461,7 +1854,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) int rc; if (wdev->iftype == NL80211_IFTYPE_MONITOR) { - struct ieee80211_channel *ch = wdev->preset_chandef.chan; + struct ieee80211_channel *ch = wil->monitor_chandef.chan; cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON); if (ch) @@ -1801,6 +2194,16 @@ void wmi_event_flush(struct wil6210_priv *wil) spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); } +static const char *suspend_status2name(u8 status) +{ + switch (status) { + case WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE: + return "LINK_NOT_IDLE"; + default: + return "Untracked status"; + } +} + int wmi_suspend(struct wil6210_priv *wil) { int rc; @@ -1816,7 +2219,7 @@ int wmi_suspend(struct wil6210_priv *wil) wil->suspend_resp_rcvd = false; wil->suspend_resp_comp = false; - reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED; + reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE; rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd), WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply), @@ -1848,8 +2251,9 @@ int wmi_suspend(struct wil6210_priv *wil) 
} wil_dbg_wmi(wil, "suspend_response_completed rcvd\n"); - if (reply.evt.status == WMI_TRAFFIC_SUSPEND_REJECTED) { - wil_dbg_pm(wil, "device rejected the suspend\n"); + if (reply.evt.status != WMI_TRAFFIC_SUSPEND_APPROVED) { + wil_dbg_pm(wil, "device rejected the suspend, %s\n", + suspend_status2name(reply.evt.status)); wil->suspend_stats.rejected_by_device++; } rc = reply.evt.status; @@ -1861,21 +2265,50 @@ out: return rc; } +static void resume_triggers2string(u32 triggers, char *string, int str_size) +{ + string[0] = '\0'; + + if (!triggers) { + strlcat(string, " UNKNOWN", str_size); + return; + } + + if (triggers & WMI_RESUME_TRIGGER_HOST) + strlcat(string, " HOST", str_size); + + if (triggers & WMI_RESUME_TRIGGER_UCAST_RX) + strlcat(string, " UCAST_RX", str_size); + + if (triggers & WMI_RESUME_TRIGGER_BCAST_RX) + strlcat(string, " BCAST_RX", str_size); + + if (triggers & WMI_RESUME_TRIGGER_WMI_EVT) + strlcat(string, " WMI_EVT", str_size); +} + int wmi_resume(struct wil6210_priv *wil) { int rc; + char string[100]; struct { struct wmi_cmd_hdr wmi; struct wmi_traffic_resume_event evt; } __packed reply; reply.evt.status = WMI_TRAFFIC_RESUME_FAILED; + reply.evt.resume_triggers = WMI_RESUME_TRIGGER_UNKNOWN; rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0, WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply), WIL_WAIT_FOR_SUSPEND_RESUME_COMP); if (rc) return rc; + resume_triggers2string(le32_to_cpu(reply.evt.resume_triggers), string, + sizeof(string)); + wil_dbg_pm(wil, "device resume %s, resume triggers:%s (0x%x)\n", + reply.evt.status ? "failed" : "passed", string, + le32_to_cpu(reply.evt.resume_triggers)); return reply.evt.status; } @@ -1906,8 +2339,8 @@ static void wmi_event_handle(struct wil6210_priv *wil, void *evt_data = (void *)(&wmi[1]); u16 id = le16_to_cpu(wmi->command_id); - wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n", - id, wil->reply_id); + wil_dbg_wmi(wil, "Handle %s (0x%04x) (reply_id 0x%04x)\n", + eventid2name(id), id, wil->reply_id); /* check if someone waits for this event */ if (wil->reply_id && wil->reply_id == id) { WARN_ON(wil->reply_buf); @@ -2002,3 +2435,159 @@ out: spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); return rc; } + +static void +wmi_sched_scan_set_ssids(struct wil6210_priv *wil, + struct wmi_start_sched_scan_cmd *cmd, + struct cfg80211_ssid *ssids, int n_ssids, + struct cfg80211_match_set *match_sets, + int n_match_sets) +{ + int i; + + if (n_match_sets > WMI_MAX_PNO_SSID_NUM) { + wil_dbg_wmi(wil, "too many match sets (%d), use first %d\n", + n_match_sets, WMI_MAX_PNO_SSID_NUM); + n_match_sets = WMI_MAX_PNO_SSID_NUM; + } + cmd->num_of_ssids = n_match_sets; + + for (i = 0; i < n_match_sets; i++) { + struct wmi_sched_scan_ssid_match *wmi_match = + &cmd->ssid_for_match[i]; + struct cfg80211_match_set *cfg_match = &match_sets[i]; + int j; + + wmi_match->ssid_len = cfg_match->ssid.ssid_len; + memcpy(wmi_match->ssid, cfg_match->ssid.ssid, + min_t(u8, wmi_match->ssid_len, WMI_MAX_SSID_LEN)); + wmi_match->rssi_threshold = S8_MIN; + if (cfg_match->rssi_thold >= S8_MIN && + cfg_match->rssi_thold <= S8_MAX) + wmi_match->rssi_threshold = cfg_match->rssi_thold; + + for (j = 0; j < n_ssids; j++) + if (wmi_match->ssid_len == ssids[j].ssid_len && + memcmp(wmi_match->ssid, ssids[j].ssid, + wmi_match->ssid_len) == 0) + wmi_match->add_ssid_to_probe = true; + } +} + +static void +wmi_sched_scan_set_channels(struct wil6210_priv *wil, + struct wmi_start_sched_scan_cmd *cmd, + u32 n_channels, + struct ieee80211_channel **channels) +{ + int i; + + if (n_channels 
> WMI_MAX_CHANNEL_NUM) { + wil_dbg_wmi(wil, "too many channels (%d), use first %d\n", + n_channels, WMI_MAX_CHANNEL_NUM); + n_channels = WMI_MAX_CHANNEL_NUM; + } + cmd->num_of_channels = n_channels; + + for (i = 0; i < n_channels; i++) { + struct ieee80211_channel *cfg_chan = channels[i]; + + cmd->channel_list[i] = cfg_chan->hw_value - 1; + } +} + +static void +wmi_sched_scan_set_plans(struct wil6210_priv *wil, + struct wmi_start_sched_scan_cmd *cmd, + struct cfg80211_sched_scan_plan *scan_plans, + int n_scan_plans) +{ + int i; + + if (n_scan_plans > WMI_MAX_PLANS_NUM) { + wil_dbg_wmi(wil, "too many plans (%d), use first %d\n", + n_scan_plans, WMI_MAX_PLANS_NUM); + n_scan_plans = WMI_MAX_PLANS_NUM; + } + + for (i = 0; i < n_scan_plans; i++) { + struct cfg80211_sched_scan_plan *cfg_plan = &scan_plans[i]; + + cmd->scan_plans[i].interval_sec = + cpu_to_le16(cfg_plan->interval); + cmd->scan_plans[i].num_of_iterations = + cpu_to_le16(cfg_plan->iterations); + } +} + +int wmi_start_sched_scan(struct wil6210_priv *wil, + struct cfg80211_sched_scan_request *request) +{ + int rc; + struct wmi_start_sched_scan_cmd cmd = { + .min_rssi_threshold = S8_MIN, + .initial_delay_sec = cpu_to_le16(request->delay), + }; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_start_sched_scan_event evt; + } __packed reply; + + if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) + return -ENOTSUPP; + + if (request->min_rssi_thold >= S8_MIN && + request->min_rssi_thold <= S8_MAX) + cmd.min_rssi_threshold = request->min_rssi_thold; + + wmi_sched_scan_set_ssids(wil, &cmd, request->ssids, request->n_ssids, + request->match_sets, request->n_match_sets); + wmi_sched_scan_set_channels(wil, &cmd, + request->n_channels, request->channels); + wmi_sched_scan_set_plans(wil, &cmd, + request->scan_plans, request->n_scan_plans); + + reply.evt.result = WMI_PNO_REJECT; + + rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, &cmd, sizeof(cmd), + WMI_START_SCHED_SCAN_EVENTID, &reply, sizeof(reply), + WIL_WMI_CALL_GENERAL_TO_MS); + if (rc) + return rc; + + if (reply.evt.result != WMI_PNO_SUCCESS) { + wil_err(wil, "start sched scan failed, result %d\n", + reply.evt.result); + return -EINVAL; + } + + return 0; +} + +int wmi_stop_sched_scan(struct wil6210_priv *wil) +{ + int rc; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_stop_sched_scan_event evt; + } __packed reply; + + if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) + return -ENOTSUPP; + + reply.evt.result = WMI_PNO_REJECT; + + rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, NULL, 0, + WMI_STOP_SCHED_SCAN_EVENTID, &reply, sizeof(reply), + WIL_WMI_CALL_GENERAL_TO_MS); + if (rc) + return rc; + + if (reply.evt.result != WMI_PNO_SUCCESS) { + wil_err(wil, "stop sched scan failed, result %d\n", + reply.evt.result); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 5263ee717a4f..d3e75f0ff245 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -71,6 +71,8 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_RSSI_REPORTING = 12, WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE = 13, WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14, + WMI_FW_CAPABILITY_PNO = 15, + WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18, WMI_FW_CAPABILITY_MAX, }; @@ -87,6 +89,8 @@ enum wmi_command_id { WMI_CONNECT_CMDID = 0x01, WMI_DISCONNECT_CMDID = 0x03, WMI_DISCONNECT_STA_CMDID = 0x04, + WMI_START_SCHED_SCAN_CMDID = 0x05, + WMI_STOP_SCHED_SCAN_CMDID = 0x06, WMI_START_SCAN_CMDID = 0x07, 
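Both sched-scan calls above follow the same defensive pattern: the reply buffer is pre-loaded with a failure code (WMI_PNO_REJECT) before wmi_call() runs, so a reply the firmware never fills in still reads as a rejection rather than as stack garbage, and bus-level errors stay distinct from the firmware's verdict. A compact sketch of that pattern (all names illustrative):

	#include <errno.h>
	#include <stddef.h>

	struct demo_reply { unsigned char result; };	/* 0 == success */

	static int demo_call(int (*transport)(void *buf, size_t len),
			     struct demo_reply *reply)
	{
		reply->result = 1;			/* assume rejected */
		if (transport(reply, sizeof(*reply)))
			return -EIO;			/* bus-level failure */
		return reply->result ? -EINVAL : 0;	/* firmware's verdict */
	}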
WMI_SET_BSS_FILTER_CMDID = 0x09, WMI_SET_PROBED_SSID_CMDID = 0x0A, @@ -385,6 +389,38 @@ struct wmi_start_scan_cmd { } channel_list[0]; } __packed; +#define WMI_MAX_PNO_SSID_NUM (16) +#define WMI_MAX_CHANNEL_NUM (6) +#define WMI_MAX_PLANS_NUM (2) + +/* WMI_START_SCHED_SCAN_CMDID */ +struct wmi_sched_scan_ssid_match { + u8 ssid_len; + u8 ssid[WMI_MAX_SSID_LEN]; + s8 rssi_threshold; + /* boolean */ + u8 add_ssid_to_probe; + u8 reserved; +} __packed; + +/* WMI_START_SCHED_SCAN_CMDID */ +struct wmi_sched_scan_plan { + __le16 interval_sec; + __le16 num_of_iterations; +} __packed; + +/* WMI_START_SCHED_SCAN_CMDID */ +struct wmi_start_sched_scan_cmd { + struct wmi_sched_scan_ssid_match ssid_for_match[WMI_MAX_PNO_SSID_NUM]; + u8 num_of_ssids; + s8 min_rssi_threshold; + u8 channel_list[WMI_MAX_CHANNEL_NUM]; + u8 num_of_channels; + u8 reserved; + __le16 initial_delay_sec; + struct wmi_sched_scan_plan scan_plans[WMI_MAX_PLANS_NUM]; +} __packed; + /* WMI_SET_PROBED_SSID_CMDID */ #define MAX_PROBED_SSID_INDEX (3) @@ -1238,6 +1274,9 @@ enum wmi_event_id { WMI_READY_EVENTID = 0x1001, WMI_CONNECT_EVENTID = 0x1002, WMI_DISCONNECT_EVENTID = 0x1003, + WMI_START_SCHED_SCAN_EVENTID = 0x1005, + WMI_STOP_SCHED_SCAN_EVENTID = 0x1006, + WMI_SCHED_SCAN_RESULT_EVENTID = 0x1007, WMI_SCAN_COMPLETE_EVENTID = 0x100A, WMI_REPORT_STATISTICS_EVENTID = 0x100B, WMI_RD_MEM_RSP_EVENTID = 0x1800, @@ -1600,6 +1639,49 @@ struct wmi_scan_complete_event { __le32 status; } __packed; +/* wmi_rx_mgmt_info */ +struct wmi_rx_mgmt_info { + u8 mcs; + s8 rssi; + u8 range; + u8 sqi; + __le16 stype; + __le16 status; + __le32 len; + /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */ + u8 qid; + /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */ + u8 mid; + u8 cid; + /* From Radio MNGR */ + u8 channel; +} __packed; + +/* WMI_START_SCHED_SCAN_EVENTID */ +enum wmi_pno_result { + WMI_PNO_SUCCESS = 0x00, + WMI_PNO_REJECT = 0x01, + WMI_PNO_INVALID_PARAMETERS = 0x02, + WMI_PNO_NOT_ENABLED = 0x03, +}; + +struct wmi_start_sched_scan_event { + /* pno_result */ + u8 result; + u8 reserved[3]; +} __packed; + +struct wmi_stop_sched_scan_event { + /* pno_result */ + u8 result; + u8 reserved[3]; +} __packed; + +struct wmi_sched_scan_result_event { + struct wmi_rx_mgmt_info info; + u8 payload[0]; +} __packed; + /* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */ enum wmi_acs_info_bitmask { WMI_ACS_INFO_BITMASK_BEACON_FOUND = 0x01, @@ -1814,24 +1896,6 @@ struct wmi_get_ssid_event { u8 ssid[WMI_MAX_SSID_LEN]; } __packed; -/* wmi_rx_mgmt_info */ -struct wmi_rx_mgmt_info { - u8 mcs; - s8 rssi; - u8 range; - u8 sqi; - __le16 stype; - __le16 status; - __le32 len; - /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */ - u8 qid; - /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */ - u8 mid; - u8 cid; - /* From Radio MNGR */ - u8 channel; -} __packed; - /* EVENT: WMI_RF_XPM_READ_RESULT_EVENTID */ struct wmi_rf_xpm_read_result_event { /* enum wmi_fw_status_e - success=0 or fail=1 */ @@ -2267,8 +2331,8 @@ struct wmi_link_maintain_cfg_read_done_event { } __packed; enum wmi_traffic_suspend_status { - WMI_TRAFFIC_SUSPEND_APPROVED = 0x0, - WMI_TRAFFIC_SUSPEND_REJECTED = 0x1, + WMI_TRAFFIC_SUSPEND_APPROVED = 0x0, + WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE = 0x1, }; /* WMI_TRAFFIC_SUSPEND_EVENTID */ @@ -2282,10 +2346,21 @@ enum wmi_traffic_resume_status { WMI_TRAFFIC_RESUME_FAILED = 0x1, }; +enum wmi_resume_trigger { + WMI_RESUME_TRIGGER_UNKNOWN = 0x0, + WMI_RESUME_TRIGGER_HOST = 0x1, + WMI_RESUME_TRIGGER_UCAST_RX = 0x2, + 
WMI_RESUME_TRIGGER_BCAST_RX = 0x4, + WMI_RESUME_TRIGGER_WMI_EVT = 0x8, +}; + /* WMI_TRAFFIC_RESUME_EVENTID */ struct wmi_traffic_resume_event { - /* enum wmi_traffic_resume_status_e */ + /* enum wmi_traffic_resume_status */ u8 status; + u8 reserved[3]; + /* enum wmi_resume_trigger bitmap */ + __le32 resume_triggers; } __packed; /* Power Save command completion status codes */ diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c index a5557d70689f..f2a2f41e3c96 100644 --- a/drivers/net/wireless/broadcom/b43/phy_n.c +++ b/drivers/net/wireless/broadcom/b43/phy_n.c @@ -1031,7 +1031,7 @@ static void b43_radio_2057_init_post(struct b43_wldev *dev) b43_radio_set(dev, R2057_RFPLL_MISC_CAL_RESETN, 0x78); b43_radio_set(dev, R2057_XTAL_CONFIG2, 0x80); - mdelay(2); + usleep_range(2000, 3000); b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78); b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c index 9f2d0b0cf6e5..2d3a5dd07a3f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c @@ -165,7 +165,7 @@ static int brcmf_proto_bcdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len) static int brcmf_proto_bcdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd, - void *buf, uint len) + void *buf, uint len, int *fwerr) { struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd; struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg; @@ -175,6 +175,7 @@ brcmf_proto_bcdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd, brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len); + *fwerr = 0; ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, false); if (ret < 0) { brcmf_err("brcmf_proto_bcdc_msg failed w/status %d\n", @@ -211,25 +212,27 @@ retry: memcpy(buf, info, len); } + ret = 0; + /* Check the ERROR flag */ if (flags & BCDC_DCMD_ERROR) - ret = le32_to_cpu(msg->status); - + *fwerr = le32_to_cpu(msg->status); done: return ret; } static int brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd, - void *buf, uint len) + void *buf, uint len, int *fwerr) { struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd; struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg; - int ret = 0; + int ret; u32 flags, id; brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len); + *fwerr = 0; ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, true); if (ret < 0) goto done; @@ -249,9 +252,11 @@ brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd, goto done; } + ret = 0; + /* Check the ERROR flag */ if (flags & BCDC_DCMD_ERROR) - ret = le32_to_cpu(msg->status); + *fwerr = le32_to_cpu(msg->status); done: return ret; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index cd587325e286..0b68240ec7b4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -118,7 +118,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev) ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler, pdata->oob_irq_flags, "brcmf_oob_intr", - &sdiodev->func[1]->dev); + &sdiodev->func1->dev); if (ret != 0) { brcmf_err("request_irq failed %d\n", ret); return ret; @@ -132,39 +132,40 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev) } sdiodev->irq_wake = true; - 
sdio_claim_host(sdiodev->func[1]); + sdio_claim_host(sdiodev->func1); if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) { /* assign GPIO to SDIO core */ addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol); - gpiocontrol = brcmf_sdiod_regrl(sdiodev, addr, &ret); + gpiocontrol = brcmf_sdiod_readl(sdiodev, addr, &ret); gpiocontrol |= 0x2; - brcmf_sdiod_regwl(sdiodev, addr, gpiocontrol, &ret); + brcmf_sdiod_writel(sdiodev, addr, gpiocontrol, &ret); - brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_SELECT, 0xf, - &ret); - brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret); - brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret); + brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_SELECT, + 0xf, &ret); + brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret); + brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret); } /* must configure SDIO_CCCR_IENx to enable irq */ - data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret); - data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1; - brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret); + data = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_IENx, &ret); + data |= SDIO_CCCR_IEN_FUNC1 | SDIO_CCCR_IEN_FUNC2 | + SDIO_CCCR_IEN_FUNC0; + brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, data, &ret); /* redirect, configure and enable io for interrupt signal */ - data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; + data = SDIO_CCCR_BRCM_SEPINT_MASK | SDIO_CCCR_BRCM_SEPINT_OE; if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH) - data |= SDIO_SEPINT_ACT_HI; - brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret); - - sdio_release_host(sdiodev->func[1]); + data |= SDIO_CCCR_BRCM_SEPINT_ACT_HI; + brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT, + data, &ret); + sdio_release_host(sdiodev->func1); } else { brcmf_dbg(SDIO, "Entering\n"); - sdio_claim_host(sdiodev->func[1]); - sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler); - sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler); - sdio_release_host(sdiodev->func[1]); + sdio_claim_host(sdiodev->func1); + sdio_claim_irq(sdiodev->func1, brcmf_sdiod_ib_irqhandler); + sdio_claim_irq(sdiodev->func2, brcmf_sdiod_dummy_irqhandler); + sdio_release_host(sdiodev->func1); sdiodev->sd_irq_requested = true; } @@ -182,26 +183,26 @@ void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev) struct brcmfmac_sdio_pd *pdata; pdata = &sdiodev->settings->bus.sdio; - sdio_claim_host(sdiodev->func[1]); - brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL); - brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL); - sdio_release_host(sdiodev->func[1]); + sdio_claim_host(sdiodev->func1); + brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL); + brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, 0, NULL); + sdio_release_host(sdiodev->func1); sdiodev->oob_irq_requested = false; if (sdiodev->irq_wake) { disable_irq_wake(pdata->oob_irq_nr); sdiodev->irq_wake = false; } - free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev); + free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev); sdiodev->irq_en = false; sdiodev->oob_irq_requested = false; } if (sdiodev->sd_irq_requested) { - sdio_claim_host(sdiodev->func[1]); - sdio_release_irq(sdiodev->func[2]); - sdio_release_irq(sdiodev->func[1]); - sdio_release_host(sdiodev->func[1]); + sdio_claim_host(sdiodev->func1); + sdio_release_irq(sdiodev->func2); + sdio_release_irq(sdiodev->func1); + sdio_release_host(sdiodev->func1); sdiodev->sd_irq_requested = false; } } @@ -230,271 +231,121 @@ void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev, sdiodev->state = state; } -static inline int 
brcmf_sdiod_f0_writeb(struct sdio_func *func, - uint regaddr, u8 byte) -{ - int err_ret; - - /* - * Can only directly write to some F0 registers. - * Handle CCCR_IENx and CCCR_ABORT command - * as a special case. - */ - if ((regaddr == SDIO_CCCR_ABORT) || - (regaddr == SDIO_CCCR_IENx)) - sdio_writeb(func, byte, regaddr, &err_ret); - else - sdio_f0_writeb(func, byte, regaddr, &err_ret); - - return err_ret; -} - -static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn, - u32 addr, u8 regsz, void *data, bool write) -{ - struct sdio_func *func; - int ret = -EINVAL; - - brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n", - write, fn, addr, regsz); - - /* only allow byte access on F0 */ - if (WARN_ON(regsz > 1 && !fn)) - return -EINVAL; - func = sdiodev->func[fn]; - - switch (regsz) { - case sizeof(u8): - if (write) { - if (fn) - sdio_writeb(func, *(u8 *)data, addr, &ret); - else - ret = brcmf_sdiod_f0_writeb(func, addr, - *(u8 *)data); - } else { - if (fn) - *(u8 *)data = sdio_readb(func, addr, &ret); - else - *(u8 *)data = sdio_f0_readb(func, addr, &ret); - } - break; - case sizeof(u16): - if (write) - sdio_writew(func, *(u16 *)data, addr, &ret); - else - *(u16 *)data = sdio_readw(func, addr, &ret); - break; - case sizeof(u32): - if (write) - sdio_writel(func, *(u32 *)data, addr, &ret); - else - *(u32 *)data = sdio_readl(func, addr, &ret); - break; - default: - brcmf_err("invalid size: %d\n", regsz); - break; - } - - if (ret) - brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n", - write ? "write" : "read", fn, addr, ret); - - return ret; -} - -static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr, - u8 regsz, void *data, bool write) -{ - u8 func; - s32 retry = 0; - int ret; - - if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM) - return -ENOMEDIUM; - - /* - * figure out how to read the register based on address range - * 0x00 ~ 0x7FF: function 0 CCCR and FBR - * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers - * The rest: function 1 silicon backplane core registers - */ - if ((addr & ~REG_F0_REG_MASK) == 0) - func = SDIO_FUNC_0; - else - func = SDIO_FUNC_1; - - do { - if (!write) - memset(data, 0, regsz); - /* for retry wait for 1 ms till bus get settled down */ - if (retry) - usleep_range(1000, 2000); - ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz, - data, write); - } while (ret != 0 && ret != -ENOMEDIUM && - retry++ < SDIOH_API_ACCESS_RETRY_LIMIT); - - if (ret == -ENOMEDIUM) - brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM); - else if (ret != 0) { - /* - * SleepCSR register access can fail when - * waking up the device so reduce this noise - * in the logs. - */ - if (addr != SBSDIO_FUNC1_SLEEPCSR) - brcmf_err("failed to %s data F%d@0x%05x, err: %d\n", - write ? "write" : "read", func, addr, ret); - else - brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n", - write ? 
"write" : "read", func, addr, ret); - } - return ret; -} - -static int -brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address) +static int brcmf_sdiod_set_backplane_window(struct brcmf_sdio_dev *sdiodev, + u32 addr) { + u32 v, bar0 = addr & SBSDIO_SBWINDOW_MASK; int err = 0, i; - u8 addr[3]; - - if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM) - return -ENOMEDIUM; - - addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK; - addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK; - addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK; - - for (i = 0; i < 3; i++) { - err = brcmf_sdiod_regrw_helper(sdiodev, - SBSDIO_FUNC1_SBADDRLOW + i, - sizeof(u8), &addr[i], true); - if (err) { - brcmf_err("failed at addr: 0x%0x\n", - SBSDIO_FUNC1_SBADDRLOW + i); - break; - } - } - return err; -} + if (bar0 == sdiodev->sbwad) + return 0; -static int -brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr) -{ - uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK; - int err = 0; + v = bar0 >> 8; - if (bar0 != sdiodev->sbwad) { - err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0); - if (err) - return err; + for (i = 0 ; i < 3 && !err ; i++, v >>= 8) + brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SBADDRLOW + i, + v & 0xff, &err); + if (!err) sdiodev->sbwad = bar0; - } - - *addr &= SBSDIO_SB_OFT_ADDR_MASK; - if (width == 4) - *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; - - return 0; + return err; } -u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret) +u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret) { - u8 data; + u32 data = 0; int retval; - brcmf_dbg(SDIO, "addr:0x%08x\n", addr); - retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data, - false); - brcmf_dbg(SDIO, "data:0x%02x\n", data); + retval = brcmf_sdiod_set_backplane_window(sdiodev, addr); + if (retval) + goto out; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + data = sdio_readl(sdiodev->func1, addr, &retval); +out: if (ret) *ret = retval; return data; } -u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret) +void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr, + u32 data, int *ret) { - u32 data = 0; int retval; - brcmf_dbg(SDIO, "addr:0x%08x\n", addr); - retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr); + retval = brcmf_sdiod_set_backplane_window(sdiodev, addr); if (retval) - goto done; - retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data, - false); - brcmf_dbg(SDIO, "data:0x%08x\n", data); - -done: - if (ret) - *ret = retval; + goto out; - return data; -} + addr &= SBSDIO_SB_OFT_ADDR_MASK; + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; -void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, - u8 data, int *ret) -{ - int retval; + sdio_writel(sdiodev->func1, data, addr, &retval); - brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data); - retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data, - true); +out: if (ret) *ret = retval; } -void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, - u32 data, int *ret) +static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev, + struct sdio_func *func, u32 addr, + struct sk_buff *skb) { - int retval; + unsigned int req_sz; + int err; - brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data); - retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr); - if (retval) - goto done; - retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data, - true); + /* Single skb use the standard mmc interface */ + req_sz 
= skb->len + 3; + req_sz &= (uint)~3; -done: - if (ret) - *ret = retval; + switch (func->num) { + case 1: + err = sdio_memcpy_fromio(func, ((u8 *)(skb->data)), addr, + req_sz); + break; + case 2: + err = sdio_readsb(func, ((u8 *)(skb->data)), addr, req_sz); + break; + default: + /* bail out as things are really fishy here */ + WARN(1, "invalid sdio function number: %d\n", func->num); + err = -ENOMEDIUM; + }; + + if (err == -ENOMEDIUM) + brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM); + + return err; } -static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn, - bool write, u32 addr, struct sk_buff *pkt) +static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev, + struct sdio_func *func, u32 addr, + struct sk_buff *skb) { unsigned int req_sz; int err; /* Single skb use the standard mmc interface */ - req_sz = pkt->len + 3; + req_sz = skb->len + 3; req_sz &= (uint)~3; - if (write) - err = sdio_memcpy_toio(sdiodev->func[fn], addr, - ((u8 *)(pkt->data)), req_sz); - else if (fn == 1) - err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)), - addr, req_sz); - else - /* function 2 read is FIFO operation */ - err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr, - req_sz); + err = sdio_memcpy_toio(func, addr, ((u8 *)(skb->data)), req_sz); + if (err == -ENOMEDIUM) brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM); + return err; } /** * brcmf_sdiod_sglist_rw - SDIO interface function for block data access * @sdiodev: brcmfmac sdio device - * @fn: SDIO function number + * @func: SDIO function * @write: direction flag * @addr: dongle memory address as source/destination * @pkt: skb pointer @@ -503,7 +354,8 @@ static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn, * stack for block data access. It assumes that the skb passed down by the * caller has already been padded and aligned. */ -static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn, +static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, + struct sdio_func *func, bool write, u32 addr, struct sk_buff_head *pktlist) { @@ -529,7 +381,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn, req_sz = 0; skb_queue_walk(pktlist, pkt_next) req_sz += pkt_next->len; - req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize); + req_sz = ALIGN(req_sz, func->cur_blksize); while (req_sz > PAGE_SIZE) { pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE); if (pkt_next == NULL) { @@ -548,7 +400,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn, target_list = &local_list; } - func_blk_sz = sdiodev->func[fn]->cur_blksize; + func_blk_sz = func->cur_blksize; max_req_sz = sdiodev->max_request_size; max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count, target_list->qlen); @@ -565,10 +417,10 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn, mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; mmc_cmd.opcode = SD_IO_RW_EXTENDED; mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */ - mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */ - mmc_cmd.arg |= 1<<27; /* block mode */ + mmc_cmd.arg |= (func->num & 0x7) << 28; /* SDIO func num */ + mmc_cmd.arg |= 1 << 27; /* block mode */ /* for function 1 the addr will be incremented */ - mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0; + mmc_cmd.arg |= (func->num == 1) ? 
1 << 26 : 0; mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; mmc_req.cmd = &mmc_cmd; mmc_req.data = &mmc_dat; @@ -614,11 +466,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn, mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */ mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */ /* incrementing addr for function 1 */ - if (fn == 1) + if (func->num == 1) addr += req_sz; - mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card); - mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req); + mmc_set_data_timeout(&mmc_dat, func->card); + mmc_wait_for_req(func->card->host, &mmc_req); ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error; if (ret == -ENOMEDIUM) { @@ -686,16 +538,19 @@ int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes) int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt) { - u32 addr = sdiodev->sbwad; + u32 addr = sdiodev->cc_core->base; int err = 0; brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len); - err = brcmf_sdiod_addrprep(sdiodev, 4, &addr); + err = brcmf_sdiod_set_backplane_window(sdiodev, addr); if (err) goto done; - err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt); + addr &= SBSDIO_SB_OFT_ADDR_MASK; + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, pkt); done: return err; @@ -706,25 +561,28 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev, { struct sk_buff *glom_skb = NULL; struct sk_buff *skb; - u32 addr = sdiodev->sbwad; + u32 addr = sdiodev->cc_core->base; int err = 0; brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen); - err = brcmf_sdiod_addrprep(sdiodev, 4, &addr); + err = brcmf_sdiod_set_backplane_window(sdiodev, addr); if (err) goto done; + addr &= SBSDIO_SB_OFT_ADDR_MASK; + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + if (pktq->qlen == 1) - err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, - pktq->next); + err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, + pktq->next); else if (!sdiodev->sg_support) { glom_skb = brcmu_pkt_buf_get_skb(totlen); if (!glom_skb) return -ENOMEM; - err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, - glom_skb); + err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, + glom_skb); if (err) goto done; @@ -733,8 +591,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev, skb_pull(glom_skb, skb->len); } } else - err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr, - pktq); + err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, false, + addr, pktq); done: brcmu_pkt_buf_free_skb(glom_skb); @@ -744,10 +602,11 @@ done: int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes) { struct sk_buff *mypkt; - u32 addr = sdiodev->sbwad; + u32 addr = sdiodev->cc_core->base; int err; mypkt = brcmu_pkt_buf_get_skb(nbytes); + if (!mypkt) { brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n", nbytes); @@ -756,40 +615,49 @@ int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes) memcpy(mypkt->data, buf, nbytes); - err = brcmf_sdiod_addrprep(sdiodev, 4, &addr); + err = brcmf_sdiod_set_backplane_window(sdiodev, addr); + if (err) + return err; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; if (!err) - err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr, - mypkt); + err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr, + mypkt); brcmu_pkt_buf_free_skb(mypkt); - return err; + return err; } int brcmf_sdiod_send_pkt(struct 
brcmf_sdio_dev *sdiodev, struct sk_buff_head *pktq) { struct sk_buff *skb; - u32 addr = sdiodev->sbwad; + u32 addr = sdiodev->cc_core->base; int err; brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen); - err = brcmf_sdiod_addrprep(sdiodev, 4, &addr); + err = brcmf_sdiod_set_backplane_window(sdiodev, addr); if (err) return err; - if (pktq->qlen == 1 || !sdiodev->sg_support) + addr &= SBSDIO_SB_OFT_ADDR_MASK; + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + if (pktq->qlen == 1 || !sdiodev->sg_support) { skb_queue_walk(pktq, skb) { - err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, - addr, skb); + err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, + addr, skb); if (err) break; } - else - err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr, - pktq); + } else { + err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, true, + addr, pktq); + } return err; } @@ -798,7 +666,7 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address, u8 *data, uint size) { - int bcmerror = 0; + int err = 0; struct sk_buff *pkt; u32 sdaddr; uint dsize; @@ -818,13 +686,13 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address, else dsize = size; - sdio_claim_host(sdiodev->func[1]); + sdio_claim_host(sdiodev->func1); /* Do the transfer(s) */ while (size) { /* Set the backplane window to include the start address */ - bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address); - if (bcmerror) + err = brcmf_sdiod_set_backplane_window(sdiodev, address); + if (err) break; brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n", @@ -835,11 +703,17 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address, sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG; skb_put(pkt, dsize); - if (write) + + if (write) { memcpy(pkt->data, data, dsize); - bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write, - sdaddr, pkt); - if (bcmerror) { + err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func1, + sdaddr, pkt); + } else { + err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func1, + sdaddr, pkt); + } + + if (err) { brcmf_err("membytes transfer failed\n"); break; } @@ -859,24 +733,17 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address, dev_kfree_skb(pkt); - /* Return the window to backplane enumeration space for core access */ - if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad)) - brcmf_err("FAILED to set window back to 0x%x\n", - sdiodev->sbwad); + sdio_release_host(sdiodev->func1); - sdio_release_host(sdiodev->func[1]); - - return bcmerror; + return err; } -int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn) +int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func) { - char t_func = (char)fn; brcmf_dbg(SDIO, "Enter\n"); - /* issue abort cmd52 command through F0 */ - brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT, - sizeof(t_func), &t_func, true); + /* Issue abort cmd52 command through F0 */ + brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, func->num, NULL); brcmf_dbg(SDIO, "Exit\n"); return 0; @@ -890,7 +757,7 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev) uint nents; int err; - func = sdiodev->func[2]; + func = sdiodev->func2; host = func->card->host; sdiodev->sg_support = host->max_segs > 1; max_blocks = min_t(uint, host->max_blk_count, 511u); @@ -951,17 +818,17 @@ static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev) brcmf_sdio_trigger_dpc(sdiodev->bus); wait_event(sdiodev->freezer->thread_freeze, atomic_read(expect) == 
sdiodev->freezer->frozen_count); - sdio_claim_host(sdiodev->func[1]); + sdio_claim_host(sdiodev->func1); res = brcmf_sdio_sleep(sdiodev->bus, true); - sdio_release_host(sdiodev->func[1]); + sdio_release_host(sdiodev->func1); return res; } static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev) { - sdio_claim_host(sdiodev->func[1]); + sdio_claim_host(sdiodev->func1); brcmf_sdio_sleep(sdiodev->bus, false); - sdio_release_host(sdiodev->func[1]); + sdio_release_host(sdiodev->func1); atomic_set(&sdiodev->freezer->freezing, 0); complete_all(&sdiodev->freezer->resumed); } @@ -1011,19 +878,19 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev) brcmf_sdiod_freezer_detach(sdiodev); /* Disable Function 2 */ - sdio_claim_host(sdiodev->func[2]); - sdio_disable_func(sdiodev->func[2]); - sdio_release_host(sdiodev->func[2]); + sdio_claim_host(sdiodev->func2); + sdio_disable_func(sdiodev->func2); + sdio_release_host(sdiodev->func2); /* Disable Function 1 */ - sdio_claim_host(sdiodev->func[1]); - sdio_disable_func(sdiodev->func[1]); - sdio_release_host(sdiodev->func[1]); + sdio_claim_host(sdiodev->func1); + sdio_disable_func(sdiodev->func1); + sdio_release_host(sdiodev->func1); sg_free_table(&sdiodev->sgtable); sdiodev->sbwad = 0; - pm_runtime_allow(sdiodev->func[1]->card->host->parent); + pm_runtime_allow(sdiodev->func1->card->host->parent); return 0; } @@ -1039,29 +906,27 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) { int ret = 0; - sdiodev->num_funcs = 2; + sdio_claim_host(sdiodev->func1); - sdio_claim_host(sdiodev->func[1]); - - ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE); + ret = sdio_set_block_size(sdiodev->func1, SDIO_FUNC1_BLOCKSIZE); if (ret) { brcmf_err("Failed to set F1 blocksize\n"); - sdio_release_host(sdiodev->func[1]); + sdio_release_host(sdiodev->func1); goto out; } - ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE); + ret = sdio_set_block_size(sdiodev->func2, SDIO_FUNC2_BLOCKSIZE); if (ret) { brcmf_err("Failed to set F2 blocksize\n"); - sdio_release_host(sdiodev->func[1]); + sdio_release_host(sdiodev->func1); goto out; } /* increase F2 timeout */ - sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY; + sdiodev->func2->enable_timeout = SDIO_WAIT_F2RDY; /* Enable Function 1 */ - ret = sdio_enable_func(sdiodev->func[1]); - sdio_release_host(sdiodev->func[1]); + ret = sdio_enable_func(sdiodev->func1); + sdio_release_host(sdiodev->func1); if (ret) { brcmf_err("Failed to enable F1: err=%d\n", ret); goto out; @@ -1077,7 +942,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) ret = -ENODEV; goto out; } - brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host); + brcmf_sdiod_host_fixup(sdiodev->func2->card->host); out: if (ret) brcmf_sdiod_remove(sdiodev); @@ -1138,6 +1003,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, brcmf_dbg(SDIO, "Function#: %d\n", func->num); dev = &func->dev; + + /* Set MMC_QUIRK_LENIENT_FN0 for this card */ + func->card->quirks |= MMC_QUIRK_LENIENT_FN0; + /* prohibit ACPI power management for this device */ brcmf_sdiod_acpi_set_power_manageable(dev, 0); @@ -1161,17 +1030,15 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, /* store refs to functions used. mmc_card does * not hold the F0 function pointer. 
*/ - sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL); - sdiodev->func[0]->num = 0; - sdiodev->func[1] = func->card->sdio_func[0]; - sdiodev->func[2] = func; + sdiodev->func1 = func->card->sdio_func[0]; + sdiodev->func2 = func; sdiodev->bus_if = bus_if; bus_if->bus_priv.sdio = sdiodev; bus_if->proto_type = BRCMF_PROTO_BCDC; dev_set_drvdata(&func->dev, bus_if); - dev_set_drvdata(&sdiodev->func[1]->dev, bus_if); - sdiodev->dev = &sdiodev->func[1]->dev; + dev_set_drvdata(&sdiodev->func1->dev, bus_if); + sdiodev->dev = &sdiodev->func1->dev; brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN); @@ -1187,8 +1054,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, fail: dev_set_drvdata(&func->dev, NULL); - dev_set_drvdata(&sdiodev->func[1]->dev, NULL); - kfree(sdiodev->func[0]); + dev_set_drvdata(&sdiodev->func1->dev, NULL); kfree(sdiodev); kfree(bus_if); return err; @@ -1217,11 +1083,10 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func) /* only proceed with rest of cleanup if func 1 */ brcmf_sdiod_remove(sdiodev); - dev_set_drvdata(&sdiodev->func[1]->dev, NULL); - dev_set_drvdata(&sdiodev->func[2]->dev, NULL); + dev_set_drvdata(&sdiodev->func1->dev, NULL); + dev_set_drvdata(&sdiodev->func2->dev, NULL); kfree(bus_if); - kfree(sdiodev->func[0]); kfree(sdiodev); } @@ -1247,7 +1112,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev) func = container_of(dev, struct sdio_func, dev); brcmf_dbg(SDIO, "Enter: F%d\n", func->num); - if (func->num != SDIO_FUNC_1) + if (func->num != 1) return 0; @@ -1264,7 +1129,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev) else sdio_flags |= MMC_PM_WAKE_SDIO_IRQ; } - if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags)) + if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags)) brcmf_err("Failed to set pm_flags %x\n", sdio_flags); return 0; } @@ -1276,7 +1141,7 @@ static int brcmf_ops_sdio_resume(struct device *dev) struct sdio_func *func = container_of(dev, struct sdio_func, dev); brcmf_dbg(SDIO, "Enter: F%d\n", func->num); - if (func->num != SDIO_FUNC_2) + if (func->num != 2) return 0; brcmf_sdiod_freezer_off(sdiodev); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index c5d1a1cbf601..f7b30ce2300d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -1338,6 +1338,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub) switch (pub->chip) { case BRCM_CC_4354_CHIP_ID: case BRCM_CC_4356_CHIP_ID: + case BRCM_CC_4345_CHIP_ID: /* explicitly check SR engine enable bit */ pmu_cc3_mask = BIT(2); /* fall-through */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 53ae30259989..47de35a33853 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -130,13 +130,19 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp, } } +#define MAX_CAPS_BUFFER_SIZE 512 static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp) { - char caps[256]; + char caps[MAX_CAPS_BUFFER_SIZE]; enum brcmf_feat_id id; - int i; + int i, err; + + err = brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps)); + if (err) { + brcmf_err("could not get firmware cap (%d)\n", err); + return; + } - brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps)); brcmf_dbg(INFO, "[ %s]\n", caps); for (i = 0; i < 
ARRAY_SIZE(brcmf_fwcap_map); i++) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c index f6a2df94dba7..f2cfdd3b2bf1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c @@ -107,7 +107,7 @@ static s32 brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set) { struct brcmf_pub *drvr = ifp->drvr; - s32 err; + s32 err, fwerr; if (drvr->bus_if->state != BRCMF_BUS_UP) { brcmf_err("bus is down. we have nothing to do.\n"); @@ -117,16 +117,20 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set) if (data != NULL) len = min_t(uint, len, BRCMF_DCMD_MAXLEN); if (set) - err = brcmf_proto_set_dcmd(drvr, ifp->ifidx, cmd, data, len); + err = brcmf_proto_set_dcmd(drvr, ifp->ifidx, cmd, + data, len, &fwerr); else - err = brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd, data, len); - - if (err >= 0) - return 0; - - brcmf_dbg(FIL, "Failed: %s (%d)\n", - brcmf_fil_get_errstr((u32)(-err)), err); - + err = brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd, + data, len, &fwerr); + + if (err) { + brcmf_dbg(FIL, "Failed: %s (%d)\n", + brcmf_fil_get_errstr((u32)(-err)), err); + } else if (fwerr < 0) { + brcmf_dbg(FIL, "Firmware error: %s (%d)\n", + brcmf_fil_get_errstr((u32)(-fwerr)), fwerr); + err = -EBADE; + } return err; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index d2c834c3b2fc..e212a791a072 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -477,7 +477,7 @@ static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf) static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx, - uint cmd, void *buf, uint len) + uint cmd, void *buf, uint len, int *fwerr) { struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; struct sk_buff *skb = NULL; @@ -485,6 +485,7 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx, int err; brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len); + *fwerr = 0; msgbuf->ctl_completed = false; err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len); if (err) @@ -508,14 +509,15 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx, } brcmu_pkt_buf_free_skb(skb); - return msgbuf->ioctl_resp_status; + *fwerr = msgbuf->ioctl_resp_status; + return 0; } static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx, - uint cmd, void *buf, uint len) + uint cmd, void *buf, uint len, int *fwerr) { - return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len); + return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len, fwerr); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 3c87157f5b85..8752707557bf 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1251,14 +1251,14 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) u64 address; u32 addr; - devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev, - BRCMF_DMA_D2H_SCRATCH_BUF_LEN, - &devinfo->shared.scratch_dmahandle, GFP_KERNEL); + devinfo->shared.scratch = + dma_zalloc_coherent(&devinfo->pdev->dev, + BRCMF_DMA_D2H_SCRATCH_BUF_LEN, + &devinfo->shared.scratch_dmahandle, + GFP_KERNEL); 
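The fwil.c and msgbuf.c hunks above split the two failure modes of a dongle command: the proto ->query_dcmd()/->set_dcmd() ops now return only the bus-level result, while the firmware's own status comes back through the new fwerr out-parameter, which brcmf_fil_cmd_data() maps to -EBADE. A minimal sketch of the resulting calling convention (the helper name is hypothetical; the dcmd signature is the one this patch introduces):

	/* Hypothetical caller illustrating the bus-error / firmware-error
	 * split introduced above; not driver code.
	 */
	static int example_query(struct brcmf_pub *drvr, int ifidx, uint cmd,
				 void *buf, uint len)
	{
		int fwerr = 0;
		int err;

		err = brcmf_proto_query_dcmd(drvr, ifidx, cmd, buf, len, &fwerr);
		if (err)
			return err;	/* transport failed: -EIO, -ENODEV, ... */
		if (fwerr < 0)
			return -EBADE;	/* bus OK, firmware rejected the command */

		return 0;
	}
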
if (!devinfo->shared.scratch) goto fail; - memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); - addr = devinfo->shared.tcm_base_address + BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET; address = (u64)devinfo->shared.scratch_dmahandle; @@ -1268,14 +1268,14 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET; brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); - devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev, - BRCMF_DMA_D2H_RINGUPD_BUF_LEN, - &devinfo->shared.ringupd_dmahandle, GFP_KERNEL); + devinfo->shared.ringupd = + dma_zalloc_coherent(&devinfo->pdev->dev, + BRCMF_DMA_D2H_RINGUPD_BUF_LEN, + &devinfo->shared.ringupd_dmahandle, + GFP_KERNEL); if (!devinfo->shared.ringupd) goto fail; - memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN); - addr = devinfo->shared.tcm_base_address + BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET; address = (u64)devinfo->shared.ringupd_dmahandle; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h index 2404f8a7c31c..8a8e08f09ea0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h @@ -30,9 +30,9 @@ struct brcmf_proto { int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws, struct sk_buff *skb, struct brcmf_if **ifp); int (*query_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd, - void *buf, uint len); + void *buf, uint len, int *fwerr); int (*set_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf, - uint len); + uint len, int *fwerr); int (*tx_queue_data)(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb); int (*txdata)(struct brcmf_pub *drvr, int ifidx, u8 offset, @@ -71,14 +71,16 @@ static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, return drvr->proto->hdrpull(drvr, do_fws, skb, ifp); } static inline int brcmf_proto_query_dcmd(struct brcmf_pub *drvr, int ifidx, - uint cmd, void *buf, uint len) + uint cmd, void *buf, uint len, + int *fwerr) { - return drvr->proto->query_dcmd(drvr, ifidx, cmd, buf, len); + return drvr->proto->query_dcmd(drvr, ifidx, cmd, buf, len, fwerr); } static inline int brcmf_proto_set_dcmd(struct brcmf_pub *drvr, int ifidx, - uint cmd, void *buf, uint len) + uint cmd, void *buf, uint len, + int *fwerr) { - return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len); + return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len, fwerr); } static inline int brcmf_proto_tx_queue_data(struct brcmf_pub *drvr, int ifidx, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index cdf9e4161592..08686147b59d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -159,8 +159,8 @@ struct rte_console { /* manfid tuple length, include tuple, link bytes */ #define SBSDIO_CIS_MANFID_TUPLE_LEN 6 -#define CORE_BUS_REG(base, field) \ - (base + offsetof(struct sdpcmd_regs, field)) +#define SD_REG(field) \ + (offsetof(struct sdpcmd_regs, field)) /* SDIO function 1 register CHIPCLKCSR */ /* Force ALP request to backplane */ @@ -436,6 +436,7 @@ struct brcmf_sdio_count { struct brcmf_sdio { struct brcmf_sdio_dev *sdiodev; /* sdio device handler */ struct brcmf_chip *ci; /* Chip info struct */ + struct brcmf_core *sdio_core; /* sdio core info struct */ u32 hostintmask; /* Copy of Host Interrupt Mask */ atomic_t 
intstatus; /* Intstatus bits (events) pending */ @@ -659,32 +660,6 @@ static bool data_ok(struct brcmf_sdio *bus) ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0; } -/* - * Reads a register in the SDIO hardware block. This block occupies a series of - * adresses on the 32 bit backplane bus. - */ -static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset) -{ - struct brcmf_core *core; - int ret; - - core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV); - *regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret); - - return ret; -} - -static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset) -{ - struct brcmf_core *core; - int ret; - - core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV); - brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret); - - return ret; -} - static int brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on) { @@ -697,8 +672,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on) wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); /* 1st KSO write goes to AOS wake up core if device is asleep */ - brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, - wr_val, &err); + brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); if (on) { /* device WAKEUP through KSO: @@ -724,7 +698,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on) * just one write attempt may fail, * read it back until it matches written value */ - rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, + rd_val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err); if (!err) { if ((rd_val & bmask) == cmp_val) @@ -734,9 +708,11 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on) /* bail out upon subsequent access errors */ if (err && (err_cnt++ > BRCMF_SDIO_MAX_ACCESS_ERRORS)) break; + udelay(KSO_WAIT_US); - brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, - wr_val, &err); + brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, + &err); + } while (try_cnt++ < MAX_KSO_ATTEMPTS); if (try_cnt > 2) @@ -772,15 +748,15 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok) clkreq = bus->alp_only ? 
SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ; - brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, - clkreq, &err); + brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, + clkreq, &err); if (err) { brcmf_err("HT Avail request error: %d\n", err); return -EBADE; } /* Check current status */ - clkctl = brcmf_sdiod_regrb(bus->sdiodev, + clkctl = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err); if (err) { brcmf_err("HT Avail read error: %d\n", err); @@ -790,35 +766,34 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok) /* Go to pending and await interrupt if appropriate */ if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) { /* Allow only clock-available interrupt */ - devctl = brcmf_sdiod_regrb(bus->sdiodev, + devctl = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_DEVICE_CTL, &err); if (err) { - brcmf_err("Devctl error setting CA: %d\n", - err); + brcmf_err("Devctl error setting CA: %d\n", err); return -EBADE; } devctl |= SBSDIO_DEVCTL_CA_INT_ONLY; - brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL, - devctl, &err); + brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL, + devctl, &err); brcmf_dbg(SDIO, "CLKCTL: set PENDING\n"); bus->clkstate = CLK_PENDING; return 0; } else if (bus->clkstate == CLK_PENDING) { /* Cancel CA-only interrupt filter */ - devctl = brcmf_sdiod_regrb(bus->sdiodev, + devctl = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_DEVICE_CTL, &err); devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; - brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL, - devctl, &err); + brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL, + devctl, &err); } /* Otherwise, wait here (polling) for HT Avail */ timeout = jiffies + msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000); while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) { - clkctl = brcmf_sdiod_regrb(bus->sdiodev, + clkctl = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err); if (time_after(jiffies, timeout)) @@ -852,16 +827,16 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok) if (bus->clkstate == CLK_PENDING) { /* Cancel CA-only interrupt filter */ - devctl = brcmf_sdiod_regrb(bus->sdiodev, + devctl = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_DEVICE_CTL, &err); devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; - brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL, - devctl, &err); + brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL, + devctl, &err); } bus->clkstate = CLK_SDONLY; - brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, - clkreq, &err); + brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, + clkreq, &err); brcmf_dbg(SDIO, "CLKCTL: turned OFF\n"); if (err) { brcmf_err("Failed access turning clock off: %d\n", @@ -951,14 +926,14 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok) /* Going to sleep */ if (sleep) { - clkcsr = brcmf_sdiod_regrb(bus->sdiodev, + clkcsr = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err); if ((clkcsr & SBSDIO_CSR_MASK) == 0) { brcmf_dbg(SDIO, "no clock, set ALP\n"); - brcmf_sdiod_regwb(bus->sdiodev, - SBSDIO_FUNC1_CHIPCLKCSR, - SBSDIO_ALP_AVAIL_REQ, &err); + brcmf_sdiod_writeb(bus->sdiodev, + SBSDIO_FUNC1_CHIPCLKCSR, + SBSDIO_ALP_AVAIL_REQ, &err); } err = brcmf_sdio_kso_control(bus, false); } else { @@ -1004,7 +979,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus, struct sdpcm_shared_le sh_le; __le32 addr_le; - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); brcmf_sdio_bus_sleep(bus, false, false); /* @@ -1038,7 +1013,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus, if 
(rv < 0) goto fail; - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); /* Endianness */ sh->flags = le32_to_cpu(sh_le.flags); @@ -1060,7 +1035,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus, fail: brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n", rv, addr); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); return rv; } @@ -1079,6 +1054,8 @@ static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus) static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus) { + struct brcmf_sdio_dev *sdiod = bus->sdiodev; + struct brcmf_core *core = bus->sdio_core; u32 intstatus = 0; u32 hmb_data; u8 fcbits; @@ -1087,12 +1064,14 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus) brcmf_dbg(SDIO, "Enter\n"); /* Read mailbox data and ack that we did so */ - ret = r_sdreg32(bus, &hmb_data, - offsetof(struct sdpcmd_regs, tohostmailboxdata)); + hmb_data = brcmf_sdiod_readl(sdiod, + core->base + SD_REG(tohostmailboxdata), + &ret); + + if (!ret) + brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox), + SMB_INT_ACK, &ret); - if (ret == 0) - w_sdreg32(bus, SMB_INT_ACK, - offsetof(struct sdpcmd_regs, tosbmailbox)); bus->sdcnt.f1regdata += 2; /* dongle indicates the firmware has halted/crashed */ @@ -1166,6 +1145,8 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus) static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx) { + struct brcmf_sdio_dev *sdiod = bus->sdiodev; + struct brcmf_core *core = bus->sdio_core; uint retries = 0; u16 lastrbc; u8 hi, lo; @@ -1176,18 +1157,18 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx) rtx ? ", send NAK" : ""); if (abort) - brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2); + brcmf_sdiod_abort(bus->sdiodev, bus->sdiodev->func2); - brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, - SFC_RF_TERM, &err); + brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, + &err); bus->sdcnt.f1regdata++; /* Wait until the packet has been flushed (device/FIFO stable) */ for (lastrbc = retries = 0xffff; retries > 0; retries--) { - hi = brcmf_sdiod_regrb(bus->sdiodev, - SBSDIO_FUNC1_RFRAMEBCHI, &err); - lo = brcmf_sdiod_regrb(bus->sdiodev, - SBSDIO_FUNC1_RFRAMEBCLO, &err); + hi = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCHI, + &err); + lo = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCLO, + &err); bus->sdcnt.f1regdata += 2; if ((hi == 0) && (lo == 0)) @@ -1207,8 +1188,8 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx) if (rtx) { bus->sdcnt.rxrtx++; - err = w_sdreg32(bus, SMB_NAK, - offsetof(struct sdpcmd_regs, tosbmailbox)); + brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox), + SMB_NAK, &err); bus->sdcnt.f1regdata++; if (err == 0) @@ -1228,13 +1209,13 @@ static void brcmf_sdio_txfail(struct brcmf_sdio *bus) brcmf_err("sdio error, abort command and terminate frame\n"); bus->sdcnt.tx_sderrs++; - brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2); - brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL); + brcmf_sdiod_abort(sdiodev, sdiodev->func2); + brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL); bus->sdcnt.f1regdata++; for (i = 0; i < 3; i++) { - hi = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL); - lo = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL); + hi = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL); + lo = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL); bus->sdcnt.f1regdata += 
2; if ((hi == 0) && (lo == 0)) break; @@ -1584,10 +1565,10 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq) * read directly into the chained packet, or allocate a large * packet and and copy into the chain. */ - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); errcode = brcmf_sdiod_recv_chain(bus->sdiodev, &bus->glom, dlen); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); bus->sdcnt.f2rxdata++; /* On failure, kill the superframe */ @@ -1595,11 +1576,11 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq) brcmf_err("glom read of %d bytes failed: %d\n", dlen, errcode); - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); brcmf_sdio_rxfail(bus, true, false); bus->sdcnt.rxglomfail++; brcmf_sdio_free_glom(bus); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); return 0; } @@ -1609,10 +1590,10 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq) rd_new.seq_num = rxseq; rd_new.len = dlen; - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new, BRCMF_SDIO_FT_SUPER); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); bus->cur_read.len = rd_new.len_nxtfrm << 4; /* Remove superframe header, remember offset */ @@ -1628,10 +1609,10 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq) rd_new.len = pnext->len; rd_new.seq_num = rxseq++; - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new, BRCMF_SDIO_FT_SUB); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); brcmf_dbg_hex_dump(BRCMF_GLOM_ON(), pnext->data, 32, "subframe:\n"); @@ -1640,11 +1621,11 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq) if (errcode) { /* Terminate frame on error */ - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); brcmf_sdio_rxfail(bus, true, false); bus->sdcnt.rxglomfail++; brcmf_sdio_free_glom(bus); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); bus->cur_read.len = 0; return 0; } @@ -1852,7 +1833,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) rd->len_left = rd->len; /* read header first for unknow frame length */ - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); if (!rd->len) { ret = brcmf_sdiod_recv_buf(bus->sdiodev, bus->rxhdr, BRCMF_FIRSTREAD); @@ -1862,7 +1843,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) ret); bus->sdcnt.rx_hdrfail++; brcmf_sdio_rxfail(bus, true, true); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); continue; } @@ -1872,7 +1853,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd, BRCMF_SDIO_FT_NORMAL)) { - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); if (!bus->rxpending) break; else @@ -1888,7 +1869,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) rd->len_nxtfrm = 0; /* treat all packet as event if we don't know */ rd->channel = SDPCM_EVENT_CHANNEL; - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); continue; } rd->len_left = rd->len > BRCMF_FIRSTREAD ? 
@@ -1905,7 +1886,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) brcmf_err("brcmu_pkt_buf_get_skb failed\n"); brcmf_sdio_rxfail(bus, false, RETRYCHAN(rd->channel)); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); continue; } skb_pull(pkt, head_read); @@ -1913,16 +1894,16 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt); bus->sdcnt.f2rxdata++; - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); if (ret < 0) { brcmf_err("read %d bytes from channel %d failed: %d\n", rd->len, rd->channel, ret); brcmu_pkt_buf_free_skb(pkt); - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); brcmf_sdio_rxfail(bus, true, RETRYCHAN(rd->channel)); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); continue; } @@ -1933,7 +1914,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) } else { memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN); rd_new.seq_num = rd->seq_num; - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new, BRCMF_SDIO_FT_NORMAL)) { rd->len = 0; @@ -1946,11 +1927,11 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) roundup(rd_new.len, 16) >> 4); rd->len = 0; brcmf_sdio_rxfail(bus, true, true); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); brcmu_pkt_buf_free_skb(pkt); continue; } - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); rd->len_nxtfrm = rd_new.len_nxtfrm; rd->channel = rd_new.channel; rd->dat_offset = rd_new.dat_offset; @@ -1966,9 +1947,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) rd_new.seq_num); /* Force retry w/normal header read */ rd->len = 0; - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); brcmf_sdio_rxfail(bus, false, true); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); brcmu_pkt_buf_free_skb(pkt); continue; } @@ -1991,9 +1972,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) } else { brcmf_err("%s: glom superframe w/o " "descriptor!\n", __func__); - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); brcmf_sdio_rxfail(bus, false, false); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); } /* prepare the descriptor for the next read */ rd->len = rd->len_nxtfrm << 4; @@ -2091,7 +2072,7 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus, int ntail, ret; sdiodev = bus->sdiodev; - blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize; + blksize = sdiodev->func2->cur_blksize; /* sg entry alignment should be a divisor of block size */ WARN_ON(blksize % bus->sgentry_align); @@ -2270,14 +2251,14 @@ static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq, if (ret) goto done; - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq); bus->sdcnt.f2txdata++; if (ret < 0) brcmf_sdio_txfail(bus); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); done: brcmf_sdio_txpkt_postp(bus, pktq); @@ -2295,6 +2276,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes) { struct sk_buff *pkt; struct sk_buff_head pktq; + u32 
intstat_addr = bus->sdio_core->base + SD_REG(intstatus); u32 intstatus = 0; int ret = 0, prec_out, i; uint cnt = 0; @@ -2332,11 +2314,11 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes) /* In poll mode, need to check for other events */ if (!bus->intr) { /* Check device status, signal pending interrupt */ - sdio_claim_host(bus->sdiodev->func[1]); - ret = r_sdreg32(bus, &intstatus, - offsetof(struct sdpcmd_regs, - intstatus)); - sdio_release_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); + intstatus = brcmf_sdiod_readl(bus->sdiodev, + intstat_addr, &ret); + sdio_release_host(bus->sdiodev->func1); + bus->sdcnt.f2txdata++; if (ret != 0) break; @@ -2419,12 +2401,13 @@ static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len) static void brcmf_sdio_bus_stop(struct device *dev) { - u32 local_hostintmask; - u8 saveclk; - int err; struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; struct brcmf_sdio *bus = sdiodev->bus; + struct brcmf_core *core = bus->sdio_core; + u32 local_hostintmask; + u8 saveclk; + int err; brcmf_dbg(TRACE, "Enter\n"); @@ -2435,35 +2418,37 @@ static void brcmf_sdio_bus_stop(struct device *dev) } if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) { - sdio_claim_host(sdiodev->func[1]); + sdio_claim_host(sdiodev->func1); /* Enable clock for device interrupts */ brcmf_sdio_bus_sleep(bus, false, false); /* Disable and clear interrupts at the chip level also */ - w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask)); + brcmf_sdiod_writel(sdiodev, core->base + SD_REG(hostintmask), + 0, NULL); + local_hostintmask = bus->hostintmask; bus->hostintmask = 0; /* Force backplane clocks to assure F2 interrupt propagates */ - saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, + saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err); if (!err) - brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, - (saveclk | SBSDIO_FORCE_HT), &err); + brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); if (err) brcmf_err("Failed to force clock for F2: err %d\n", err); /* Turn off the bus (F2), free any pending packets */ brcmf_dbg(INTR, "disable SDIO interrupts\n"); - sdio_disable_func(sdiodev->func[SDIO_FUNC_2]); + sdio_disable_func(sdiodev->func2); /* Clear any pending interrupts now that F2 is disabled */ - w_sdreg32(bus, local_hostintmask, - offsetof(struct sdpcmd_regs, intstatus)); + brcmf_sdiod_writel(sdiodev, core->base + SD_REG(intstatus), + local_hostintmask, NULL); - sdio_release_host(sdiodev->func[1]); + sdio_release_host(sdiodev->func1); } /* Clear the data packet queues */ brcmu_pktq_flush(&bus->txq, true, NULL, NULL); @@ -2501,15 +2486,14 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus) static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus) { - struct brcmf_core *buscore; + struct brcmf_core *core = bus->sdio_core; u32 addr; unsigned long val; int ret; - buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV); - addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus); + addr = core->base + SD_REG(intstatus); - val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret); + val = brcmf_sdiod_readl(bus->sdiodev, addr, &ret); bus->sdcnt.f1regdata++; if (ret != 0) return ret; @@ -2519,7 +2503,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus) /* Clear interrupts */ if (val) { - brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret); + brcmf_sdiod_writel(bus->sdiodev, addr, val, &ret); 
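In brcmf_sdio_intr_rstatus() above, the SDIO core's intstatus register is write-1-to-clear: the pending bits are read through brcmf_sdiod_readl(), the same value is written back to acknowledge them, and the bits are folded into bus->intstatus for the DPC to process later. A condensed sketch of that read/ack/accumulate pattern (same accessors as the patch; the function name is illustrative):

	/* Illustrative only: acknowledge pending interrupts by writing the
	 * read value back (write-1-to-clear) and accumulate them atomically.
	 */
	static int example_ack_intstatus(struct brcmf_sdio *bus, u32 addr)
	{
		unsigned long val;
		int ret;

		val = brcmf_sdiod_readl(bus->sdiodev, addr, &ret);
		if (ret)
			return ret;

		if (val) {
			brcmf_sdiod_writel(bus->sdiodev, addr, val, &ret);
			atomic_or(val, &bus->intstatus);
		}

		return ret;
	}
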
bus->sdcnt.f1regdata++; atomic_or(val, &bus->intstatus); } @@ -2529,7 +2513,9 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus) static void brcmf_sdio_dpc(struct brcmf_sdio *bus) { + struct brcmf_sdio_dev *sdiod = bus->sdiodev; u32 newstatus = 0; + u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus); unsigned long intstatus; uint txlimit = bus->txbound; /* Tx frames to send before resched */ uint framecnt; /* Temporary counter of tx/rx frames */ @@ -2537,7 +2523,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus) brcmf_dbg(TRACE, "Enter\n"); - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); /* If waiting for HTAVAIL, check status */ if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) { @@ -2545,23 +2531,23 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus) #ifdef DEBUG /* Check for inconsistent device control */ - devctl = brcmf_sdiod_regrb(bus->sdiodev, - SBSDIO_DEVICE_CTL, &err); + devctl = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_DEVICE_CTL, + &err); #endif /* DEBUG */ /* Read CSR, if clock on switch to AVAIL, else ignore */ - clkctl = brcmf_sdiod_regrb(bus->sdiodev, + clkctl = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err); brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl); if (SBSDIO_HTAV(clkctl)) { - devctl = brcmf_sdiod_regrb(bus->sdiodev, + devctl = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_DEVICE_CTL, &err); devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; - brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL, - devctl, &err); + brcmf_sdiod_writeb(bus->sdiodev, + SBSDIO_DEVICE_CTL, devctl, &err); bus->clkstate = CLK_AVAIL; } } @@ -2584,11 +2570,10 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus) */ if (intstatus & I_HMB_FC_CHANGE) { intstatus &= ~I_HMB_FC_CHANGE; - err = w_sdreg32(bus, I_HMB_FC_CHANGE, - offsetof(struct sdpcmd_regs, intstatus)); + brcmf_sdiod_writel(sdiod, intstat_addr, I_HMB_FC_CHANGE, &err); + + newstatus = brcmf_sdiod_readl(sdiod, intstat_addr, &err); - err = r_sdreg32(bus, &newstatus, - offsetof(struct sdpcmd_regs, intstatus)); bus->sdcnt.f1regdata += 2; atomic_set(&bus->fcstate, !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE))); @@ -2601,7 +2586,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus) intstatus |= brcmf_sdio_hostmail(bus); } - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); /* Generally don't ask for these, can get CRC errors... 
*/ if (intstatus & I_WR_OOSYNC) { @@ -2644,7 +2629,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus) if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) && data_ok(bus)) { - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); if (bus->ctrl_frame_stat) { err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf, bus->ctrl_frame_len); @@ -2652,7 +2637,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus) wmb(); bus->ctrl_frame_stat = false; } - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); brcmf_sdio_wait_event_wakeup(bus); } /* Send queued frames (limit 1 if rx may still be pending) */ @@ -2668,14 +2653,14 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus) brcmf_err("failed backplane access over SDIO, halting operation\n"); atomic_set(&bus->intstatus, 0); if (bus->ctrl_frame_stat) { - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); if (bus->ctrl_frame_stat) { bus->ctrl_frame_err = -ENODEV; wmb(); bus->ctrl_frame_stat = false; brcmf_sdio_wait_event_wakeup(bus); } - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); } } else if (atomic_read(&bus->intstatus) || atomic_read(&bus->ipend) > 0 || @@ -2890,13 +2875,13 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) CTL_DONE_TIMEOUT); ret = 0; if (bus->ctrl_frame_stat) { - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); if (bus->ctrl_frame_stat) { brcmf_dbg(SDIO, "ctrl_frame timeout\n"); bus->ctrl_frame_stat = false; ret = -ETIMEDOUT; } - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); } if (!ret) { brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n", @@ -3020,7 +3005,7 @@ static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus, return 0; } - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); if (sh->assert_file_addr != 0) { error = brcmf_sdiod_ramrw(bus->sdiodev, false, sh->assert_file_addr, (u8 *)file, 80); @@ -3033,7 +3018,7 @@ static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus, if (error < 0) return error; } - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); seq_printf(seq, "dongle assert: %s:%d: assert(%s)\n", file, sh->assert_line, expr); @@ -3307,7 +3292,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus, int bcmerror; u32 rstvec; - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); brcmf_sdio_clkctl(bus, CLK_AVAIL, false); rstvec = get_unaligned_le32(fw->data); @@ -3336,7 +3321,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus, err: brcmf_sdio_clkctl(bus, CLK_SDONLY, false); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); return bcmerror; } @@ -3347,31 +3332,31 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus) brcmf_dbg(TRACE, "Enter\n"); - val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err); + val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err); if (err) { brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n"); return; } val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT; - brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err); + brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err); if (err) { brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n"); return; } /* Add CMD14 Support */ - brcmf_sdiod_regwb(bus->sdiodev, 
SDIO_CCCR_BRCM_CARDCAP, - (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | - SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT), - &err); + brcmf_sdiod_func0_wb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP, + (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | + SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT), + &err); if (err) { brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n"); return; } - brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, - SBSDIO_FORCE_HT, &err); + brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, + SBSDIO_FORCE_HT, &err); if (err) { brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n"); return; @@ -3385,16 +3370,17 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus) /* enable KSO bit */ static int brcmf_sdio_kso_init(struct brcmf_sdio *bus) { + struct brcmf_core *core = bus->sdio_core; u8 val; int err = 0; brcmf_dbg(TRACE, "Enter\n"); /* KSO bit added in SDIO core rev 12 */ - if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) + if (core->rev < 12) return 0; - val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err); + val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err); if (err) { brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n"); return err; @@ -3403,8 +3389,8 @@ static int brcmf_sdio_kso_init(struct brcmf_sdio *bus) if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) { val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); - brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, - val, &err); + brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, + val, &err); if (err) { brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n"); return err; @@ -3420,6 +3406,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev) struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; struct brcmf_sdio *bus = sdiodev->bus; + struct brcmf_core *core = bus->sdio_core; uint pad_size; u32 value; int err; @@ -3428,7 +3415,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev) * a device perspective, ie. bus:txglom affects the * bus transfers from device to host. */ - if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) { + if (core->rev < 12) { /* for sdio core rev < 12, disable txgloming */ value = 0; err = brcmf_iovar_data_set(dev, "bus:txglom", &value, @@ -3449,7 +3436,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev) if (sdiodev->sg_support) { bus->txglom = false; value = 1; - pad_size = bus->sdiodev->func[2]->cur_blksize << 1; + pad_size = bus->sdiodev->func2->cur_blksize << 1; err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom", &value, sizeof(u32)); if (err < 0) { @@ -3491,7 +3478,7 @@ static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data, address = bus->ci->rambase; offset = err = 0; - sdio_claim_host(sdiodev->func[1]); + sdio_claim_host(sdiodev->func1); while (offset < mem_size) { len = ((offset + MEMBLOCK) < mem_size) ? 
MEMBLOCK : mem_size - offset; @@ -3507,7 +3494,7 @@ static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data, } done: - sdio_release_host(sdiodev->func[1]); + sdio_release_host(sdiodev->func1); return err; } @@ -3564,11 +3551,10 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) if (!bus->dpc_triggered) { u8 devpend; - sdio_claim_host(bus->sdiodev->func[1]); - devpend = brcmf_sdiod_regrb(bus->sdiodev, - SDIO_CCCR_INTx, - NULL); - sdio_release_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); + devpend = brcmf_sdiod_func0_rb(bus->sdiodev, + SDIO_CCCR_INTx, NULL); + sdio_release_host(bus->sdiodev->func1); intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2); } @@ -3594,13 +3580,13 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) bus->console.count += jiffies_to_msecs(BRCMF_WD_POLL); if (bus->console.count >= bus->console_interval) { bus->console.count -= bus->console_interval; - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); /* Make sure backplane clock is on */ brcmf_sdio_bus_sleep(bus, false, false); if (brcmf_sdio_readconsole(bus) < 0) /* stop on error */ bus->console_interval = 0; - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); } } #endif /* DEBUG */ @@ -3613,11 +3599,11 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) bus->idlecount++; if (bus->idlecount > bus->idletime) { brcmf_dbg(SDIO, "idle\n"); - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); brcmf_sdio_wd_timer(bus, false); bus->idlecount = 0; brcmf_sdio_bus_sleep(bus, true, false); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); } } else { bus->idlecount = 0; @@ -3705,12 +3691,12 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev, } } addr = CORE_CC_REG(pmu->base, chipcontrol_addr); - brcmf_sdiod_regwl(sdiodev, addr, 1, NULL); - cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL); + brcmf_sdiod_writel(sdiodev, addr, 1, NULL); + cc_data_temp = brcmf_sdiod_readl(sdiodev, addr, NULL); cc_data_temp &= ~str_mask; drivestrength_sel <<= str_shift; cc_data_temp |= drivestrength_sel; - brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL); + brcmf_sdiod_writel(sdiodev, addr, cc_data_temp, NULL); brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n", str_tab[i].strength, drivestrength, cc_data_temp); @@ -3725,7 +3711,7 @@ static int brcmf_sdio_buscoreprep(void *ctx) /* Try forcing SDIO core to do ALPAvail request only */ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ; - brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); + brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); if (err) { brcmf_err("error writing for HT off\n"); return err; @@ -3733,8 +3719,7 @@ static int brcmf_sdio_buscoreprep(void *ctx) /* If register supported, wait for ALPAvail and then force ALP */ /* This may take up to 15 milliseconds */ - clkval = brcmf_sdiod_regrb(sdiodev, - SBSDIO_FUNC1_CHIPCLKCSR, NULL); + clkval = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, NULL); if ((clkval & ~SBSDIO_AVBITS) != clkset) { brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n", @@ -3742,10 +3727,11 @@ static int brcmf_sdio_buscoreprep(void *ctx) return -EACCES; } - SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev, - SBSDIO_FUNC1_CHIPCLKCSR, NULL)), - !SBSDIO_ALPAV(clkval)), - PMU_MAX_TRANSITION_DLY); + SPINWAIT(((clkval = brcmf_sdiod_readb(sdiodev, 
SBSDIO_FUNC1_CHIPCLKCSR, + NULL)), + !SBSDIO_ALPAV(clkval)), + PMU_MAX_TRANSITION_DLY); + if (!SBSDIO_ALPAV(clkval)) { brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n", clkval); @@ -3753,11 +3739,11 @@ static int brcmf_sdio_buscoreprep(void *ctx) } clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP; - brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); + brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); udelay(65); /* Also, disable the extra SDIO pull-ups */ - brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL); + brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL); return 0; } @@ -3766,13 +3752,12 @@ static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip, u32 rstvec) { struct brcmf_sdio_dev *sdiodev = ctx; - struct brcmf_core *core; + struct brcmf_core *core = sdiodev->bus->sdio_core; u32 reg_addr; /* clear all interrupts */ - core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV); - reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus); - brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL); + reg_addr = core->base + SD_REG(intstatus); + brcmf_sdiod_writel(sdiodev, reg_addr, 0xFFFFFFFF, NULL); if (rstvec) /* Write reset vector to address 0 */ @@ -3785,16 +3770,25 @@ static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr) struct brcmf_sdio_dev *sdiodev = ctx; u32 val, rev; - val = brcmf_sdiod_regrl(sdiodev, addr, NULL); - if ((sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 || - sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4339) && - addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) { + val = brcmf_sdiod_readl(sdiodev, addr, NULL); + + /* + * this is a bit of special handling if reading the chipcommon chipid + * register. The 4339 is a next-gen of the 4335. It uses the same + * SDIO device id as 4335 and the chipid register returns 4335 as well. + * It can be identified as 4339 by looking at the chip revision. It + * is corrected here so the chip.c module has the right info. 
+ */ + if (addr == CORE_CC_REG(SI_ENUM_BASE, chipid) && + (sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4339 || + sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4335_4339)) { rev = (val & CID_REV_MASK) >> CID_REV_SHIFT; if (rev >= 2) { val &= ~CID_ID_MASK; val |= BRCM_CC_4339_CHIP_ID; } } + return val; } @@ -3802,7 +3796,7 @@ static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val) { struct brcmf_sdio_dev *sdiodev = ctx; - brcmf_sdiod_regwl(sdiodev, addr, val, NULL); + brcmf_sdiod_writel(sdiodev, addr, val, NULL); } static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = { @@ -3823,21 +3817,21 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus) u32 drivestrength; sdiodev = bus->sdiodev; - sdio_claim_host(sdiodev->func[1]); + sdio_claim_host(sdiodev->func1); pr_debug("F1 signature read @0x18000000=0x%4x\n", - brcmf_sdiod_regrl(sdiodev, SI_ENUM_BASE, NULL)); + brcmf_sdiod_readl(sdiodev, SI_ENUM_BASE, NULL)); /* * Force PLL off until brcmf_chip_attach() * programs PLL control regs */ - brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, - BRCMF_INIT_CLKCTL1, &err); + brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, BRCMF_INIT_CLKCTL1, + &err); if (!err) - clkctl = brcmf_sdiod_regrb(sdiodev, - SBSDIO_FUNC1_CHIPCLKCSR, &err); + clkctl = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, + &err); if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) { brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n", @@ -3851,6 +3845,17 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus) bus->ci = NULL; goto fail; } + + /* Pick up the SDIO core info struct from chip.c */ + bus->sdio_core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV); + if (!bus->sdio_core) + goto fail; + + /* Pick up the CHIPCOMMON core info struct, for bulk IO in bcmsdh.c */ + sdiodev->cc_core = brcmf_chip_get_core(bus->ci, BCMA_CORE_CHIPCOMMON); + if (!sdiodev->cc_core) + goto fail; + sdiodev->settings = brcmf_get_module_param(sdiodev->dev, BRCMF_BUSTYPE_SDIO, bus->ci->chip, @@ -3879,8 +3884,8 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus) /* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ * is true or when platform data OOB irq is true). 
*/ - if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) && - ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) || + if ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) && + ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_WAKE_SDIO_IRQ) || (sdiodev->settings->bus.sdio.oob_irq_supported))) sdiodev->bus_if->wowl_supported = true; #endif @@ -3897,29 +3902,29 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus) brcmf_sdio_drivestrengthinit(sdiodev, bus->ci, drivestrength); /* Set card control so an SDIO card reset does a WLAN backplane reset */ - reg_val = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err); + reg_val = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err); if (err) goto fail; reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET; - brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err); + brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err); if (err) goto fail; /* set PMUControl so a backplane reset does PMU state reload */ reg_addr = CORE_CC_REG(brcmf_chip_get_pmu(bus->ci)->base, pmucontrol); - reg_val = brcmf_sdiod_regrl(sdiodev, reg_addr, &err); + reg_val = brcmf_sdiod_readl(sdiodev, reg_addr, &err); if (err) goto fail; reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT); - brcmf_sdiod_regwl(sdiodev, reg_addr, reg_val, &err); + brcmf_sdiod_writel(sdiodev, reg_addr, reg_val, &err); if (err) goto fail; - sdio_release_host(sdiodev->func[1]); + sdio_release_host(sdiodev->func1); brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN); @@ -3940,7 +3945,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus) return true; fail: - sdio_release_host(sdiodev->func[1]); + sdio_release_host(sdiodev->func1); return false; } @@ -4020,22 +4025,21 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, const struct firmware *code, void *nvram, u32 nvram_len) { - struct brcmf_bus *bus_if; - struct brcmf_sdio_dev *sdiodev; - struct brcmf_sdio *bus; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct brcmf_sdio *bus = sdiodev->bus; + struct brcmf_sdio_dev *sdiod = bus->sdiodev; + struct brcmf_core *core = bus->sdio_core; u8 saveclk; brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err); - bus_if = dev_get_drvdata(dev); - sdiodev = bus_if->bus_priv.sdio; + if (err) goto fail; if (!bus_if->drvr) return; - bus = sdiodev->bus; - /* try to download image and nvram to the dongle */ bus->alp_only = true; err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len); @@ -4047,7 +4051,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, bus->sdcnt.tickcnt = 0; brcmf_sdio_wd_timer(bus, true); - sdio_claim_host(sdiodev->func[1]); + sdio_claim_host(sdiodev->func1); /* Make sure backplane clock is on, needed to generate F2 interrupt */ brcmf_sdio_clkctl(bus, CLK_AVAIL, false); @@ -4055,10 +4059,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, goto release; /* Force clocks on backplane to be sure F2 interrupt propagates */ - saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err); + saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err); if (!err) { - brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, - (saveclk | SBSDIO_FORCE_HT), &err); + brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); } if (err) { brcmf_err("Failed to force clock for F2: err %d\n", err); @@ -4066,10 +4070,10 @@ static void 
brcmf_sdio_firmware_callback(struct device *dev, int err, } /* Enable function 2 (frame transfers) */ - w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT, - offsetof(struct sdpcmd_regs, tosbmailboxdata)); - err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]); + brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailboxdata), + SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT, NULL); + err = sdio_enable_func(sdiodev->func2); brcmf_dbg(INFO, "enable F2: err=%d\n", err); @@ -4077,13 +4081,14 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, if (!err) { /* Set up the interrupt mask and enable interrupts */ bus->hostintmask = HOSTINTMASK; - w_sdreg32(bus, bus->hostintmask, - offsetof(struct sdpcmd_regs, hostintmask)); + brcmf_sdiod_writel(sdiod, core->base + SD_REG(hostintmask), + bus->hostintmask, NULL); + - brcmf_sdiod_regwb(sdiodev, SBSDIO_WATERMARK, 8, &err); + brcmf_sdiod_writeb(sdiodev, SBSDIO_WATERMARK, 8, &err); } else { /* Disable F2 again */ - sdio_disable_func(sdiodev->func[SDIO_FUNC_2]); + sdio_disable_func(sdiodev->func2); goto release; } @@ -4091,8 +4096,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, brcmf_sdio_sr_init(bus); } else { /* Restore previous clock setting */ - brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, - saveclk, &err); + brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, + saveclk, &err); } if (err == 0) { @@ -4108,7 +4113,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, if (err != 0) brcmf_sdio_clkctl(bus, CLK_NONE, false); - sdio_release_host(sdiodev->func[1]); + sdio_release_host(sdiodev->func1); err = brcmf_bus_started(dev); if (err != 0) { @@ -4118,10 +4123,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, return; release: - sdio_release_host(sdiodev->func[1]); + sdio_release_host(sdiodev->func1); fail: brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); - device_release_driver(&sdiodev->func[2]->dev); + device_release_driver(&sdiodev->func2->dev); device_release_driver(dev); } @@ -4148,7 +4153,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) /* single-threaded workqueue */ wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM, - dev_name(&sdiodev->func[1]->dev)); + dev_name(&sdiodev->func1->dev)); if (!wq) { brcmf_err("insufficient memory to create txworkqueue\n"); goto fail; @@ -4174,7 +4179,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) init_completion(&bus->watchdog_wait); bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread, bus, "brcmf_wdog/%s", - dev_name(&sdiodev->func[1]->dev)); + dev_name(&sdiodev->func1->dev)); if (IS_ERR(bus->watchdog_tsk)) { pr_warn("brcmf_watchdog thread failed to start\n"); bus->watchdog_tsk = NULL; @@ -4200,7 +4205,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) } /* Query the F2 block size, set roundup accordingly */ - bus->blocksize = bus->sdiodev->func[2]->cur_blksize; + bus->blocksize = bus->sdiodev->func2->cur_blksize; bus->roundup = min(max_roundup, bus->blocksize); /* Allocate buffers */ @@ -4216,17 +4221,17 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) } } - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); /* Disable F2 to clear any intermediate frame state on the dongle */ - sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]); + sdio_disable_func(bus->sdiodev->func2); bus->rxflow = false; /* Done with backplane-dependent accesses, can drop clock... 
*/ - brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL); + brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); /* ...and initialize clock/power states */ bus->clkstate = CLK_SDONLY; @@ -4278,7 +4283,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) if (bus->ci) { if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) { - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); brcmf_sdio_wd_timer(bus, false); brcmf_sdio_clkctl(bus, CLK_AVAIL, false); /* Leave the device in state where it is @@ -4288,7 +4293,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) msleep(20); brcmf_chip_set_passive(bus->ci); brcmf_sdio_clkctl(bus, CLK_NONE, false); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); } brcmf_chip_detach(bus->ci); } @@ -4335,9 +4340,9 @@ int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep) { int ret; - sdio_claim_host(bus->sdiodev->func[1]); + sdio_claim_host(bus->sdiodev->func1); ret = brcmf_sdio_bus_sleep(bus, sleep, false); - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(bus->sdiodev->func1); return ret; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h index f3da32fc6360..7faed831f07d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h @@ -21,10 +21,6 @@ #include <linux/firmware.h> #include "firmware.h" -#define SDIO_FUNC_0 0 -#define SDIO_FUNC_1 1 -#define SDIO_FUNC_2 2 - #define SDIOD_FBR_SIZE 0x100 /* io_en */ @@ -39,28 +35,29 @@ #define INTR_STATUS_FUNC1 0x2 #define INTR_STATUS_FUNC2 0x4 -/* Maximum number of I/O funcs */ -#define SDIOD_MAX_IOFUNCS 7 - /* mask of register map */ #define REG_F0_REG_MASK 0x7FF #define REG_F1_MISC_MASK 0x1FFFF -/* as of sdiod rev 0, supports 3 functions */ -#define SBSDIO_NUM_FUNCTION 3 - /* function 0 vendor specific CCCR registers */ + #define SDIO_CCCR_BRCM_CARDCAP 0xf0 -#define SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT 0x02 -#define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT 0x04 -#define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC 0x08 -#define SDIO_CCCR_BRCM_CARDCTRL 0xf1 -#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET 0x02 -#define SDIO_CCCR_BRCM_SEPINT 0xf2 +#define SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT BIT(1) +#define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT BIT(2) +#define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC BIT(3) + +/* Interrupt enable bits for each function */ +#define SDIO_CCCR_IEN_FUNC0 BIT(0) +#define SDIO_CCCR_IEN_FUNC1 BIT(1) +#define SDIO_CCCR_IEN_FUNC2 BIT(2) + +#define SDIO_CCCR_BRCM_CARDCTRL 0xf1 +#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET BIT(1) -#define SDIO_SEPINT_MASK 0x01 -#define SDIO_SEPINT_OE 0x02 -#define SDIO_SEPINT_ACT_HI 0x04 +#define SDIO_CCCR_BRCM_SEPINT 0xf2 +#define SDIO_CCCR_BRCM_SEPINT_MASK BIT(0) +#define SDIO_CCCR_BRCM_SEPINT_OE BIT(1) +#define SDIO_CCCR_BRCM_SEPINT_ACT_HI BIT(2) /* function 1 miscellaneous registers */ @@ -131,11 +128,6 @@ /* with b15, maps to 32-bit SB access */ #define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000 -/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */ - -#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */ -#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */ -#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */ /* Address bits from SBADDR regs */ #define SBSDIO_SBWINDOW_MASK 0xffff8000 @@ -178,9 +170,10 @@ struct brcmf_sdio; struct 
brcmf_sdiod_freezer; struct brcmf_sdio_dev { - struct sdio_func *func[SDIO_MAX_FUNCS]; - u8 num_funcs; /* Supported funcs on client */ + struct sdio_func *func1; + struct sdio_func *func2; u32 sbwad; /* Save backplane window address */ + struct brcmf_core *cc_core; /* chipcommon core info struct */ struct brcmf_sdio *bus; struct device *dev; struct brcmf_bus *bus_if; @@ -296,13 +289,24 @@ struct sdpcmd_regs { int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev); void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev); -/* sdio device register access interface */ -u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret); -u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret); -void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data, - int *ret); -void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data, - int *ret); +/* SDIO device register access interface */ +/* Accessors for SDIO Function 0 */ +#define brcmf_sdiod_func0_rb(sdiodev, addr, r) \ + sdio_f0_readb((sdiodev)->func1, (addr), (r)) + +#define brcmf_sdiod_func0_wb(sdiodev, addr, v, ret) \ + sdio_f0_writeb((sdiodev)->func1, (v), (addr), (ret)) + +/* Accessors for SDIO Function 1 */ +#define brcmf_sdiod_readb(sdiodev, addr, r) \ + sdio_readb((sdiodev)->func1, (addr), (r)) + +#define brcmf_sdiod_writeb(sdiodev, addr, v, ret) \ + sdio_writeb((sdiodev)->func1, (v), (addr), (ret)) + +u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret); +void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data, + int *ret); /* Buffer transfer to/from device (client) core via cmd53. * fn: function number @@ -342,7 +346,8 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address, u8 *data, uint size); /* Issue an abort to the specified function */ -int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn); +int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func); + void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev); void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev, enum brcmf_sdiod_state state); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c index 763e8ba6b178..7e01981bc5c8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c @@ -16049,8 +16049,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi) wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_UPDATEGAINU, rfseq_updategainu_events, rfseq_updategainu_dlys, - sizeof(rfseq_updategainu_events) / - sizeof(rfseq_updategainu_events[0])); + ARRAY_SIZE(rfseq_updategainu_events)); mod_phy_reg(pi, 0x153, (0xff << 8), (90 << 8)); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c index dbf50ef6cd75..533bd4b0277e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c @@ -14,6 +14,7 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
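/*
 * Aside on the conversions below (an illustrative sketch, not part of
 * the patch itself): <linux/kernel.h> is included because it supplies
 * ARRAY_SIZE(), which replaces the open-coded element-count idiom used
 * throughout this file:
 *
 *	#define ARRAY_SIZE(arr) \
 *		(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 *
 * __must_be_array() breaks the build if a pointer is passed by mistake,
 * so ARRAY_SIZE(bdi_tbl_rev0) computes the same count as
 * sizeof(bdi_tbl_rev0) / sizeof(bdi_tbl_rev0[0]) while being shorter
 * and safer.
 */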
*/ +#include <linux/kernel.h> #include <types.h> #include "phytbl_n.h" @@ -4437,109 +4438,39 @@ static const u16 loft_lut_core1_rev0[] = { }; const struct phytbl_info mimophytbl_info_rev0_volatile[] = { - {&bdi_tbl_rev0, sizeof(bdi_tbl_rev0) / sizeof(bdi_tbl_rev0[0]), 21, 0, - 16} - , - {&pltlut_tbl_rev0, sizeof(pltlut_tbl_rev0) / sizeof(pltlut_tbl_rev0[0]), - 20, 0, 32} - , - {&gainctrl_lut_core0_rev0, - sizeof(gainctrl_lut_core0_rev0) / sizeof(gainctrl_lut_core0_rev0[0]), - 26, 192, 32} - , - {&gainctrl_lut_core1_rev0, - sizeof(gainctrl_lut_core1_rev0) / sizeof(gainctrl_lut_core1_rev0[0]), - 27, 192, 32} - , - - {&est_pwr_lut_core0_rev0, - sizeof(est_pwr_lut_core0_rev0) / sizeof(est_pwr_lut_core0_rev0[0]), 26, - 0, 8} - , - {&est_pwr_lut_core1_rev0, - sizeof(est_pwr_lut_core1_rev0) / sizeof(est_pwr_lut_core1_rev0[0]), 27, - 0, 8} - , - {&adj_pwr_lut_core0_rev0, - sizeof(adj_pwr_lut_core0_rev0) / sizeof(adj_pwr_lut_core0_rev0[0]), 26, - 64, 8} - , - {&adj_pwr_lut_core1_rev0, - sizeof(adj_pwr_lut_core1_rev0) / sizeof(adj_pwr_lut_core1_rev0[0]), 27, - 64, 8} - , - {&iq_lut_core0_rev0, - sizeof(iq_lut_core0_rev0) / sizeof(iq_lut_core0_rev0[0]), 26, 320, 32} - , - {&iq_lut_core1_rev0, - sizeof(iq_lut_core1_rev0) / sizeof(iq_lut_core1_rev0[0]), 27, 320, 32} - , - {&loft_lut_core0_rev0, - sizeof(loft_lut_core0_rev0) / sizeof(loft_lut_core0_rev0[0]), 26, 448, - 16} - , - {&loft_lut_core1_rev0, - sizeof(loft_lut_core1_rev0) / sizeof(loft_lut_core1_rev0[0]), 27, 448, - 16} - , + {&bdi_tbl_rev0, ARRAY_SIZE(bdi_tbl_rev0), 21, 0, 16}, + {&pltlut_tbl_rev0, ARRAY_SIZE(pltlut_tbl_rev0), 20, 0, 32}, + {&gainctrl_lut_core0_rev0, ARRAY_SIZE(gainctrl_lut_core0_rev0), 26, 192, 32}, + {&gainctrl_lut_core1_rev0, ARRAY_SIZE(gainctrl_lut_core1_rev0), 27, 192, 32}, + {&est_pwr_lut_core0_rev0, ARRAY_SIZE(est_pwr_lut_core0_rev0), 26, 0, 8}, + {&est_pwr_lut_core1_rev0, ARRAY_SIZE(est_pwr_lut_core1_rev0), 27, 0, 8}, + {&adj_pwr_lut_core0_rev0, ARRAY_SIZE(adj_pwr_lut_core0_rev0), 26, 64, 8}, + {&adj_pwr_lut_core1_rev0, ARRAY_SIZE(adj_pwr_lut_core1_rev0), 27, 64, 8}, + {&iq_lut_core0_rev0, ARRAY_SIZE(iq_lut_core0_rev0), 26, 320, 32}, + {&iq_lut_core1_rev0, ARRAY_SIZE(iq_lut_core1_rev0), 27, 320, 32}, + {&loft_lut_core0_rev0, ARRAY_SIZE(loft_lut_core0_rev0), 26, 448, 16}, + {&loft_lut_core1_rev0, ARRAY_SIZE(loft_lut_core1_rev0), 27, 448, 16}, }; const struct phytbl_info mimophytbl_info_rev0[] = { - {&frame_struct_rev0, - sizeof(frame_struct_rev0) / sizeof(frame_struct_rev0[0]), 10, 0, 32} - , - {&frame_lut_rev0, sizeof(frame_lut_rev0) / sizeof(frame_lut_rev0[0]), - 24, 0, 8} - , - {&tmap_tbl_rev0, sizeof(tmap_tbl_rev0) / sizeof(tmap_tbl_rev0[0]), 12, - 0, 32} - , - {&tdtrn_tbl_rev0, sizeof(tdtrn_tbl_rev0) / sizeof(tdtrn_tbl_rev0[0]), - 14, 0, 32} - , - {&intlv_tbl_rev0, sizeof(intlv_tbl_rev0) / sizeof(intlv_tbl_rev0[0]), - 13, 0, 32} - , - {&pilot_tbl_rev0, sizeof(pilot_tbl_rev0) / sizeof(pilot_tbl_rev0[0]), - 11, 0, 16} - , - {&tdi_tbl20_ant0_rev0, - sizeof(tdi_tbl20_ant0_rev0) / sizeof(tdi_tbl20_ant0_rev0[0]), 19, 128, - 32} - , - {&tdi_tbl20_ant1_rev0, - sizeof(tdi_tbl20_ant1_rev0) / sizeof(tdi_tbl20_ant1_rev0[0]), 19, 256, - 32} - , - {&tdi_tbl40_ant0_rev0, - sizeof(tdi_tbl40_ant0_rev0) / sizeof(tdi_tbl40_ant0_rev0[0]), 19, 640, - 32} - , - {&tdi_tbl40_ant1_rev0, - sizeof(tdi_tbl40_ant1_rev0) / sizeof(tdi_tbl40_ant1_rev0[0]), 19, 768, - 32} - , - {&chanest_tbl_rev0, - sizeof(chanest_tbl_rev0) / sizeof(chanest_tbl_rev0[0]), 22, 0, 32} - , - {&mcs_tbl_rev0, sizeof(mcs_tbl_rev0) / sizeof(mcs_tbl_rev0[0]), 18, 0, - 8} - 
, - {&noise_var_tbl0_rev0, - sizeof(noise_var_tbl0_rev0) / sizeof(noise_var_tbl0_rev0[0]), 16, 0, - 32} - , - {&noise_var_tbl1_rev0, - sizeof(noise_var_tbl1_rev0) / sizeof(noise_var_tbl1_rev0[0]), 16, 128, - 32} - , + {&frame_struct_rev0, ARRAY_SIZE(frame_struct_rev0), 10, 0, 32}, + {&frame_lut_rev0, ARRAY_SIZE(frame_lut_rev0), 24, 0, 8}, + {&tmap_tbl_rev0, ARRAY_SIZE(tmap_tbl_rev0), 12, 0, 32}, + {&tdtrn_tbl_rev0, ARRAY_SIZE(tdtrn_tbl_rev0), 14, 0, 32}, + {&intlv_tbl_rev0, ARRAY_SIZE(intlv_tbl_rev0), 13, 0, 32}, + {&pilot_tbl_rev0, ARRAY_SIZE(pilot_tbl_rev0), 11, 0, 16}, + {&tdi_tbl20_ant0_rev0, ARRAY_SIZE(tdi_tbl20_ant0_rev0), 19, 128, 32}, + {&tdi_tbl20_ant1_rev0, ARRAY_SIZE(tdi_tbl20_ant1_rev0), 19, 256, 32}, + {&tdi_tbl40_ant0_rev0, ARRAY_SIZE(tdi_tbl40_ant0_rev0), 19, 640, 32}, + {&tdi_tbl40_ant1_rev0, ARRAY_SIZE(tdi_tbl40_ant1_rev0), 19, 768, 32}, + {&chanest_tbl_rev0, ARRAY_SIZE(chanest_tbl_rev0), 22, 0, 32}, + {&mcs_tbl_rev0, ARRAY_SIZE(mcs_tbl_rev0), 18, 0, 8}, + {&noise_var_tbl0_rev0, ARRAY_SIZE(noise_var_tbl0_rev0), 16, 0, 32}, + {&noise_var_tbl1_rev0, ARRAY_SIZE(noise_var_tbl1_rev0), 16, 128, 32}, }; -const u32 mimophytbl_info_sz_rev0 = - sizeof(mimophytbl_info_rev0) / sizeof(mimophytbl_info_rev0[0]); -const u32 mimophytbl_info_sz_rev0_volatile = - sizeof(mimophytbl_info_rev0_volatile) / - sizeof(mimophytbl_info_rev0_volatile[0]); +const u32 mimophytbl_info_sz_rev0 = ARRAY_SIZE(mimophytbl_info_rev0); +const u32 mimophytbl_info_sz_rev0_volatile = ARRAY_SIZE(mimophytbl_info_rev0_volatile); static const u16 ant_swctrl_tbl_rev3[] = { 0x0082, @@ -9363,132 +9294,53 @@ static const u32 papd_cal_scalars_tbl_core1_rev3[] = { }; const struct phytbl_info mimophytbl_info_rev3_volatile[] = { - {&ant_swctrl_tbl_rev3, - sizeof(ant_swctrl_tbl_rev3) / sizeof(ant_swctrl_tbl_rev3[0]), 9, 0, 16} - , + {&ant_swctrl_tbl_rev3, ARRAY_SIZE(ant_swctrl_tbl_rev3), 9, 0, 16}, }; const struct phytbl_info mimophytbl_info_rev3_volatile1[] = { - {&ant_swctrl_tbl_rev3_1, - sizeof(ant_swctrl_tbl_rev3_1) / sizeof(ant_swctrl_tbl_rev3_1[0]), 9, 0, - 16} - , + {&ant_swctrl_tbl_rev3_1, ARRAY_SIZE(ant_swctrl_tbl_rev3_1), 9, 0, 16}, }; const struct phytbl_info mimophytbl_info_rev3_volatile2[] = { - {&ant_swctrl_tbl_rev3_2, - sizeof(ant_swctrl_tbl_rev3_2) / sizeof(ant_swctrl_tbl_rev3_2[0]), 9, 0, - 16} - , + {&ant_swctrl_tbl_rev3_2, ARRAY_SIZE(ant_swctrl_tbl_rev3_2), 9, 0, 16}, }; const struct phytbl_info mimophytbl_info_rev3_volatile3[] = { - {&ant_swctrl_tbl_rev3_3, - sizeof(ant_swctrl_tbl_rev3_3) / sizeof(ant_swctrl_tbl_rev3_3[0]), 9, 0, - 16} - , + {&ant_swctrl_tbl_rev3_3, ARRAY_SIZE(ant_swctrl_tbl_rev3_3), 9, 0, 16}, }; const struct phytbl_info mimophytbl_info_rev3[] = { - {&frame_struct_rev3, - sizeof(frame_struct_rev3) / sizeof(frame_struct_rev3[0]), 10, 0, 32} - , - {&pilot_tbl_rev3, sizeof(pilot_tbl_rev3) / sizeof(pilot_tbl_rev3[0]), - 11, 0, 16} - , - {&tmap_tbl_rev3, sizeof(tmap_tbl_rev3) / sizeof(tmap_tbl_rev3[0]), 12, - 0, 32} - , - {&intlv_tbl_rev3, sizeof(intlv_tbl_rev3) / sizeof(intlv_tbl_rev3[0]), - 13, 0, 32} - , - {&tdtrn_tbl_rev3, sizeof(tdtrn_tbl_rev3) / sizeof(tdtrn_tbl_rev3[0]), - 14, 0, 32} - , - {&noise_var_tbl_rev3, - sizeof(noise_var_tbl_rev3) / sizeof(noise_var_tbl_rev3[0]), 16, 0, 32} - , - {&mcs_tbl_rev3, sizeof(mcs_tbl_rev3) / sizeof(mcs_tbl_rev3[0]), 18, 0, - 16} - , - {&tdi_tbl20_ant0_rev3, - sizeof(tdi_tbl20_ant0_rev3) / sizeof(tdi_tbl20_ant0_rev3[0]), 19, 128, - 32} - , - {&tdi_tbl20_ant1_rev3, - sizeof(tdi_tbl20_ant1_rev3) / sizeof(tdi_tbl20_ant1_rev3[0]), 19, 256, - 32} - , - 
{&tdi_tbl40_ant0_rev3, - sizeof(tdi_tbl40_ant0_rev3) / sizeof(tdi_tbl40_ant0_rev3[0]), 19, 640, - 32} - , - {&tdi_tbl40_ant1_rev3, - sizeof(tdi_tbl40_ant1_rev3) / sizeof(tdi_tbl40_ant1_rev3[0]), 19, 768, - 32} - , - {&pltlut_tbl_rev3, sizeof(pltlut_tbl_rev3) / sizeof(pltlut_tbl_rev3[0]), - 20, 0, 32} - , - {&chanest_tbl_rev3, - sizeof(chanest_tbl_rev3) / sizeof(chanest_tbl_rev3[0]), 22, 0, 32} - , - {&frame_lut_rev3, sizeof(frame_lut_rev3) / sizeof(frame_lut_rev3[0]), - 24, 0, 8} - , - {&est_pwr_lut_core0_rev3, - sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26, - 0, 8} - , - {&est_pwr_lut_core1_rev3, - sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27, - 0, 8} - , - {&adj_pwr_lut_core0_rev3, - sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26, - 64, 8} - , - {&adj_pwr_lut_core1_rev3, - sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27, - 64, 8} - , - {&gainctrl_lut_core0_rev3, - sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]), - 26, 192, 32} - , - {&gainctrl_lut_core1_rev3, - sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]), - 27, 192, 32} - , - {&iq_lut_core0_rev3, - sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32} - , - {&iq_lut_core1_rev3, - sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32} - , - {&loft_lut_core0_rev3, - sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448, - 16} - , - {&loft_lut_core1_rev3, - sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448, - 16} + {&frame_struct_rev3, ARRAY_SIZE(frame_struct_rev3), 10, 0, 32}, + {&pilot_tbl_rev3, ARRAY_SIZE(pilot_tbl_rev3), 11, 0, 16}, + {&tmap_tbl_rev3, ARRAY_SIZE(tmap_tbl_rev3), 12, 0, 32}, + {&intlv_tbl_rev3, ARRAY_SIZE(intlv_tbl_rev3), 13, 0, 32}, + {&tdtrn_tbl_rev3, ARRAY_SIZE(tdtrn_tbl_rev3), 14, 0, 32}, + {&noise_var_tbl_rev3, ARRAY_SIZE(noise_var_tbl_rev3), 16, 0, 32}, + {&mcs_tbl_rev3, ARRAY_SIZE(mcs_tbl_rev3), 18, 0, 16}, + {&tdi_tbl20_ant0_rev3, ARRAY_SIZE(tdi_tbl20_ant0_rev3), 19, 128, 32}, + {&tdi_tbl20_ant1_rev3, ARRAY_SIZE(tdi_tbl20_ant1_rev3), 19, 256, 32}, + {&tdi_tbl40_ant0_rev3, ARRAY_SIZE(tdi_tbl40_ant0_rev3), 19, 640, 32}, + {&tdi_tbl40_ant1_rev3, ARRAY_SIZE(tdi_tbl40_ant1_rev3), 19, 768, 32}, + {&pltlut_tbl_rev3, ARRAY_SIZE(pltlut_tbl_rev3), 20, 0, 32}, + {&chanest_tbl_rev3, ARRAY_SIZE(chanest_tbl_rev3), 22, 0, 32}, + {&frame_lut_rev3, ARRAY_SIZE(frame_lut_rev3), 24, 0, 8}, + {&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8}, + {&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8}, + {&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8}, + {&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8}, + {&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32}, + {&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32}, + {&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32}, + {&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32}, + {&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16}, + {&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16} }; -const u32 mimophytbl_info_sz_rev3 = - sizeof(mimophytbl_info_rev3) / sizeof(mimophytbl_info_rev3[0]); -const u32 mimophytbl_info_sz_rev3_volatile = - sizeof(mimophytbl_info_rev3_volatile) / - sizeof(mimophytbl_info_rev3_volatile[0]); -const u32 mimophytbl_info_sz_rev3_volatile1 = - 
sizeof(mimophytbl_info_rev3_volatile1) / - sizeof(mimophytbl_info_rev3_volatile1[0]); -const u32 mimophytbl_info_sz_rev3_volatile2 = - sizeof(mimophytbl_info_rev3_volatile2) / - sizeof(mimophytbl_info_rev3_volatile2[0]); -const u32 mimophytbl_info_sz_rev3_volatile3 = - sizeof(mimophytbl_info_rev3_volatile3) / - sizeof(mimophytbl_info_rev3_volatile3[0]); +const u32 mimophytbl_info_sz_rev3 = ARRAY_SIZE(mimophytbl_info_rev3); +const u32 mimophytbl_info_sz_rev3_volatile = ARRAY_SIZE(mimophytbl_info_rev3_volatile); +const u32 mimophytbl_info_sz_rev3_volatile1 = ARRAY_SIZE(mimophytbl_info_rev3_volatile1); +const u32 mimophytbl_info_sz_rev3_volatile2 = ARRAY_SIZE(mimophytbl_info_rev3_volatile2); +const u32 mimophytbl_info_sz_rev3_volatile3 = ARRAY_SIZE(mimophytbl_info_rev3_volatile3); static const u32 tmap_tbl_rev7[] = { 0x8a88aa80, @@ -10469,162 +10321,58 @@ static const u32 papd_cal_scalars_tbl_core1_rev7[] = { }; const struct phytbl_info mimophytbl_info_rev7[] = { - {&frame_struct_rev3, - sizeof(frame_struct_rev3) / sizeof(frame_struct_rev3[0]), 10, 0, 32} - , - {&pilot_tbl_rev3, sizeof(pilot_tbl_rev3) / sizeof(pilot_tbl_rev3[0]), - 11, 0, 16} - , - {&tmap_tbl_rev7, sizeof(tmap_tbl_rev7) / sizeof(tmap_tbl_rev7[0]), 12, - 0, 32} - , - {&intlv_tbl_rev3, sizeof(intlv_tbl_rev3) / sizeof(intlv_tbl_rev3[0]), - 13, 0, 32} - , - {&tdtrn_tbl_rev3, sizeof(tdtrn_tbl_rev3) / sizeof(tdtrn_tbl_rev3[0]), - 14, 0, 32} - , - {&noise_var_tbl_rev7, - sizeof(noise_var_tbl_rev7) / sizeof(noise_var_tbl_rev7[0]), 16, 0, 32} - , - {&mcs_tbl_rev3, sizeof(mcs_tbl_rev3) / sizeof(mcs_tbl_rev3[0]), 18, 0, - 16} - , - {&tdi_tbl20_ant0_rev3, - sizeof(tdi_tbl20_ant0_rev3) / sizeof(tdi_tbl20_ant0_rev3[0]), 19, 128, - 32} - , - {&tdi_tbl20_ant1_rev3, - sizeof(tdi_tbl20_ant1_rev3) / sizeof(tdi_tbl20_ant1_rev3[0]), 19, 256, - 32} - , - {&tdi_tbl40_ant0_rev3, - sizeof(tdi_tbl40_ant0_rev3) / sizeof(tdi_tbl40_ant0_rev3[0]), 19, 640, - 32} - , - {&tdi_tbl40_ant1_rev3, - sizeof(tdi_tbl40_ant1_rev3) / sizeof(tdi_tbl40_ant1_rev3[0]), 19, 768, - 32} - , - {&pltlut_tbl_rev3, sizeof(pltlut_tbl_rev3) / sizeof(pltlut_tbl_rev3[0]), - 20, 0, 32} - , - {&chanest_tbl_rev3, - sizeof(chanest_tbl_rev3) / sizeof(chanest_tbl_rev3[0]), 22, 0, 32} - , - {&frame_lut_rev3, sizeof(frame_lut_rev3) / sizeof(frame_lut_rev3[0]), - 24, 0, 8} - , - {&est_pwr_lut_core0_rev3, - sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26, - 0, 8} - , - {&est_pwr_lut_core1_rev3, - sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27, - 0, 8} - , - {&adj_pwr_lut_core0_rev3, - sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26, - 64, 8} - , - {&adj_pwr_lut_core1_rev3, - sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27, - 64, 8} - , - {&gainctrl_lut_core0_rev3, - sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]), - 26, 192, 32} - , - {&gainctrl_lut_core1_rev3, - sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]), - 27, 192, 32} - , - {&iq_lut_core0_rev3, - sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32} - , - {&iq_lut_core1_rev3, - sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32} - , - {&loft_lut_core0_rev3, - sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448, - 16} - , - {&loft_lut_core1_rev3, - sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448, - 16} - , + {&frame_struct_rev3, ARRAY_SIZE(frame_struct_rev3), 10, 0, 32}, + {&pilot_tbl_rev3, ARRAY_SIZE(pilot_tbl_rev3), 11, 0, 
16}, + {&tmap_tbl_rev7, ARRAY_SIZE(tmap_tbl_rev7), 12, 0, 32}, + {&intlv_tbl_rev3, ARRAY_SIZE(intlv_tbl_rev3), 13, 0, 32}, + {&tdtrn_tbl_rev3, ARRAY_SIZE(tdtrn_tbl_rev3), 14, 0, 32}, + {&noise_var_tbl_rev7, ARRAY_SIZE(noise_var_tbl_rev7), 16, 0, 32}, + {&mcs_tbl_rev3, ARRAY_SIZE(mcs_tbl_rev3), 18, 0, 16}, + {&tdi_tbl20_ant0_rev3, ARRAY_SIZE(tdi_tbl20_ant0_rev3), 19, 128, 32}, + {&tdi_tbl20_ant1_rev3, ARRAY_SIZE(tdi_tbl20_ant1_rev3), 19, 256, 32}, + {&tdi_tbl40_ant0_rev3, ARRAY_SIZE(tdi_tbl40_ant0_rev3), 19, 640, 32}, + {&tdi_tbl40_ant1_rev3, ARRAY_SIZE(tdi_tbl40_ant1_rev3), 19, 768, 32}, + {&pltlut_tbl_rev3, ARRAY_SIZE(pltlut_tbl_rev3), 20, 0, 32}, + {&chanest_tbl_rev3, ARRAY_SIZE(chanest_tbl_rev3), 22, 0, 32}, + {&frame_lut_rev3, ARRAY_SIZE(frame_lut_rev3), 24, 0, 8}, + {&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8}, + {&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8}, + {&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8}, + {&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8}, + {&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32}, + {&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32}, + {&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32}, + {&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32}, + {&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16}, + {&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16}, {&papd_comp_rfpwr_tbl_core0_rev3, - sizeof(papd_comp_rfpwr_tbl_core0_rev3) / - sizeof(papd_comp_rfpwr_tbl_core0_rev3[0]), 26, 576, 16} - , + ARRAY_SIZE(papd_comp_rfpwr_tbl_core0_rev3), 26, 576, 16}, {&papd_comp_rfpwr_tbl_core1_rev3, - sizeof(papd_comp_rfpwr_tbl_core1_rev3) / - sizeof(papd_comp_rfpwr_tbl_core1_rev3[0]), 27, 576, 16} - , + ARRAY_SIZE(papd_comp_rfpwr_tbl_core1_rev3), 27, 576, 16}, {&papd_comp_epsilon_tbl_core0_rev7, - sizeof(papd_comp_epsilon_tbl_core0_rev7) / - sizeof(papd_comp_epsilon_tbl_core0_rev7[0]), 31, 0, 32} - , + ARRAY_SIZE(papd_comp_epsilon_tbl_core0_rev7), 31, 0, 32}, {&papd_cal_scalars_tbl_core0_rev7, - sizeof(papd_cal_scalars_tbl_core0_rev7) / - sizeof(papd_cal_scalars_tbl_core0_rev7[0]), 32, 0, 32} - , + ARRAY_SIZE(papd_cal_scalars_tbl_core0_rev7), 32, 0, 32}, {&papd_comp_epsilon_tbl_core1_rev7, - sizeof(papd_comp_epsilon_tbl_core1_rev7) / - sizeof(papd_comp_epsilon_tbl_core1_rev7[0]), 33, 0, 32} - , + ARRAY_SIZE(papd_comp_epsilon_tbl_core1_rev7), 33, 0, 32}, {&papd_cal_scalars_tbl_core1_rev7, - sizeof(papd_cal_scalars_tbl_core1_rev7) / - sizeof(papd_cal_scalars_tbl_core1_rev7[0]), 34, 0, 32} - , + ARRAY_SIZE(papd_cal_scalars_tbl_core1_rev7), 34, 0, 32}, }; -const u32 mimophytbl_info_sz_rev7 = - sizeof(mimophytbl_info_rev7) / sizeof(mimophytbl_info_rev7[0]); +const u32 mimophytbl_info_sz_rev7 = ARRAY_SIZE(mimophytbl_info_rev7); const struct phytbl_info mimophytbl_info_rev16[] = { - {&noise_var_tbl_rev7, - sizeof(noise_var_tbl_rev7) / sizeof(noise_var_tbl_rev7[0]), 16, 0, 32} - , - {&est_pwr_lut_core0_rev3, - sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26, - 0, 8} - , - {&est_pwr_lut_core1_rev3, - sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27, - 0, 8} - , - {&adj_pwr_lut_core0_rev3, - sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26, - 64, 8} - , - {&adj_pwr_lut_core1_rev3, - sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27, - 64, 8} - , - {&gainctrl_lut_core0_rev3, - 
sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]), - 26, 192, 32} - , - {&gainctrl_lut_core1_rev3, - sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]), - 27, 192, 32} - , - {&iq_lut_core0_rev3, - sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32} - , - {&iq_lut_core1_rev3, - sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32} - , - {&loft_lut_core0_rev3, - sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448, - 16} - , - {&loft_lut_core1_rev3, - sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448, - 16} - , + {&noise_var_tbl_rev7, ARRAY_SIZE(noise_var_tbl_rev7), 16, 0, 32}, + {&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8}, + {&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8}, + {&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8}, + {&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8}, + {&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32}, + {&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32}, + {&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32}, + {&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32}, + {&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16}, + {&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16}, }; -const u32 mimophytbl_info_sz_rev16 = - sizeof(mimophytbl_info_rev16) / sizeof(mimophytbl_info_rev16[0]); +const u32 mimophytbl_info_sz_rev16 = ARRAY_SIZE(mimophytbl_info_rev16); diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index ff136f299a0a..e6205eae51fd 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -9,12 +9,13 @@ iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o -iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/a000.o +iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += fw/notif-wait.o iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o fw/nvm.o iwlwifi-$(CONFIG_ACPI) += fw/acpi.o +iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o iwlwifi-objs += $(iwlwifi-m) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c new file mode 100644 index 000000000000..48f6f80eb24b --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -0,0 +1,216 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015-2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2015-2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include <linux/module.h> +#include <linux/stringify.h> +#include "iwl-config.h" +#include "iwl-agn-hw.h" + +/* Highest firmware API version supported */ +#define IWL_22000_UCODE_API_MAX 36 + +/* Lowest firmware API version supported */ +#define IWL_22000_UCODE_API_MIN 24 + +/* NVM versions */ +#define IWL_22000_NVM_VERSION 0x0a1d +#define IWL_22000_TX_POWER_VERSION 0xffff /* meaningless */ + +/* Memory offsets and lengths */ +#define IWL_22000_DCCM_OFFSET 0x800000 /* LMAC1 */ +#define IWL_22000_DCCM_LEN 0x10000 /* LMAC1 */ +#define IWL_22000_DCCM2_OFFSET 0x880000 +#define IWL_22000_DCCM2_LEN 0x8000 +#define IWL_22000_SMEM_OFFSET 0x400000 +#define IWL_22000_SMEM_LEN 0xD0000 + +#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-" +#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" +#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-" +#define IWL_22000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-" +#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-" +#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" + +#define IWL_22000_HR_MODULE_FIRMWARE(api) \ + IWL_22000_HR_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_JF_MODULE_FIRMWARE(api) \ + IWL_22000_JF_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(api) \ + IWL_22000_HR_F0_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \ + IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \ + IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" + +#define NVM_HW_SECTION_NUM_FAMILY_22000 10 + +static const struct iwl_base_params iwl_22000_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000, + .num_of_queues = 512, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = 
IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = true, + .pcie_l1_allowed = true, +}; + +static const struct iwl_ht_params iwl_22000_ht_params = { + .stbc = true, + .ldpc = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), +}; + +#define IWL_DEVICE_22000 \ + .ucode_api_max = IWL_22000_UCODE_API_MAX, \ + .ucode_api_min = IWL_22000_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_22000, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .base_params = &iwl_22000_base_params, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \ + .non_shared_ant = ANT_A, \ + .dccm_offset = IWL_22000_DCCM_OFFSET, \ + .dccm_len = IWL_22000_DCCM_LEN, \ + .dccm2_offset = IWL_22000_DCCM2_OFFSET, \ + .dccm2_len = IWL_22000_DCCM2_LEN, \ + .smem_offset = IWL_22000_SMEM_OFFSET, \ + .smem_len = IWL_22000_SMEM_LEN, \ + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ + .apmg_not_supported = true, \ + .mq_rx_supported = true, \ + .vht_mu_mimo_supported = true, \ + .mac_addr_from_csr = true, \ + .use_tfh = true, \ + .rf_id = true, \ + .gen2 = true, \ + .nvm_type = IWL_NVM_EXT, \ + .dbgc_supported = true, \ + .tx_cmd_queue_size = 32, \ + .min_umac_error_event_table = 0x400000 + +const struct iwl_cfg iwl22000_2ac_cfg_hr = { + .name = "Intel(R) Dual Band Wireless AC 22000", + .fw_name_pre = IWL_22000_HR_FW_PRE, + IWL_DEVICE_22000, + .ht_params = &iwl_22000_ht_params, + .nvm_ver = IWL_22000_NVM_VERSION, + .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = { + .name = "Intel(R) Dual Band Wireless AC 22000", + .fw_name_pre = IWL_22000_HR_CDB_FW_PRE, + IWL_DEVICE_22000, + .ht_params = &iwl_22000_ht_params, + .nvm_ver = IWL_22000_NVM_VERSION, + .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .cdb = true, +}; + +const struct iwl_cfg iwl22000_2ac_cfg_jf = { + .name = "Intel(R) Dual Band Wireless AC 22000", + .fw_name_pre = IWL_22000_JF_FW_PRE, + IWL_DEVICE_22000, + .ht_params = &iwl_22000_ht_params, + .nvm_ver = IWL_22000_NVM_VERSION, + .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwl22000_2ax_cfg_hr = { + .name = "Intel(R) Dual Band Wireless AX 22000", + .fw_name_pre = IWL_22000_HR_FW_PRE, + IWL_DEVICE_22000, + .ht_params = &iwl_22000_ht_params, + .nvm_ver = IWL_22000_NVM_VERSION, + .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = { + .name = "Intel(R) Dual Band Wireless AX 22000", + .fw_name_pre = IWL_22000_HR_F0_FW_PRE, + IWL_DEVICE_22000, + .ht_params = &iwl_22000_ht_params, + .nvm_ver = IWL_22000_NVM_VERSION, + .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = { + .name = "Intel(R) Dual Band Wireless AX 22000", + .fw_name_pre = IWL_22000_JF_B0_FW_PRE, + IWL_DEVICE_22000, + .ht_params = &iwl_22000_ht_params, + .nvm_ver = IWL_22000_NVM_VERSION, + .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = { + .name = "Intel(R) Dual Band Wireless AX 22000", + .fw_name_pre = IWL_22000_HR_A0_FW_PRE, + IWL_DEVICE_22000, + .ht_params = &iwl_22000_ht_params, + 
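	/*
	 * Illustrative aside (a sketch, not part of the patch): fw_name_pre
	 * plus the highest supported API version selects the firmware file
	 * requested at runtime. With the macros defined above, for example:
	 *
	 *	IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)
	 *	  -> IWL_22000_HR_A0_FW_PRE "36" ".ucode"
	 *	  -> "iwlwifi-QuQnj-a0-hr-a0-36.ucode"
	 *
	 * The driver starts at ucode_api_max and steps down toward
	 * ucode_api_min until a matching firmware file is found.
	 */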
.nvm_ver = IWL_22000_NVM_VERSION, + .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c index 9bb7c19d48eb..3f4d9bac9f73 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c @@ -70,8 +70,8 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 34 -#define IWL8265_UCODE_API_MAX 34 +#define IWL8000_UCODE_API_MAX 36 +#define IWL8265_UCODE_API_MAX 36 /* Lowest firmware API version supported */ #define IWL8000_UCODE_API_MIN 22 diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index e7e75b458005..90a1d14cf7d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -55,7 +55,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL9000_UCODE_API_MAX 34 +#define IWL9000_UCODE_API_MAX 36 /* Lowest firmware API version supported */ #define IWL9000_UCODE_API_MIN 30 diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c deleted file mode 100644 index 705f83b02e13..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c +++ /dev/null @@ -1,216 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015-2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2015-2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include <linux/module.h> -#include <linux/stringify.h> -#include "iwl-config.h" -#include "iwl-agn-hw.h" - -/* Highest firmware API version supported */ -#define IWL_A000_UCODE_API_MAX 34 - -/* Lowest firmware API version supported */ -#define IWL_A000_UCODE_API_MIN 24 - -/* NVM versions */ -#define IWL_A000_NVM_VERSION 0x0a1d -#define IWL_A000_TX_POWER_VERSION 0xffff /* meaningless */ - -/* Memory offsets and lengths */ -#define IWL_A000_DCCM_OFFSET 0x800000 /* LMAC1 */ -#define IWL_A000_DCCM_LEN 0x10000 /* LMAC1 */ -#define IWL_A000_DCCM2_OFFSET 0x880000 -#define IWL_A000_DCCM2_LEN 0x8000 -#define IWL_A000_SMEM_OFFSET 0x400000 -#define IWL_A000_SMEM_LEN 0xD0000 - -#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-" -#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" -#define IWL_A000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-" -#define IWL_A000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-" -#define IWL_A000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-" -#define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" - -#define IWL_A000_HR_MODULE_FIRMWARE(api) \ - IWL_A000_HR_FW_PRE __stringify(api) ".ucode" -#define IWL_A000_JF_MODULE_FIRMWARE(api) \ - IWL_A000_JF_FW_PRE __stringify(api) ".ucode" -#define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \ - IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode" -#define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \ - IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode" -#define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \ - IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode" - -#define NVM_HW_SECTION_NUM_FAMILY_A000 10 - -static const struct iwl_base_params iwl_a000_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_A000, - .num_of_queues = 512, - .shadow_ram_support = true, - .led_compensation = 57, - .wd_timeout = IWL_LONG_WD_TIMEOUT, - .max_event_log_size = 512, - .shadow_reg_enable = true, - .pcie_l1_allowed = true, -}; - -static const struct iwl_ht_params iwl_a000_ht_params = { - .stbc = true, - .ldpc = true, - .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), -}; - -#define IWL_DEVICE_A000 \ - .ucode_api_max = IWL_A000_UCODE_API_MAX, \ - .ucode_api_min = IWL_A000_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_A000, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ - .base_params = &iwl_a000_base_params, \ - .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_A000, \ - .non_shared_ant = ANT_A, \ - .dccm_offset = IWL_A000_DCCM_OFFSET, \ - .dccm_len = IWL_A000_DCCM_LEN, \ - .dccm2_offset = IWL_A000_DCCM2_OFFSET, \ - .dccm2_len = IWL_A000_DCCM2_LEN, \ - .smem_offset = IWL_A000_SMEM_OFFSET, \ - .smem_len = IWL_A000_SMEM_LEN, \ 
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ - .apmg_not_supported = true, \ - .mq_rx_supported = true, \ - .vht_mu_mimo_supported = true, \ - .mac_addr_from_csr = true, \ - .use_tfh = true, \ - .rf_id = true, \ - .gen2 = true, \ - .nvm_type = IWL_NVM_EXT, \ - .dbgc_supported = true, \ - .tx_cmd_queue_size = 32, \ - .min_umac_error_event_table = 0x400000 - -const struct iwl_cfg iwla000_2ac_cfg_hr = { - .name = "Intel(R) Dual Band Wireless AC a000", - .fw_name_pre = IWL_A000_HR_FW_PRE, - IWL_DEVICE_A000, - .ht_params = &iwl_a000_ht_params, - .nvm_ver = IWL_A000_NVM_VERSION, - .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, -}; - -const struct iwl_cfg iwla000_2ac_cfg_hr_cdb = { - .name = "Intel(R) Dual Band Wireless AC a000", - .fw_name_pre = IWL_A000_HR_CDB_FW_PRE, - IWL_DEVICE_A000, - .ht_params = &iwl_a000_ht_params, - .nvm_ver = IWL_A000_NVM_VERSION, - .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, - .cdb = true, -}; - -const struct iwl_cfg iwla000_2ac_cfg_jf = { - .name = "Intel(R) Dual Band Wireless AC a000", - .fw_name_pre = IWL_A000_JF_FW_PRE, - IWL_DEVICE_A000, - .ht_params = &iwl_a000_ht_params, - .nvm_ver = IWL_A000_NVM_VERSION, - .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, -}; - -const struct iwl_cfg iwla000_2ax_cfg_hr = { - .name = "Intel(R) Dual Band Wireless AX a000", - .fw_name_pre = IWL_A000_HR_FW_PRE, - IWL_DEVICE_A000, - .ht_params = &iwl_a000_ht_params, - .nvm_ver = IWL_A000_NVM_VERSION, - .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, -}; - -const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0 = { - .name = "Intel(R) Dual Band Wireless AX a000", - .fw_name_pre = IWL_A000_HR_F0_FW_PRE, - IWL_DEVICE_A000, - .ht_params = &iwl_a000_ht_params, - .nvm_ver = IWL_A000_NVM_VERSION, - .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, -}; - -const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0 = { - .name = "Intel(R) Dual Band Wireless AX a000", - .fw_name_pre = IWL_A000_JF_B0_FW_PRE, - IWL_DEVICE_A000, - .ht_params = &iwl_a000_ht_params, - .nvm_ver = IWL_A000_NVM_VERSION, - .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, -}; - -const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0 = { - .name = "Intel(R) Dual Band Wireless AX a000", - .fw_name_pre = IWL_A000_HR_A0_FW_PRE, - IWL_DEVICE_A000, - .ht_params = &iwl_a000_ht_params, - .nvm_ver = IWL_A000_NVM_VERSION, - .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, -}; - -MODULE_FIRMWARE(IWL_A000_HR_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_A000_JF_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h index 3684a3e180e5..007bfe7656a4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -95,8 +95,8 @@ enum { #define IWL_ALIVE_FLG_RFKILL BIT(0) struct iwl_lmac_alive { - __le32 ucode_minor; __le32 ucode_major; + __le32 ucode_minor; u8 ver_subtype; u8 ver_type; u8 mac; @@ 
-113,8 +113,8 @@ struct iwl_lmac_alive { } __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */ struct iwl_umac_alive { - __le32 umac_minor; /* UMAC version: minor */ __le32 umac_major; /* UMAC version: major */ + __le32 umac_minor; /* UMAC version: minor */ __le32 error_info_addr; /* SRAM address for UMAC error log */ __le32 dbg_print_buff_addr; } __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h index d09555afe2c5..87c1ddea75ae 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -188,11 +188,6 @@ enum iwl_bt_mxbox_dw3 { BT_MBOX(3, UPDATE_REQUEST, 21, 1), }; -enum iwl_bt_mxbox_dw4 { - BT_MBOX(4, ATS_BT_INTERVAL, 0, 7), - BT_MBOX(4, ATS_BT_ACTIVE_MAX_TH, 7, 7), -}; - #define BT_MBOX_MSG(_notif, _num, _field) \ ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ >> BT_MBOX##_num##_##_field##_POS) @@ -232,31 +227,6 @@ enum iwl_bt_ci_compliance { * @reserved: reserved */ struct iwl_bt_coex_profile_notif { - __le32 mbox_msg[8]; - __le32 msg_idx; - __le32 bt_ci_compliance; - - __le32 primary_ch_lut; - __le32 secondary_ch_lut; - __le32 bt_activity_grading; - u8 ttc_status; - u8 rrc_status; - __le16 reserved; -} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_5 */ - -/** - * struct iwl_bt_coex_profile_notif - notification about BT coex - * @mbox_msg: message from BT to WiFi - * @msg_idx: the index of the message - * @bt_ci_compliance: enum %iwl_bt_ci_compliance - * @primary_ch_lut: LUT used for primary channel &enum iwl_bt_coex_lut_type - * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type - * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading - * @ttc_status: is TTC enabled - one bit per PHY - * @rrc_status: is RRC enabled - one bit per PHY - * @reserved: reserved - */ -struct iwl_bt_coex_profile_notif_v4 { __le32 mbox_msg[4]; __le32 msg_idx; __le32 bt_ci_compliance; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 7ebbf097488b..f285bacc8726 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -215,7 +215,7 @@ enum iwl_legacy_cmds { /** * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware, * &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp - * for newer (A000) hardware. + * for newer (22000) hardware. 
*/ SCD_QUEUE_CFG = 0x1d, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index aa76dcc148bd..a57c7223df0f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -83,6 +83,21 @@ enum iwl_data_path_subcmd_ids { TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, /** + * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd + */ + TLC_MNG_CONFIG_CMD = 0xF, + + /** + * @TLC_MNG_NOTIF_REQ_CMD: &struct iwl_tlc_notif_req_config_cmd + */ + TLC_MNG_NOTIF_REQ_CMD = 0x10, + + /** + * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif + */ + TLC_MNG_UPDATE_NOTIF = 0xF7, + + /** * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification */ STA_PM_NOTIF = 0xFD, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index 0a81fb1b6ed4..106782341544 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -250,10 +250,12 @@ struct iwl_mfu_assert_dump_notif { * The ids for the different types of markers to insert into the usniffer logs * * @MARKER_ID_TX_FRAME_LATENCY: TX latency marker + * @MARKER_ID_SYNC_CLOCK: sync FW time and systime */ enum iwl_mvm_marker_id { MARKER_ID_TX_FRAME_LATENCY = 1, -}; /* MARKER_ID_API_E_VER_1 */ + MARKER_ID_SYNC_CLOCK = 2, +}; /* MARKER_ID_API_E_VER_2 */ /** * struct iwl_mvm_marker - mark info into the usniffer logs diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h index ec42c84e5df2..17c7ef1662a9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -68,6 +68,10 @@ */ enum iwl_mac_conf_subcmd_ids { /** + * @LOW_LATENCY_CMD: &struct iwl_mac_low_latency_cmd + */ + LOW_LATENCY_CMD = 0x3, + /** * @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif */ CHANNEL_SWITCH_NOA_NOTIF = 0xFF, @@ -82,4 +86,19 @@ struct iwl_channel_switch_noa_notif { __le32 id_and_color; } __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */ +/** + * struct iwl_mac_low_latency_cmd - set/clear mac to 'low-latency mode' + * + * @mac_id: MAC ID to which to apply the low-latency configurations + * @low_latency_rx: 1/0 to set/clear Rx low latency direction + * @low_latency_tx: 1/0 to set/clear Tx low latency direction + * @reserved: reserved for alignment purposes + */ +struct iwl_mac_low_latency_cmd { + __le32 mac_id; + u8 low_latency_rx; + u8 low_latency_tx; + __le16 reserved; +} __packed; /* MAC_LOW_LATENCY_API_S_VER_1 */ + #endif /* __iwl_fw_api_mac_cfg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index a13fd8a1be62..e49a6f7be613 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -62,6 +62,267 @@ #include "mac.h" +/** + * enum iwl_tlc_mng_cfg_flags - options for TLC config flags + * @IWL_TLC_MNG_CFG_FLAGS_CCK_MSK: CCK support + * @IWL_TLC_MNG_CFG_FLAGS_DD_MSK: enable DD + * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC + * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC + * @IWL_TLC_MNG_CFG_FLAGS_BF_MSK: enable BFER + * @IWL_TLC_MNG_CFG_FLAGS_DCM_MSK: enable DCM + */ +enum iwl_tlc_mng_cfg_flags { + IWL_TLC_MNG_CFG_FLAGS_CCK_MSK = BIT(0), + IWL_TLC_MNG_CFG_FLAGS_DD_MSK = BIT(1), + IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(2), + IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(3), + 
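	/*
	 * Usage sketch (illustrative, not taken from the patch): these
	 * BIT() masks are OR-ed into the little-endian flags word of
	 * struct iwl_tlc_config_cmd defined further below, e.g.:
	 *
	 *	cmd.flags = cpu_to_le16(IWL_TLC_MNG_CFG_FLAGS_STBC_MSK |
	 *				IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK);
	 *
	 * cpu_to_le16() is needed because the firmware API is
	 * little-endian regardless of host byte order.
	 */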
IWL_TLC_MNG_CFG_FLAGS_BF_MSK = BIT(4), + IWL_TLC_MNG_CFG_FLAGS_DCM_MSK = BIT(5), +}; + +/** + * enum iwl_tlc_mng_cfg_cw - channel width options + * @IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ: 20MHZ channel + * @IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ: 40MHZ channel + * @IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ: 80MHZ channel + * @IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ: 160MHZ channel + * @IWL_TLC_MNG_MAX_CH_WIDTH_LAST: maximum value + */ +enum iwl_tlc_mng_cfg_cw { + IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ, + IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ, + IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ, + IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ, + IWL_TLC_MNG_MAX_CH_WIDTH_LAST = IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ, +}; + +/** + * enum iwl_tlc_mng_cfg_chains - possible chains + * @IWL_TLC_MNG_CHAIN_A_MSK: chain A + * @IWL_TLC_MNG_CHAIN_B_MSK: chain B + * @IWL_TLC_MNG_CHAIN_C_MSK: chain C + */ +enum iwl_tlc_mng_cfg_chains { + IWL_TLC_MNG_CHAIN_A_MSK = BIT(0), + IWL_TLC_MNG_CHAIN_B_MSK = BIT(1), + IWL_TLC_MNG_CHAIN_C_MSK = BIT(2), +}; + +/** + * enum iwl_tlc_mng_cfg_gi - guard interval options + * @IWL_TLC_MNG_SGI_20MHZ_MSK: enable short GI for 20MHZ + * @IWL_TLC_MNG_SGI_40MHZ_MSK: enable short GI for 40MHZ + * @IWL_TLC_MNG_SGI_80MHZ_MSK: enable short GI for 80MHZ + * @IWL_TLC_MNG_SGI_160MHZ_MSK: enable short GI for 160MHZ + */ +enum iwl_tlc_mng_cfg_gi { + IWL_TLC_MNG_SGI_20MHZ_MSK = BIT(0), + IWL_TLC_MNG_SGI_40MHZ_MSK = BIT(1), + IWL_TLC_MNG_SGI_80MHZ_MSK = BIT(2), + IWL_TLC_MNG_SGI_160MHZ_MSK = BIT(3), +}; + +/** + * enum iwl_tlc_mng_cfg_mode - supported modes + * @IWL_TLC_MNG_MODE_CCK: enable CCK + * @IWL_TLC_MNG_MODE_OFDM_NON_HT: enable OFDM (non HT) + * @IWL_TLC_MNG_MODE_NON_HT: enable non HT + * @IWL_TLC_MNG_MODE_HT: enable HT + * @IWL_TLC_MNG_MODE_VHT: enable VHT + * @IWL_TLC_MNG_MODE_HE: enable HE + * @IWL_TLC_MNG_MODE_INVALID: invalid value + * @IWL_TLC_MNG_MODE_NUM: a count of possible modes + */ +enum iwl_tlc_mng_cfg_mode { + IWL_TLC_MNG_MODE_CCK = 0, + IWL_TLC_MNG_MODE_OFDM_NON_HT = IWL_TLC_MNG_MODE_CCK, + IWL_TLC_MNG_MODE_NON_HT = IWL_TLC_MNG_MODE_CCK, + IWL_TLC_MNG_MODE_HT, + IWL_TLC_MNG_MODE_VHT, + IWL_TLC_MNG_MODE_HE, + IWL_TLC_MNG_MODE_INVALID, + IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID, +}; + +/** + * enum iwl_tlc_mng_vht_he_types - VHT HE types + * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU: VHT/HE single user + * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT: VHT/HE single user extended + * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU: VHT/HE multiple users + * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED: trigger based + * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM: a count of possible types + */ +enum iwl_tlc_mng_vht_he_types { + IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU = 0, + IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT, + IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU, + IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED, + IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM = + IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED, + +}; + +/** + * enum iwl_tlc_mng_ht_rates - HT/VHT rates + * @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0 + * @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1 + * @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2 + * @IWL_TLC_MNG_HT_RATE_MCS3: index of MCS3 + * @IWL_TLC_MNG_HT_RATE_MCS4: index of MCS4 + * @IWL_TLC_MNG_HT_RATE_MCS5: index of MCS5 + * @IWL_TLC_MNG_HT_RATE_MCS6: index of MCS6 + * @IWL_TLC_MNG_HT_RATE_MCS7: index of MCS7 + * @IWL_TLC_MNG_HT_RATE_MCS8: index of MCS8 + * @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9 + * @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT + */ +enum iwl_tlc_mng_ht_rates { + IWL_TLC_MNG_HT_RATE_MCS0 = 0, + IWL_TLC_MNG_HT_RATE_MCS1, + IWL_TLC_MNG_HT_RATE_MCS2, + IWL_TLC_MNG_HT_RATE_MCS3, + 
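	/*
	 * Bitmap sketch (illustrative): these indexes select bits inside
	 * the ht_supp_rates[] words of struct iwl_tlc_config_cmd below.
	 * For example, advertising MCS0-MCS7 on the first chain could
	 * look like:
	 *
	 *	cmd.ht_supp_rates[0] =
	 *		cpu_to_le16(GENMASK(IWL_TLC_MNG_HT_RATE_MCS7,
	 *				    IWL_TLC_MNG_HT_RATE_MCS0));
	 */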
IWL_TLC_MNG_HT_RATE_MCS4, + IWL_TLC_MNG_HT_RATE_MCS5, + IWL_TLC_MNG_HT_RATE_MCS6, + IWL_TLC_MNG_HT_RATE_MCS7, + IWL_TLC_MNG_HT_RATE_MCS8, + IWL_TLC_MNG_HT_RATE_MCS9, + IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS9, +}; + +/* Maximum supported tx antennas number */ +#define MAX_RS_ANT_NUM 3 + +/** + * struct iwl_tlc_config_cmd - TLC configuration + * @sta_id: station id + * @reserved1: reserved + * @max_supp_ch_width: channel width + * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags + * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains + * @max_supp_ss: valid values are 0-3; 0 means spatial streams are not supported + * @valid_vht_he_types: bitmap of &enum iwl_tlc_mng_vht_he_types + * @non_ht_supp_rates: bitmap of supported legacy rates + * @ht_supp_rates: bitmap of supported HT/VHT rates, valid bits are 0-9 + * @mode: &enum iwl_tlc_mng_cfg_mode + * @reserved2: reserved + * @he_supp_rates: bitmap of supported HE rates + * @sgi_ch_width_supp: bitmap of SGI support per channel width + * @he_gi_support: 11ax HE guard interval + * @max_ampdu_cnt: max AMPDU size (frames count) + */ +struct iwl_tlc_config_cmd { + u8 sta_id; + u8 reserved1[3]; + u8 max_supp_ch_width; + u8 chains; + u8 max_supp_ss; + u8 valid_vht_he_types; + __le16 flags; + __le16 non_ht_supp_rates; + __le16 ht_supp_rates[MAX_RS_ANT_NUM]; + u8 mode; + u8 reserved2; + __le16 he_supp_rates; + u8 sgi_ch_width_supp; + u8 he_gi_support; + __le32 max_ampdu_cnt; +} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_1 */ + +#define IWL_TLC_NOTIF_INIT_RATE_POS 0 +#define IWL_TLC_NOTIF_INIT_RATE_MSK BIT(IWL_TLC_NOTIF_INIT_RATE_POS) +#define IWL_TLC_NOTIF_REQ_INTERVAL (500) + +/** + * struct iwl_tlc_notif_req_config_cmd - request notif on specific changes + * @sta_id: relevant station + * @reserved1: reserved + * @flags: bitmap of requested notifications %IWL_TLC_NOTIF_INIT_\* + * @interval: minimum time between notifications from TLC to the driver (msec) + * @reserved2: reserved + */ +struct iwl_tlc_notif_req_config_cmd { + u8 sta_id; + u8 reserved1; + __le16 flags; + __le16 interval; + __le16 reserved2; +} __packed; /* TLC_MNG_NOTIF_REQ_CMD_API_S_VER_1 */ + +/** + * struct iwl_tlc_update_notif - TLC notification from FW + * @sta_id: station id + * @reserved: reserved + * @flags: bitmap of notifications reported + * @values: field per flag in struct iwl_tlc_notif_req_config_cmd + */ +struct iwl_tlc_update_notif { + u8 sta_id; + u8 reserved; + __le16 flags; + __le32 values[16]; +} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_1 */ + +/** + * enum iwl_tlc_debug_flags - debug options + * @IWL_TLC_DEBUG_FIXED_RATE: set fixed rate for rate scaling + * @IWL_TLC_DEBUG_STATS_TH: threshold for sending statistics to the driver, in + * frames + * @IWL_TLC_DEBUG_STATS_TIME_TH: threshold for sending statistics to the + * driver, in msec + * @IWL_TLC_DEBUG_AGG_TIME_LIM: time limit for a BA session + * @IWL_TLC_DEBUG_AGG_DIS_START_TH: frame with try-count greater than this + * threshold should not start an aggregation session + * @IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM: set max number of frames in an aggregation + * @IWL_TLC_DEBUG_RENEW_ADDBA_DELAY: delay between retries of ADD BA + * @IWL_TLC_DEBUG_START_AC_RATE_IDX: frames per second to start a BA session + * @IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK: disable BW scaling + */ +enum iwl_tlc_debug_flags { + IWL_TLC_DEBUG_FIXED_RATE, + IWL_TLC_DEBUG_STATS_TH, + IWL_TLC_DEBUG_STATS_TIME_TH, + IWL_TLC_DEBUG_AGG_TIME_LIM, + IWL_TLC_DEBUG_AGG_DIS_START_TH, + IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM, + IWL_TLC_DEBUG_RENEW_ADDBA_DELAY, + 
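	/*
	 * Handling sketch (illustrative, assuming the usual iwlwifi
	 * rx-packet layout): a consumer of TLC_MNG_UPDATE_NOTIF tests the
	 * reported flags and reads the matching slot in values[], e.g.
	 * for the initial-rate update:
	 *
	 *	struct iwl_tlc_update_notif *notif = (void *)pkt->data;
	 *
	 *	if (le16_to_cpu(notif->flags) & IWL_TLC_NOTIF_INIT_RATE_MSK)
	 *		rate = le32_to_cpu(notif->values[IWL_TLC_NOTIF_INIT_RATE_POS]);
	 *
	 * values[] carries one __le32 per notification requested through
	 * struct iwl_tlc_notif_req_config_cmd above.
	 */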
IWL_TLC_DEBUG_START_AC_RATE_IDX, + IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK, +}; /* TLC_MNG_DEBUG_FLAGS_API_E_VER_1 */ + +/** + * struct iwl_dhc_tlc_cmd - fixed debug config + * @sta_id: bit 0 - enable/disable, bits 1 - 7 hold station id + * @reserved1: reserved + * @flags: bitmap of %IWL_TLC_DEBUG_\* + * @fixed_rate: rate value + * @stats_threshold: if number of tx-ed frames is greater, send statistics + * @time_threshold: statistics threshold in usec + * @agg_time_lim: max agg time + * @agg_dis_start_threshold: frames with try-count greater than this count will + * not be aggregated + * @agg_frame_count_lim: agg size + * @addba_retry_delay: delay between retries of ADD BA + * @start_ac_rate_idx: frames per second to start a BA session + * @no_far_range_tweak: disable BW scaling + * @reserved2: reserved + */ +struct iwl_dhc_tlc_cmd { + u8 sta_id; + u8 reserved1[3]; + __le32 flags; + __le32 fixed_rate; + __le16 stats_threshold; + __le16 time_threshold; + __le16 agg_time_lim; + __le16 agg_dis_start_threshold; + __le16 agg_frame_count_lim; + __le16 addba_retry_delay; + u8 start_ac_rate_idx[IEEE80211_NUM_ACS]; + u8 no_far_range_tweak; + u8 reserved2[3]; +} __packed; + /* * These serve as indexes into * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT]; @@ -253,7 +514,6 @@ enum { #define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | \ RATE_MCS_ANT_C_MSK) #define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK -#define RATE_MCS_ANT_NUM 3 /* Bit 17: (0) SS, (1) SS*2 */ #define RATE_MCS_STBC_POS 17 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index f5d5ba7e37ec..a2a40b515a3c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -121,7 +121,7 @@ enum iwl_tx_flags { }; /* TX_FLAGS_BITS_API_S_VER_1 */ /** - * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for a000 + * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for 22000 * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs * to a secured STA @@ -301,7 +301,7 @@ struct iwl_dram_sec_info { } __packed; /* DRAM_SEC_INFO_API_S_VER_1 */ /** - * struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices + * struct iwl_tx_cmd_gen2 - TX command struct to FW for 22000 devices * ( TX_CMD = 0x1c ) * @len: in bytes of the payload, see below for details * @offload_assist: TX offload configuration diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 8106fd4be996..67aefc8fc9ac 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -964,7 +964,20 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, if (trigger) delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); - if (WARN(fwrt->trans->state == IWL_TRANS_NO_FW, + /* + * If the loading of the FW completed successfully, the next step is to + * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non- + * zero, the FW was already loaded successfully. If the state is "NO_FW" + * in such a case - WARN and exit, since FW may be dead. Otherwise, we + * can try to collect the data, since FW might just not be fully + * loaded (no "ALIVE" yet), and the debug data is accessible. + * + * Corner case: got the FW alive but crashed before getting the SMEM + * config. In such a case, due to HW access problems, we might + * collect garbage.
+ */ + if (WARN((fwrt->trans->state == IWL_TRANS_NO_FW) && + fwrt->smem_cfg.num_lmacs, "Can't collect dbg data when FW isn't alive\n")) return -EIO; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c new file mode 100644 index 000000000000..e2ded29a145d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c @@ -0,0 +1,195 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include "api/commands.h" +#include "debugfs.h" + +#define FWRT_DEBUGFS_READ_FILE_OPS(name) \ +static ssize_t iwl_dbgfs_##name##_read(struct iwl_fw_runtime *fwrt, \ + char *buf, size_t count, \ + loff_t *ppos); \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .read = iwl_dbgfs_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + +#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \ +static ssize_t iwl_dbgfs_##name##_write(struct iwl_fw_runtime *fwrt, \ + char *buf, size_t count, \ + loff_t *ppos); \ +static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \ + const char __user *user_buf, \ + size_t count, loff_t *ppos) \ +{ \ + struct iwl_fw_runtime *fwrt = file->private_data; \ + char buf[buflen] = {}; \ + size_t buf_size = min(count, sizeof(buf) - 1); \ + \ + if (copy_from_user(buf, user_buf, buf_size)) \ + return -EFAULT; \ + \ + return iwl_dbgfs_##name##_write(fwrt, buf, buf_size, ppos); \ +} + +#define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen) \ +FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = _iwl_dbgfs_##name##_write, \ + .read = iwl_dbgfs_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + +#define FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen) \ +FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = _iwl_dbgfs_##name##_write, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + +#define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \ + if (!debugfs_create_file(alias, mode, parent, fwrt, \ + &iwl_dbgfs_##name##_ops)) \ + goto err; \ + } while (0) +#define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \ + FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) + +static int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt) +{ + struct iwl_mvm_marker marker = { + .dw_len = sizeof(struct iwl_mvm_marker) / 4, + .marker_id = MARKER_ID_SYNC_CLOCK, + + /* the real timestamp is taken from the ftrace clock + * this is for finding the match between fw and kernel logs + */ + .timestamp = cpu_to_le64(fwrt->timestamp.seq++), + }; + + struct iwl_host_cmd hcmd = { + .id = MARKER_CMD, + .flags = CMD_ASYNC, + .data[0] = &marker, + .len[0] = sizeof(marker), + }; + + return iwl_trans_send_cmd(fwrt->trans, &hcmd); +} + +static void iwl_fw_timestamp_marker_wk(struct work_struct *work) +{ + int ret; + struct iwl_fw_runtime *fwrt = + container_of(work, struct iwl_fw_runtime, timestamp.wk.work); + unsigned long delay = fwrt->timestamp.delay; + + ret = iwl_fw_send_timestamp_marker_cmd(fwrt); + if (!ret && delay) + schedule_delayed_work(&fwrt->timestamp.wk, + round_jiffies_relative(delay)); + else + IWL_INFO(fwrt, + "stopping timestamp_marker, ret: %d, delay: %u\n", + ret, jiffies_to_msecs(delay) / 1000); +} + +static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt, + char *buf, size_t count, + loff_t *ppos) +{ + int ret; + u32 delay; + + ret = kstrtou32(buf, 10, &delay); + if (ret < 0) + return ret; + + IWL_INFO(fwrt, + "starting timestamp_marker trigger with delay: %us\n", + delay); + + iwl_fw_cancel_timestamp(fwrt); + + fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000); + + schedule_delayed_work(&fwrt->timestamp.wk, + round_jiffies_relative(fwrt->timestamp.delay)); + return count; +} + +FWRT_DEBUGFS_WRITE_FILE_OPS(timestamp_marker, 10); + +int 
iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, + struct dentry *dbgfs_dir) +{ + INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk); + FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, S_IWUSR); + return 0; +err: + IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n"); + return -ENOMEM; +} diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h new file mode 100644 index 000000000000..e57ff92a68ae --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h @@ -0,0 +1,87 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include "runtime.h" + +#ifdef CONFIG_IWLWIFI_DEBUGFS +int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, + struct dentry *dbgfs_dir); + +static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) +{ + fwrt->timestamp.delay = 0; + cancel_delayed_work_sync(&fwrt->timestamp.wk); +} + +#else +static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, + struct dentry *dbgfs_dir) +{ + return 0; +} + +static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {} + +#endif /* CONFIG_IWLWIFI_DEBUGFS */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 37a5c5b4eda6..1a05d506ac9a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -7,7 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -246,10 +246,10 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignement. * @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2 * @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used - * @IWL_UCODE_TLV_API_ATS_COEX_EXTERNAL: the coex notification is enlared to - * include information about ACL time sharing. * @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field * indicating low latency direction. + * @IWL_UCODE_TLV_API_DEPRECATE_TTAK: RX status flag TTAK ok (bit 7) is + * deprecated. 
* * @NUM_IWL_UCODE_TLV_API: number of bits used */ @@ -267,8 +267,8 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34, IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35, - IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37, IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY = (__force iwl_ucode_tlv_api_t)38, + IWL_UCODE_TLV_API_DEPRECATE_TTAK = (__force iwl_ucode_tlv_api_t)41, NUM_IWL_UCODE_TLV_API #ifdef __CHECKER__ @@ -313,6 +313,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification + * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm + * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT @@ -367,6 +369,8 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39, IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST = (__force iwl_ucode_tlv_capa_t)41, + IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43, + IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44, IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67, @@ -541,7 +545,7 @@ struct iwl_fw_dbg_mem_seg_tlv { } __packed; /** - * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data + * struct iwl_fw_dbg_dest_tlv_v1 - configures the destination of the debug data * * @version: version of the TLV - currently 0 * @monitor_mode: &enum iwl_fw_dbg_monitor_mode @@ -556,7 +560,7 @@ struct iwl_fw_dbg_mem_seg_tlv { * * This parses IWL_UCODE_TLV_FW_DBG_DEST */ -struct iwl_fw_dbg_dest_tlv { +struct iwl_fw_dbg_dest_tlv_v1 { u8 version; u8 monitor_mode; u8 size_power; @@ -570,6 +574,26 @@ struct iwl_fw_dbg_dest_tlv { struct iwl_fw_dbg_reg_op reg_ops[0]; } __packed; +/* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */ +#define IWL_LDBG_M2S_BUF_SIZE_MSK 0x0fff0000 +/* Mask of the register for defining the LDBG MAC2SMEM SMEM base address */ +#define IWL_LDBG_M2S_BUF_BA_MSK 0x00000fff +/* The smem buffer chunks are in units of 256 bits */ +#define IWL_M2S_UNIT_SIZE 0x100 + +struct iwl_fw_dbg_dest_tlv { + u8 version; + u8 monitor_mode; + u8 size_power; + u8 reserved; + __le32 cfg_reg; + __le32 write_ptr_reg; + __le32 wrap_count; + u8 base_shift; + u8 size_shift; + struct iwl_fw_dbg_reg_op reg_ops[0]; +} __packed; + struct iwl_fw_dbg_conf_hcmd { u8 id; u8 reserved; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index 985496cc01d0..b23ffe12ad84 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -284,7 +284,7 @@ struct iwl_fw { struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS]; u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; - struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; + struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv; struct iwl_fw_dbg_conf_tlv 
*dbg_conf_tlv[FW_DBG_CONF_MAX]; size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index bfe5316bbb6a..c39fe84bb4c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -58,10 +58,12 @@ #include "iwl-drv.h" #include "runtime.h" #include "dbg.h" +#include "debugfs.h" void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, - const struct iwl_fw *fw, - const struct iwl_fw_runtime_ops *ops, void *ops_ctx) + const struct iwl_fw *fw, + const struct iwl_fw_runtime_ops *ops, void *ops_ctx, + struct dentry *dbgfs_dir) { memset(fwrt, 0, sizeof(*fwrt)); fwrt->trans = trans; @@ -71,5 +73,12 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, fwrt->ops = ops; fwrt->ops_ctx = ops_ctx; INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk); + iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir); } IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); + +void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt) +{ + iwl_fw_cancel_timestamp(fwrt); +} +IWL_EXPORT_SYMBOL(iwl_fw_runtime_exit); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 50cfb6d795a5..e25c049f980f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -134,11 +134,21 @@ struct iwl_fw_runtime { /* ts of the beginning of a non-collect fw dbg data period */ unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1]; } dump; +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct { + struct delayed_work wk; + u32 delay; + u64 seq; + } timestamp; +#endif /* CONFIG_IWLWIFI_DEBUGFS */ }; void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, - const struct iwl_fw *fw, - const struct iwl_fw_runtime_ops *ops, void *ops_ctx); + const struct iwl_fw *fw, + const struct iwl_fw_runtime_ops *ops, void *ops_ctx, + struct dentry *dbgfs_dir); + +void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt); static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type cur_fw_img) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c index 76675736ba4f..fb4b6442b4d7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c @@ -63,8 +63,8 @@ #include "runtime.h" #include "fw/api/commands.h" -static void iwl_parse_shared_mem_a000(struct iwl_fw_runtime *fwrt, - struct iwl_rx_packet *pkt) +static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt, + struct iwl_rx_packet *pkt) { struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data; int i, lmac; @@ -143,8 +143,8 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt) return; pkt = cmd.resp_pkt; - if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) - iwl_parse_shared_mem_a000(fwrt, pkt); + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) + iwl_parse_shared_mem_22000(fwrt, pkt); else iwl_parse_shared_mem(fwrt, pkt); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index e21e46cf6f9a..258d439bb0a9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -89,7 +89,7 @@ enum iwl_device_family { IWL_DEVICE_FAMILY_7000, IWL_DEVICE_FAMILY_8000, 
IWL_DEVICE_FAMILY_9000, - IWL_DEVICE_FAMILY_A000, + IWL_DEVICE_FAMILY_22000, }; /* @@ -266,7 +266,7 @@ struct iwl_tt_params { #define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */ #define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */ #define OTP_LOW_IMAGE_SIZE_FAMILY_9000 OTP_LOW_IMAGE_SIZE_FAMILY_8000 -#define OTP_LOW_IMAGE_SIZE_FAMILY_A000 OTP_LOW_IMAGE_SIZE_FAMILY_9000 +#define OTP_LOW_IMAGE_SIZE_FAMILY_22000 OTP_LOW_IMAGE_SIZE_FAMILY_9000 struct iwl_eeprom_params { const u8 regulatory_bands[7]; @@ -330,7 +330,7 @@ struct iwl_pwr_tx_backoff { * @vht_mu_mimo_supported: VHT MU-MIMO support * @rf_id: need to read rf_id to determine the firmware image * @integrated: discrete or integrated - * @gen2: a000 and on transport operation + * @gen2: 22000 and on transport operation * @cdb: CDB support * @nvm_type: see &enum iwl_nvm_type * @tx_cmd_queue_size: size of the cmd queue. If zero, use the same value as @@ -477,13 +477,13 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc; extern const struct iwl_cfg iwl9461_2ac_cfg_soc; extern const struct iwl_cfg iwl9462_2ac_cfg_soc; extern const struct iwl_cfg iwl9560_2ac_cfg_soc; -extern const struct iwl_cfg iwla000_2ac_cfg_hr; -extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb; -extern const struct iwl_cfg iwla000_2ac_cfg_jf; -extern const struct iwl_cfg iwla000_2ax_cfg_hr; -extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0; -extern const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0; -extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0; +extern const struct iwl_cfg iwl22000_2ac_cfg_hr; +extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; +extern const struct iwl_cfg iwl22000_2ac_cfg_jf; +extern const struct iwl_cfg iwl22000_2ax_cfg_hr; +extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0; +extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0; +extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h index 7f16dcce0995..9518a82f44c2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h @@ -95,7 +95,7 @@ TRACE_EVENT(iwlwifi_dev_tx, TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, hdr_len), TP_STRUCT__entry( DEV_ENTRY - + __field(void *, skbaddr) __field(size_t, framelen) __dynamic_array(u8, tfd, tfdlen) @@ -110,6 +110,7 @@ TRACE_EVENT(iwlwifi_dev_tx, ), TP_fast_assign( DEV_ASSIGN; + __entry->skbaddr = skb; __entry->framelen = buf0_len; if (hdr_len > 0) __entry->framelen += skb->len - hdr_len; @@ -120,9 +121,9 @@ TRACE_EVENT(iwlwifi_dev_tx, __get_dynamic_array(buf1), skb->len - hdr_len); ), - TP_printk("[%s] TX %.2x (%zu bytes)", + TP_printk("[%s] TX %.2x (%zu bytes) skbaddr=%p", __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0], - __entry->framelen) + __entry->framelen, __entry->skbaddr) ); TRACE_EVENT(iwlwifi_dev_ucode_error, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 4b224d7d967c..9c4a7f648a44 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -296,7 +296,12 @@ struct iwl_firmware_pieces { u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; /* FW debug data parsed for driver usage */ - struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; + bool dbg_dest_tlv_init; + u8 *dbg_dest_ver; + union { + struct 
iwl_fw_dbg_dest_tlv *dbg_dest_tlv; + struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1; + }; struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; @@ -919,27 +924,60 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, minor = le32_to_cpup(ptr++); local_comp = le32_to_cpup(ptr); - snprintf(drv->fw.fw_version, - sizeof(drv->fw.fw_version), "%u.%u.%u", - major, minor, local_comp); + if (major >= 35) + snprintf(drv->fw.fw_version, + sizeof(drv->fw.fw_version), + "%u.%08x.%u", major, minor, local_comp); + else + snprintf(drv->fw.fw_version, + sizeof(drv->fw.fw_version), + "%u.%u.%u", major, minor, local_comp); break; } case IWL_UCODE_TLV_FW_DBG_DEST: { - struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data; + struct iwl_fw_dbg_dest_tlv *dest = NULL; + struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL; + u8 mon_mode; + + pieces->dbg_dest_ver = (u8 *)tlv_data; + if (*pieces->dbg_dest_ver == 1) { + dest = (void *)tlv_data; + } else if (*pieces->dbg_dest_ver == 0) { + dest_v1 = (void *)tlv_data; + } else { + IWL_ERR(drv, + "The version is %d, and it is invalid\n", + *pieces->dbg_dest_ver); + break; + } - if (pieces->dbg_dest_tlv) { + if (pieces->dbg_dest_tlv_init) { IWL_ERR(drv, "dbg destination ignored, already exists\n"); break; } - pieces->dbg_dest_tlv = dest; + pieces->dbg_dest_tlv_init = true; + + if (dest_v1) { + pieces->dbg_dest_tlv_v1 = dest_v1; + mon_mode = dest_v1->monitor_mode; + } else { + pieces->dbg_dest_tlv = dest; + mon_mode = dest->monitor_mode; + } + IWL_INFO(drv, "Found debug destination: %s\n", - get_fw_dbg_mode_string(dest->monitor_mode)); + get_fw_dbg_mode_string(mon_mode)); + + drv->fw.dbg_dest_reg_num = (dest_v1) ? + tlv_len - + offsetof(struct iwl_fw_dbg_dest_tlv_v1, + reg_ops) : + tlv_len - + offsetof(struct iwl_fw_dbg_dest_tlv, + reg_ops); - drv->fw.dbg_dest_reg_num = - tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv, - reg_ops); drv->fw.dbg_dest_reg_num /= sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]); @@ -948,7 +986,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, case IWL_UCODE_TLV_FW_DBG_CONF: { struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data; - if (!pieces->dbg_dest_tlv) { + if (!pieces->dbg_dest_tlv_init) { IWL_ERR(drv, "Ignore dbg config %d - no destination configured\n", conf->id); @@ -1335,15 +1373,51 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) if (iwl_alloc_ucode(drv, pieces, i)) goto out_free_fw; - if (pieces->dbg_dest_tlv) { - drv->fw.dbg_dest_tlv = - kmemdup(pieces->dbg_dest_tlv, - sizeof(*pieces->dbg_dest_tlv) + - sizeof(pieces->dbg_dest_tlv->reg_ops[0]) * - drv->fw.dbg_dest_reg_num, GFP_KERNEL); + if (pieces->dbg_dest_tlv_init) { + size_t dbg_dest_size = sizeof(*drv->fw.dbg_dest_tlv) + + sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) * + drv->fw.dbg_dest_reg_num; + + drv->fw.dbg_dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL); if (!drv->fw.dbg_dest_tlv) goto out_free_fw; + + if (*pieces->dbg_dest_ver == 0) { + memcpy(drv->fw.dbg_dest_tlv, pieces->dbg_dest_tlv_v1, + dbg_dest_size); + } else { + struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv = + drv->fw.dbg_dest_tlv; + + dest_tlv->version = pieces->dbg_dest_tlv->version; + dest_tlv->monitor_mode = + pieces->dbg_dest_tlv->monitor_mode; + dest_tlv->size_power = + pieces->dbg_dest_tlv->size_power; + dest_tlv->wrap_count = + pieces->dbg_dest_tlv->wrap_count; + dest_tlv->write_ptr_reg = + pieces->dbg_dest_tlv->write_ptr_reg; + dest_tlv->base_shift = + 
pieces->dbg_dest_tlv->base_shift; + memcpy(dest_tlv->reg_ops, + pieces->dbg_dest_tlv->reg_ops, + sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) * + drv->fw.dbg_dest_reg_num); + + /* In version 1 of the destination tlv, which is + * relevant for the internal buffer exclusively, + * the base address is given together with the + * length of the buffer, and the size shift is + * given instead of the end shift. We now store + * these values in base_reg and end_shift, and + * when dumping the data we'll manipulate them to + * extract both the length and the base address */ + dest_tlv->base_reg = pieces->dbg_dest_tlv->cfg_reg; + dest_tlv->end_shift = + pieces->dbg_dest_tlv->size_shift; + } } for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 66e5db41e559..11789ffb6512 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -121,7 +121,7 @@ #define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) #define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20) #define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80) -/* a000 TFD table address, 64 bit */ +/* 22000 TFD table address, 64 bit */ #define TFH_TFDQ_CBB_TABLE (0x1C00) /* Find TFD CB base pointer for given queue */ @@ -140,7 +140,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20); } -/* a000 configuration registers */ +/* 22000 configuration registers */ /* * TFH Configuration register. @@ -697,8 +697,8 @@ struct iwl_tfh_tb { * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM. * Both driver and device share these circular buffers, each of which must be * contiguous 256 TFDs. - * For pre a000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes - * For a000 HW and on it is 256 x 256 bytes-per-TFD = 65 KBytes + * For pre 22000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes + * For 22000 HW and on it is 256 x 256 bytes-per-TFD = 65 KBytes * * Driver must indicate the physical address of the base of each * circular buffer via the FH_MEM_CBBC_QUEUE registers.
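With the unified in-memory layout above, a dump routine can recover both the monitor buffer's base and its length from the single PRPH register named by base_reg, using the IWL_LDBG_M2S_BUF_* masks introduced with the new TLV. The following is a minimal illustrative sketch, not the driver's actual dump path: the helper name is hypothetical and the exact shift arithmetic is an assumption based on the mask definitions.

/*
 * Hypothetical helper: split a MAC2SMEM config register value (already
 * read from the register named by dest_tlv->base_reg) into the SMEM
 * buffer base and length.
 */
static void iwl_m2s_decode_cfg(u32 cfg_val, u8 base_shift, u8 end_shift,
			       u32 *base, u32 *len)
{
	/* bits 0..11: buffer base address, scaled up by base_shift */
	*base = (cfg_val & IWL_LDBG_M2S_BUF_BA_MSK) << base_shift;
	/* bits 16..27: buffer size field, in IWL_M2S_UNIT_SIZE chunks */
	*len = ((cfg_val & IWL_LDBG_M2S_BUF_SIZE_MSK) >> end_shift) *
	       IWL_M2S_UNIT_SIZE;
}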
@@ -750,10 +750,10 @@ struct iwl_tfh_tfd { /** * struct iwlagn_schedq_bc_tbl scheduler byte count table * base physical address provided by SCD_DRAM_BASE_ADDR - * For devices up to a000: + * For devices up to 22000: * @tfd_offset 0-12 - tx command byte count * 12-16 - station index - * For a000 and on: + * For 22000 and on: * @tfd_offset 0-12 - tx command byte count * 12-13 - number of 64 byte chunks * 14-16 - reserved diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 921cab9e2d73..c25ed1a0bbb0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -551,7 +551,7 @@ struct iwl_trans_ops { unsigned int queue_wdg_timeout); void (*txq_disable)(struct iwl_trans *trans, int queue, bool configure_scd); - /* a000 functions */ + /* 22000 functions */ int (*txq_alloc)(struct iwl_trans *trans, struct iwl_tx_queue_cfg_cmd *cmd, int cmd_id, @@ -579,6 +579,7 @@ struct iwl_trans_ops { void (*configure)(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg); void (*set_pmi)(struct iwl_trans *trans, bool state); + void (*sw_reset)(struct iwl_trans *trans); bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags); void (*release_nic_access)(struct iwl_trans *trans, unsigned long *flags); @@ -744,7 +745,7 @@ struct iwl_trans { struct lockdep_map sync_cmd_lockdep_map; #endif - const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; + const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv; const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv; u8 dbg_dest_reg_num; @@ -1124,6 +1125,12 @@ static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) trans->ops->set_pmi(trans, state); } +static inline void iwl_trans_sw_reset(struct iwl_trans *trans) +{ + if (trans->ops->sw_reset) + trans->ops->sw_reset(trans); +} + static inline void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile index a47635c32c11..9ffd21918b5a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o -iwlmvm-y += scan.o time-event.o rs.o +iwlmvm-y += scan.o time-event.o rs.o rs-fw.o iwlmvm-y += power.o coex.o iwlmvm-y += tt.o offloading.o tdls.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 79c80f181f7d..890dbfff3a06 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -7,7 +7,6 @@ * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +33,6 @@ * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -514,36 +512,17 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; - if (!iwl_mvm_has_new_ats_coex_api(mvm)) { - struct iwl_bt_coex_profile_notif_v4 *v4 = (void *)pkt->data; - - mvm->last_bt_notif.mbox_msg[0] = v4->mbox_msg[0]; - mvm->last_bt_notif.mbox_msg[1] = v4->mbox_msg[1]; - mvm->last_bt_notif.mbox_msg[2] = v4->mbox_msg[2]; - mvm->last_bt_notif.mbox_msg[3] = v4->mbox_msg[3]; - mvm->last_bt_notif.msg_idx = v4->msg_idx; - mvm->last_bt_notif.bt_ci_compliance = v4->bt_ci_compliance; - mvm->last_bt_notif.primary_ch_lut = v4->primary_ch_lut; - mvm->last_bt_notif.secondary_ch_lut = v4->secondary_ch_lut; - mvm->last_bt_notif.bt_activity_grading = - v4->bt_activity_grading; - mvm->last_bt_notif.ttc_status = v4->ttc_status; - mvm->last_bt_notif.rrc_status = v4->rrc_status; - } else { - /* save this notification for future use: rssi fluctuations */ - memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif)); - } - IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); - IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", - mvm->last_bt_notif.bt_ci_compliance); + IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", - le32_to_cpu(mvm->last_bt_notif.primary_ch_lut)); + le32_to_cpu(notif->primary_ch_lut)); IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n", - le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut)); + le32_to_cpu(notif->secondary_ch_lut)); IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n", - le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)); + le32_to_cpu(notif->bt_activity_grading)); + /* remember this notification for future use: rssi fluctuations */ + memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif)); iwl_mvm_bt_coex_notif_handle(mvm); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index b1f73dcabd31..0e6cf39285f4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -429,231 +429,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, return err; } -enum iwl_mvm_tcp_packet_type { - MVM_TCP_TX_SYN, - MVM_TCP_RX_SYNACK, - MVM_TCP_TX_DATA, - MVM_TCP_RX_ACK, - MVM_TCP_RX_WAKE, - MVM_TCP_TX_FIN, -}; - -static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr) -{ - __sum16 check = tcp_v4_check(len, saddr, daddr, 0); - return cpu_to_le16(be16_to_cpu((__force __be16)check)); -} - -static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif, - struct cfg80211_wowlan_tcp *tcp, - void *_pkt, u8 *mask, - __le16 *pseudo_hdr_csum, - enum iwl_mvm_tcp_packet_type ptype) -{ - struct { - struct ethhdr eth; - struct iphdr ip; - struct tcphdr tcp; - u8 data[]; - } __packed *pkt = _pkt; - u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr); - int i; - - pkt->eth.h_proto = cpu_to_be16(ETH_P_IP), - pkt->ip.version = 4; - pkt->ip.ihl = 5; - pkt->ip.protocol = IPPROTO_TCP; - - switch (ptype) { - case MVM_TCP_TX_SYN: - case MVM_TCP_TX_DATA: - case MVM_TCP_TX_FIN: - memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN); - memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN); - pkt->ip.ttl = 128; - pkt->ip.saddr = tcp->src; - pkt->ip.daddr = tcp->dst; - pkt->tcp.source = cpu_to_be16(tcp->src_port); - pkt->tcp.dest = cpu_to_be16(tcp->dst_port); - /* overwritten for TX SYN later */ - pkt->tcp.doff = sizeof(struct tcphdr) / 4; - 
pkt->tcp.window = cpu_to_be16(65000); - break; - case MVM_TCP_RX_SYNACK: - case MVM_TCP_RX_ACK: - case MVM_TCP_RX_WAKE: - memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN); - memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN); - pkt->ip.saddr = tcp->dst; - pkt->ip.daddr = tcp->src; - pkt->tcp.source = cpu_to_be16(tcp->dst_port); - pkt->tcp.dest = cpu_to_be16(tcp->src_port); - break; - default: - WARN_ON(1); - return; - } - - switch (ptype) { - case MVM_TCP_TX_SYN: - /* firmware assumes 8 option bytes - 8 NOPs for now */ - memset(pkt->data, 0x01, 8); - ip_tot_len += 8; - pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4; - pkt->tcp.syn = 1; - break; - case MVM_TCP_TX_DATA: - ip_tot_len += tcp->payload_len; - memcpy(pkt->data, tcp->payload, tcp->payload_len); - pkt->tcp.psh = 1; - pkt->tcp.ack = 1; - break; - case MVM_TCP_TX_FIN: - pkt->tcp.fin = 1; - pkt->tcp.ack = 1; - break; - case MVM_TCP_RX_SYNACK: - pkt->tcp.syn = 1; - pkt->tcp.ack = 1; - break; - case MVM_TCP_RX_ACK: - pkt->tcp.ack = 1; - break; - case MVM_TCP_RX_WAKE: - ip_tot_len += tcp->wake_len; - pkt->tcp.psh = 1; - pkt->tcp.ack = 1; - memcpy(pkt->data, tcp->wake_data, tcp->wake_len); - break; - } - - switch (ptype) { - case MVM_TCP_TX_SYN: - case MVM_TCP_TX_DATA: - case MVM_TCP_TX_FIN: - pkt->ip.tot_len = cpu_to_be16(ip_tot_len); - pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl); - break; - case MVM_TCP_RX_WAKE: - for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) { - u8 tmp = tcp->wake_mask[i]; - mask[i + 6] |= tmp << 6; - if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8)) - mask[i + 7] = tmp >> 2; - } - /* fall through for ethernet/IP/TCP headers mask */ - case MVM_TCP_RX_SYNACK: - case MVM_TCP_RX_ACK: - mask[0] = 0xff; /* match ethernet */ - /* - * match ethernet, ip.version, ip.ihl - * the ip.ihl half byte is really masked out by firmware - */ - mask[1] = 0x7f; - mask[2] = 0x80; /* match ip.protocol */ - mask[3] = 0xfc; /* match ip.saddr, ip.daddr */ - mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */ - mask[5] = 0x80; /* match tcp flags */ - /* leave rest (0 or set for MVM_TCP_RX_WAKE) */ - break; - }; - - *pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr), - pkt->ip.saddr, pkt->ip.daddr); -} - -static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct cfg80211_wowlan_tcp *tcp) -{ - struct iwl_wowlan_remote_wake_config *cfg; - struct iwl_host_cmd cmd = { - .id = REMOTE_WAKE_CONFIG_CMD, - .len = { sizeof(*cfg), }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - int ret; - - if (!tcp) - return 0; - - cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); - if (!cfg) - return -ENOMEM; - cmd.data[0] = cfg; - - cfg->max_syn_retries = 10; - cfg->max_data_retries = 10; - cfg->tcp_syn_ack_timeout = 1; /* seconds */ - cfg->tcp_ack_timeout = 1; /* seconds */ - - /* SYN (TX) */ - iwl_mvm_build_tcp_packet( - vif, tcp, cfg->syn_tx.data, NULL, - &cfg->syn_tx.info.tcp_pseudo_header_checksum, - MVM_TCP_TX_SYN); - cfg->syn_tx.info.tcp_payload_length = 0; - - /* SYN/ACK (RX) */ - iwl_mvm_build_tcp_packet( - vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask, - &cfg->synack_rx.info.tcp_pseudo_header_checksum, - MVM_TCP_RX_SYNACK); - cfg->synack_rx.info.tcp_payload_length = 0; - - /* KEEPALIVE/ACK (TX) */ - iwl_mvm_build_tcp_packet( - vif, tcp, cfg->keepalive_tx.data, NULL, - &cfg->keepalive_tx.info.tcp_pseudo_header_checksum, - MVM_TCP_TX_DATA); - cfg->keepalive_tx.info.tcp_payload_length = - cpu_to_le16(tcp->payload_len); - cfg->sequence_number_offset = tcp->payload_seq.offset; - /* length 
must be 0..4, the field is little endian */ - cfg->sequence_number_length = tcp->payload_seq.len; - cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start); - cfg->keepalive_interval = cpu_to_le16(tcp->data_interval); - if (tcp->payload_tok.len) { - cfg->token_offset = tcp->payload_tok.offset; - cfg->token_length = tcp->payload_tok.len; - cfg->num_tokens = - cpu_to_le16(tcp->tokens_size % tcp->payload_tok.len); - memcpy(cfg->tokens, tcp->payload_tok.token_stream, - tcp->tokens_size); - } else { - /* set tokens to max value to almost never run out */ - cfg->num_tokens = cpu_to_le16(65535); - } - - /* ACK (RX) */ - iwl_mvm_build_tcp_packet( - vif, tcp, cfg->keepalive_ack_rx.data, - cfg->keepalive_ack_rx.rx_mask, - &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum, - MVM_TCP_RX_ACK); - cfg->keepalive_ack_rx.info.tcp_payload_length = 0; - - /* WAKEUP (RX) */ - iwl_mvm_build_tcp_packet( - vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask, - &cfg->wake_rx.info.tcp_pseudo_header_checksum, - MVM_TCP_RX_WAKE); - cfg->wake_rx.info.tcp_payload_length = - cpu_to_le16(tcp->wake_len); - - /* FIN */ - iwl_mvm_build_tcp_packet( - vif, tcp, cfg->fin_tx.data, NULL, - &cfg->fin_tx.info.tcp_pseudo_header_checksum, - MVM_TCP_TX_FIN); - cfg->fin_tx.info.tcp_payload_length = 0; - - ret = iwl_mvm_send_cmd(mvm, &cmd); - kfree(cfg); - - return ret; -} - static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *ap_sta) { @@ -1082,12 +857,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm, if (ret) return ret; - ret = iwl_mvm_send_proto_offload(mvm, vif, false, true, 0); - if (ret) - return ret; - - ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp); - return ret; + return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0); } static int diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 2ff594f11259..a7892c1254a2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -425,6 +425,50 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } +static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; + struct iwl_mvm *mvm = lq_sta->pers.drv; + static const size_t bufsz = 2048; + char *buff; + int desc = 0; + ssize_t ret; + + buff = kmalloc(bufsz, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + mutex_lock(&mvm->mutex); + + desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n", + lq_sta->pers.sta_id); + desc += scnprintf(buff + desc, bufsz - desc, + "fixed rate 0x%X\n", + lq_sta->pers.dbg_fixed_rate); + desc += scnprintf(buff + desc, bufsz - desc, + "A-MPDU size limit %d\n", + lq_sta->pers.dbg_agg_frame_count_lim); + desc += scnprintf(buff + desc, bufsz - desc, + "valid_tx_ant %s%s%s\n", + (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "", + (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "", + (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? 
"ANT_C" : ""); + desc += scnprintf(buff + desc, bufsz - desc, + "last tx rate=0x%X ", + lq_sta->last_rate_n_flags); + + desc += rs_pretty_print_rate(buff + desc, bufsz - desc, + lq_sta->last_rate_n_flags); + mutex_unlock(&mvm->mutex); + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -470,8 +514,7 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf, } static -int iwl_mvm_coex_dump_mbox(struct iwl_mvm *mvm, - struct iwl_bt_coex_profile_notif *notif, char *buf, +int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, int pos, int bufsz) { pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); @@ -525,12 +568,7 @@ int iwl_mvm_coex_dump_mbox(struct iwl_mvm *mvm, BT_MBOX_PRINT(3, INBAND_P, false); BT_MBOX_PRINT(3, MSG_TYPE_2, false); BT_MBOX_PRINT(3, SSN_2, false); - BT_MBOX_PRINT(3, UPDATE_REQUEST, !iwl_mvm_has_new_ats_coex_api(mvm)); - - if (iwl_mvm_has_new_ats_coex_api(mvm)) { - BT_MBOX_PRINT(4, ATS_BT_INTERVAL, false); - BT_MBOX_PRINT(4, ATS_BT_ACTIVE_MAX_TH, true); - } + BT_MBOX_PRINT(3, UPDATE_REQUEST, true); return pos; } @@ -549,7 +587,7 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, mutex_lock(&mvm->mutex); - pos += iwl_mvm_coex_dump_mbox(mvm, notif, buf, pos, bufsz); + pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n", notif->bt_ci_compliance); @@ -721,6 +759,9 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file, mutex_lock(&mvm->mutex); + if (iwl_mvm_firmware_running(mvm)) + iwl_mvm_request_statistics(mvm, false); + pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - OFDM"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { @@ -936,7 +977,8 @@ static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm, continue; pos += scnprintf(pos, endpos - pos, "Rate[%d]: ", (int)(ARRAY_SIZE(stats->last_rates) - i)); - pos += rs_pretty_print_rate(pos, stats->last_rates[idx]); + pos += rs_pretty_print_rate(pos, endpos - pos, + stats->last_rates[idx]); } spin_unlock_bh(&mvm->drv_stats_lock); @@ -1179,7 +1221,7 @@ static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm, loff_t *ppos) { struct iwl_trans *trans = mvm->trans; - const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv; + const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv; struct iwl_continuous_record_cmd cont_rec = {}; int ret, rec_mode; @@ -1603,6 +1645,19 @@ static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf, #define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \ MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) +#define MVM_DEBUGFS_WRITE_STA_FILE_OPS(name, bufsz) \ + _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta) +#define MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(name, bufsz) \ + _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta) + +#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do { \ + if (!debugfs_create_file(alias, mode, parent, sta, \ + &iwl_dbgfs_##name##_ops)) \ + goto err; \ + } while (0) +#define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \ + MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode) + static ssize_t iwl_dbgfs_prph_reg_read(struct file *file, char __user *user_buf, @@ -1687,6 +1742,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64); 
MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64); MVM_DEBUGFS_READ_FILE_OPS(nic_temp); MVM_DEBUGFS_READ_FILE_OPS(stations); +MVM_DEBUGFS_READ_FILE_OPS(rs_data); MVM_DEBUGFS_READ_FILE_OPS(bt_notif); MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64); @@ -1851,6 +1907,21 @@ static const struct file_operations iwl_dbgfs_mem_ops = { .llseek = default_llseek, }; +void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct dentry *dir) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + if (iwl_mvm_has_tlc_offload(mvm)) + MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, S_IRUSR); + + return; +err: + IWL_ERR(mvm, "Can't create the mvm station debugfs entry\n"); +} + int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) { struct dentry *bcast_dir __maybe_unused; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index c0de7bb86cf7..0920be637b57 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -297,7 +297,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, if (ret) { struct iwl_trans *trans = mvm->trans; - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS), @@ -923,11 +923,11 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) ret = iwl_run_init_mvm_ucode(mvm, false); - if (iwlmvm_mod_params.init_dbg) - return 0; - if (ret) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); + + if (iwlmvm_mod_params.init_dbg) + return 0; return ret; } @@ -998,9 +998,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; /* Init RSS configuration */ - /* TODO - remove a000 disablement when we have RXQ config API */ + /* TODO - remove 22000 disablement when we have RXQ config API */ if (iwl_mvm_has_new_rx_api(mvm) && - mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_A000) { + mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) { ret = iwl_send_rss_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", @@ -1111,7 +1111,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: - if (!iwlmvm_mod_params.init_dbg) + if (!iwlmvm_mod_params.init_dbg || !ret) iwl_mvm_stop_device(mvm); return ret; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 3e92a117c0b8..8aed40a8bc38 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -114,29 +114,6 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { }, }; -#ifdef CONFIG_PM_SLEEP -static const struct nl80211_wowlan_tcp_data_token_feature -iwl_mvm_wowlan_tcp_token_feature = { - .min_len = 0, - .max_len = 255, - .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS, -}; - -static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = { - .tok = &iwl_mvm_wowlan_tcp_token_feature, - .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN - - sizeof(struct ethhdr) - - sizeof(struct iphdr) - - sizeof(struct tcphdr), - .data_interval_max = 65535, /* __le16 in API */ - .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN - - sizeof(struct ethhdr) - - sizeof(struct iphdr) - - sizeof(struct tcphdr), - .seq = true, -}; -#endif - 
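The rs_data entry registered in iwl_mvm_sta_add_debugfs() above rides on a generic mac80211 hook: with CONFIG_MAC80211_DEBUGFS enabled, mac80211 invokes the driver's sta_add_debugfs operation with a per-station debugfs dentry as each station is added, which is what lets a driver with firmware rate scaling expose its state without going through a rate-control algorithm's debugfs. A minimal sketch of the pattern for an arbitrary driver; the my_* names are hypothetical, and my_rs_data_ops stands for an ordinary file_operations using .open = simple_open:

#include <linux/debugfs.h>
#include <net/mac80211.h>

/* hypothetical fops dumping per-station rate-scaling state */
extern const struct file_operations my_rs_data_ops;

static void my_sta_add_debugfs(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct dentry *dir)
{
	/* sta becomes file->private_data through simple_open */
	debugfs_create_file("rs_data", 0400, dir, sta, &my_rs_data_ops);
}

As in the mac80211.c hunk below, the hook is then plugged into the driver's ieee80211_ops table under the relevant debugfs config option.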
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING /* * Use the reserved field to indicate magic values. @@ -443,6 +420,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); + + if (iwl_mvm_has_tlc_offload(mvm)) { + ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); + ieee80211_hw_set(hw, HAS_RATE_CONTROL); + } + if (iwl_mvm_has_new_rx_api(mvm)) ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); @@ -477,7 +460,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) /* this is the case for CCK frames, it's better (only 8) for OFDM */ hw->radiotap_timestamp.accuracy = 22; - hw->rate_control_algorithm = "iwl-mvm-rs"; + if (!iwl_mvm_has_tlc_offload(mvm)) + hw->rate_control_algorithm = RS_NAME; + hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; @@ -702,7 +687,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES; - mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support; hw->wiphy->wowlan = &mvm->wowlan; } #endif @@ -3216,6 +3200,10 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, duration, type); + /* + * Flush the done work, just in case it's still pending, so that + * the work it does can complete and we can accept new frames. + */ flush_work(&mvm->roc_done_wk); mutex_lock(&mvm->mutex); @@ -3813,7 +3801,7 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, mvm->noa_duration = noa_duration; mvm->noa_vif = vif; - return iwl_mvm_update_quotas(mvm, false, NULL); + return iwl_mvm_update_quotas(mvm, true, NULL); case IWL_MVM_TM_CMD_SET_BEACON_FILTER: /* must be associated client vif - ignore authorized */ if (!vif || vif->type != NL80211_IFTYPE_STATION || @@ -4301,7 +4289,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, mvm->trans->num_rx_queues); /* TODO - remove this when we have RXQ config API */ - if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) { + if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) { qmask = BIT(0); if (notif->sync) atomic_set(&mvm->queue_sync_counter, 1); @@ -4414,4 +4402,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { #endif .get_survey = iwl_mvm_mac_get_survey, .sta_statistics = iwl_mvm_mac_sta_statistics, +#ifdef CONFIG_IWLWIFI_DEBUGFS + .sta_add_debugfs = iwl_mvm_sta_add_debugfs, +#endif }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 55ab5349dd40..2d28e0804218 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1248,7 +1248,7 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm) static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm) { /* TODO - better define this */ - return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_A000; + return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000; } static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) @@ -1272,16 +1272,16 @@ static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm) IWL_UCODE_TLV_API_NEW_RX_STATS); } -static inline bool iwl_mvm_has_new_ats_coex_api(struct iwl_mvm *mvm) +static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, - 
IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL); + IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY); } -static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm) +static inline bool iwl_mvm_has_tlc_offload(const struct iwl_mvm *mvm) { - return fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY); + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TLC_OFFLOAD); } static inline struct agg_tx_status * @@ -1600,9 +1600,9 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) /* rate scaling */ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init); void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); -int rs_pretty_print_rate(char *buf, const u32 rate); +int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate); void rs_update_last_rssi(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, + struct iwl_mvm_sta *mvmsta, struct ieee80211_rx_status *rx_status); /* power management */ @@ -1882,5 +1882,11 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b); int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm); +#ifdef CONFIG_IWLWIFI_DEBUGFS +void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct dentry *dir); +#endif #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 45470b6b351a..5d525a0023dc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -127,11 +127,8 @@ static int __init iwl_mvm_init(void) } ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops); - - if (ret) { + if (ret) pr_err("Unable to register MVM op_mode: %d\n", ret); - iwl_mvm_rate_control_unregister(); - } return ret; } @@ -605,7 +602,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->fw = fw; mvm->hw = hw; - iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm); + iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm, + dbgfs_dir); mvm->init_status = 0; @@ -750,7 +748,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mutex_lock(&mvm->mutex); iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); err = iwl_run_init_mvm_ucode(mvm, true); - if (!iwlmvm_mod_params.init_dbg) + if (!iwlmvm_mod_params.init_dbg || !err) iwl_mvm_stop_device(mvm); iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); mutex_unlock(&mvm->mutex); @@ -804,6 +802,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_leds_exit(mvm); iwl_mvm_thermal_exit(mvm); out_free: + iwl_fw_runtime_exit(&mvm->fwrt); iwl_fw_flush_dump(&mvm->fwrt); if (iwlmvm_mod_params.init_dbg) @@ -844,7 +843,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS) kfree(mvm->d3_resume_sram); #endif - + iwl_fw_runtime_exit(&mvm->fwrt); iwl_trans_op_mode_leave(mvm->trans); iwl_phy_db_free(mvm->phy_db); @@ -1021,6 +1020,8 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, iwl_mvm_rx_queue_notif(mvm, rxb, 0); else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); + else if (cmd == WIDE_ID(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF)) + iwl_mvm_tlc_update_notif(mvm, pkt); else iwl_mvm_rx_common(mvm, rxb, pkt); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c 
b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c index b4a0264329b7..690559bdf421 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c @@ -202,6 +202,10 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) + return 0; + /* update all upon completion */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) return 0; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c new file mode 100644 index 000000000000..55d1274c6092 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -0,0 +1,314 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include "rs.h" +#include "fw-api.h" +#include "sta.h" +#include "iwl-op-mode.h" +#include "mvm.h" + +static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta) +{ + switch (sta->bandwidth) { + case IEEE80211_STA_RX_BW_160: + return IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ; + case IEEE80211_STA_RX_BW_80: + return IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ; + case IEEE80211_STA_RX_BW_40: + return IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ; + case IEEE80211_STA_RX_BW_20: + default: + return IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ; + } +} + +static u8 rs_fw_set_active_chains(u8 chains) +{ + u8 fw_chains = 0; + + if (chains & ANT_A) + fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK; + if (chains & ANT_B) + fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK; + if (chains & ANT_C) + fw_chains |= IWL_TLC_MNG_CHAIN_C_MSK; + + return fw_chains; +} + +static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta) +{ + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + u8 supp = 0; + + if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) + supp |= IWL_TLC_MNG_SGI_20MHZ_MSK; + if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) + supp |= IWL_TLC_MNG_SGI_40MHZ_MSK; + if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80) + supp |= IWL_TLC_MNG_SGI_80MHZ_MSK; + if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160) + supp |= IWL_TLC_MNG_SGI_160MHZ_MSK; + + return supp; +} + +static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm, + struct ieee80211_sta *sta) +{ + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + bool vht_ena = vht_cap && vht_cap->vht_supported; + u16 flags = IWL_TLC_MNG_CFG_FLAGS_CCK_MSK | + IWL_TLC_MNG_CFG_FLAGS_DCM_MSK | + IWL_TLC_MNG_CFG_FLAGS_DD_MSK; + + if (mvm->cfg->ht_params->stbc && + (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && + ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) || + (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)))) + flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; + + if (mvm->cfg->ht_params->ldpc && + ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) || + (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) + flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; + + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) && + (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && + (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) + flags |= IWL_TLC_MNG_CFG_FLAGS_BF_MSK; + + return flags; +} + +static +int rs_fw_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap, + int nss) +{ + u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) & + (0x3 << (2 * (nss - 1))); + rx_mcs >>= (2 * (nss - 1)); + + switch (rx_mcs) { + case IEEE80211_VHT_MCS_SUPPORT_0_7: + return IWL_TLC_MNG_HT_RATE_MCS7; + case IEEE80211_VHT_MCS_SUPPORT_0_8: + return IWL_TLC_MNG_HT_RATE_MCS8; + case IEEE80211_VHT_MCS_SUPPORT_0_9: + return IWL_TLC_MNG_HT_RATE_MCS9; + default: + WARN_ON_ONCE(1); + break; + } + + return 0; +} + +static void rs_fw_vht_set_enabled_rates(struct ieee80211_sta *sta, + struct ieee80211_sta_vht_cap *vht_cap, + struct iwl_tlc_config_cmd *cmd) +{ + u16 supp; + int i, highest_mcs; + + for (i = 0; i < sta->rx_nss; i++) { + if (i == MAX_RS_ANT_NUM) + break; + + highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1); + if (!highest_mcs) + continue; + + supp = BIT(highest_mcs + 1) - 1; + if (sta->bandwidth == IEEE80211_STA_RX_BW_20) + supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9); + + cmd->ht_supp_rates[i] = cpu_to_le16(supp); + } +} + 
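rs_fw_vht_highest_rx_mcs_index() above decodes the VHT rx_mcs_map, which packs one 2-bit field per spatial stream (0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = not supported), and rs_fw_vht_set_enabled_rates() then expands the result into a per-stream rate mask via BIT(highest_mcs + 1) - 1. A minimal standalone sketch of that decode; the map value 0xfffa is illustrative and not taken from this patch.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the mask-and-shift in rs_fw_vht_highest_rx_mcs_index(). */
static int vht_highest_mcs(uint16_t rx_mcs_map, int nss)
{
	unsigned int field = (rx_mcs_map >> (2 * (nss - 1))) & 0x3;

	switch (field) {
	case 0: return 7;	/* IEEE80211_VHT_MCS_SUPPORT_0_7 */
	case 1: return 8;	/* IEEE80211_VHT_MCS_SUPPORT_0_8 */
	case 2: return 9;	/* IEEE80211_VHT_MCS_SUPPORT_0_9 */
	default: return 0;	/* not supported; caller skips the stream */
	}
}

int main(void)
{
	/* Illustrative map: streams 1-2 advertise MCS 0-9, the rest nothing. */
	uint16_t map = 0xfffa;
	int nss;

	for (nss = 1; nss <= 4; nss++) {
		int mcs = vht_highest_mcs(map, nss);

		if (!mcs) {
			printf("NSS %d: not supported\n", nss);
			continue;
		}
		/* The driver turns this into a mask: BIT(mcs + 1) - 1. */
		printf("NSS %d: MCS 0-%d, mask 0x%03x\n",
		       nss, mcs, (1u << (mcs + 1)) - 1);
	}
	return 0;
}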
+static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, + struct ieee80211_supported_band *sband, + struct iwl_tlc_config_cmd *cmd) +{ + int i; + unsigned long tmp; + unsigned long supp; /* must be unsigned long for for_each_set_bit */ + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + + /* non HT rates */ + supp = 0; + tmp = sta->supp_rates[sband->band]; + for_each_set_bit(i, &tmp, BITS_PER_LONG) + supp |= BIT(sband->bitrates[i].hw_value); + + cmd->non_ht_supp_rates = cpu_to_le16(supp); + cmd->mode = IWL_TLC_MNG_MODE_NON_HT; + + /* HT/VHT rates */ + if (vht_cap && vht_cap->vht_supported) { + cmd->mode = IWL_TLC_MNG_MODE_VHT; + rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd); + } else if (ht_cap && ht_cap->ht_supported) { + cmd->mode = IWL_TLC_MNG_MODE_HT; + cmd->ht_supp_rates[0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]); + cmd->ht_supp_rates[1] = cpu_to_le16(ht_cap->mcs.rx_mask[1]); + } +} + +static void rs_fw_tlc_mng_notif_req_config(struct iwl_mvm *mvm, u8 sta_id) +{ + u32 cmd_id = iwl_cmd_id(TLC_MNG_NOTIF_REQ_CMD, DATA_PATH_GROUP, 0); + struct iwl_tlc_notif_req_config_cmd cfg_cmd = { + .sta_id = sta_id, + .flags = cpu_to_le16(IWL_TLC_NOTIF_INIT_RATE_MSK), + .interval = cpu_to_le16(IWL_TLC_NOTIF_REQ_INTERVAL), + }; + int ret; + + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd); + if (ret) + IWL_ERR(mvm, "Failed to send TLC notif request (%d)\n", ret); +} + +void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) +{ + struct iwl_tlc_update_notif *notif; + struct iwl_mvm_sta *mvmsta; + struct iwl_lq_sta_rs_fw *lq_sta; + + notif = (void *)pkt->data; + mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, notif->sta_id); + + if (!mvmsta) { + IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", + notif->sta_id); + return; + } + + lq_sta = &mvmsta->lq_sta.rs_fw; + + if (le16_to_cpu(notif->flags) & IWL_TLC_NOTIF_INIT_RATE_MSK) { + lq_sta->last_rate_n_flags = + le32_to_cpu(notif->values[IWL_TLC_NOTIF_INIT_RATE_POS]); + IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n", + lq_sta->last_rate_n_flags); + } +} + +void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + enum nl80211_band band) +{ + struct ieee80211_hw *hw = mvm->hw; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; + u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0); + struct ieee80211_supported_band *sband; + struct iwl_tlc_config_cmd cfg_cmd = { + .sta_id = mvmsta->sta_id, + .max_supp_ch_width = rs_fw_bw_from_sta_bw(sta), + .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)), + .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)), + .max_supp_ss = sta->rx_nss, + .max_ampdu_cnt = cpu_to_le32(mvmsta->max_agg_bufsize), + .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta), + }; + int ret; + + memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers)); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + iwl_mvm_reset_frame_stats(mvm); +#endif + sband = hw->wiphy->bands[band]; + rs_fw_set_supp_rates(sta, sband, &cfg_cmd); + + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd); + if (ret) + IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret); + + rs_fw_tlc_mng_notif_req_config(mvm, cfg_cmd.sta_id); +} + +int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, + bool enable) +{ + /* TODO: need to introduce a new FW cmd since LQ cmd is not relevant */ + IWL_DEBUG_RATE(mvm, "tx protection - not implemented yet.\n"); 
+ return 0; +} + +void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta) +{ + struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; + + IWL_DEBUG_RATE(mvm, "create station rate scale window\n"); + + lq_sta->pers.drv = mvm; + lq_sta->pers.sta_id = mvmsta->sta_id; + lq_sta->pers.chains = 0; + memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); + lq_sta->pers.last_rssi = S8_MIN; + lq_sta->last_rate_n_flags = 0; + +#ifdef CONFIG_MAC80211_DEBUGFS + lq_sta->pers.dbg_fixed_rate = 0; +#endif +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index c69515ed72df..60abb0084ee5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -42,8 +42,6 @@ #include "mvm.h" #include "debugfs.h" -#define RS_NAME "iwl-mvm-rs" - #define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */ /* Calculations of success ratio are done in fixed point where 12800 is 100%. @@ -809,7 +807,7 @@ static int rs_collect_tlc_data(struct iwl_mvm *mvm, return -EINVAL; if (tbl->column != RS_COLUMN_INVALID) { - struct lq_sta_pers *pers = &mvmsta->lq_sta.pers; + struct lq_sta_pers *pers = &mvmsta->lq_sta.rs_drv.pers; pers->tx_stats[tbl->column][scale_index].total += attempts; pers->tx_stats[tbl->column][scale_index].success += successes; @@ -1206,7 +1204,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info); u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; + struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv; /* Treat uninitialized rate scaling data same as non-existing. 
*/ if (!lq_sta) { @@ -1416,13 +1414,13 @@ done: /* * mac80211 sends us Tx status */ -static void rs_mac80211_tx_status(void *mvm_r, - struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta, - struct sk_buff *skb) +static void rs_drv_mac80211_tx_status(void *mvm_r, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r; + struct iwl_op_mode *op_mode = mvm_r; struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -1877,12 +1875,10 @@ static int rs_switch_to_column(struct iwl_mvm *mvm, struct rs_rate *rate = &search_tbl->rate; const struct rs_tx_column *column = &rs_tx_columns[col_id]; const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column]; - u32 sz = (sizeof(struct iwl_scale_tbl_info) - - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); unsigned long rate_mask = 0; u32 rate_idx = 0; - memcpy(search_tbl, tbl, sz); + memcpy(search_tbl, tbl, offsetof(struct iwl_scale_tbl_info, win)); rate->sgi = column->sgi; rate->ant = column->ant; @@ -2787,9 +2783,10 @@ out: /* Save info about RSSI of last Rx */ void rs_update_last_rssi(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, + struct iwl_mvm_sta *mvmsta, struct ieee80211_rx_status *rx_status) { + struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv; int i; lq_sta->pers.chains = rx_status->chains; @@ -2858,15 +2855,15 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init); } -static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, - struct ieee80211_tx_rate_control *txrc) +static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, + void *mvm_sta, + struct ieee80211_tx_rate_control *txrc) { - struct sk_buff *skb = txrc->skb; - struct iwl_op_mode *op_mode __maybe_unused = - (struct iwl_op_mode *)mvm_r; + struct iwl_op_mode *op_mode = mvm_r; struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); + struct sk_buff *skb = txrc->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct iwl_lq_sta *lq_sta = mvm_sta; + struct iwl_lq_sta *lq_sta; struct rs_rate *optimal_rate; u32 last_ucode_rate; @@ -2878,18 +2875,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, mvm_sta = NULL; } - /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ - - /* Treat uninitialized rate scaling data same as non-existing. */ - if (lq_sta && !lq_sta->pers.drv) { - IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n"); - mvm_sta = NULL; - } - /* Send management frames and NO_ACK data using lowest rate. 
*/ if (rate_control_send_low(sta, mvm_sta, txrc)) return; + if (!mvm_sta) + return; + + lq_sta = mvm_sta; iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags, info->band, &info->control.rates[0]); info->control.rates[0].count = 1; @@ -2906,13 +2899,13 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, } } -static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, - gfp_t gfp) +static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, + gfp_t gfp) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate; struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; + struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv; IWL_DEBUG_RATE(mvm, "create station rate scale window\n"); @@ -2926,7 +2919,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); lq_sta->pers.last_rssi = S8_MIN; - return &mvmsta->lq_sta; + return lq_sta; } static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap, @@ -3043,7 +3036,7 @@ static void rs_vht_init(struct iwl_mvm *mvm, } #ifdef CONFIG_IWLWIFI_DEBUGFS -static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm) +void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm) { spin_lock_bh(&mvm->drv_stats_lock); memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats)); @@ -3111,15 +3104,15 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) /* * Called after adding a new station to initialize rate scaling */ -void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band, bool init) +static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + enum nl80211_band band, bool init) { int i, j; struct ieee80211_hw *hw = mvm->hw; struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; + struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv; struct ieee80211_supported_band *sband; unsigned long supp; /* must be unsigned long for for_each_set_bit */ @@ -3194,16 +3187,15 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, rs_initialize_lq(mvm, sta, lq_sta, band, init); } -static void rs_rate_update(void *mvm_r, - struct ieee80211_supported_band *sband, - struct cfg80211_chan_def *chandef, - struct ieee80211_sta *sta, void *priv_sta, - u32 changed) +static void rs_drv_rate_update(void *mvm_r, + struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, + void *priv_sta, u32 changed) { + struct iwl_op_mode *op_mode = mvm_r; + struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); u8 tid; - struct iwl_op_mode *op_mode = - (struct iwl_op_mode *)mvm_r; - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); if (!iwl_mvm_sta_from_mac80211(sta)->vif) return; @@ -3385,7 +3377,7 @@ static void rs_bfer_active_iter(void *_data, { struct rs_bfer_active_iter_data *data = _data; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.lq; + struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.rs_drv.lq; u32 ss_params = le32_to_cpu(lq_cmd->ss_params); if (sta == data->exclude_sta) @@ -3497,7 +3489,8 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm, /* Disallow BFER on another 
STA if active and we're a higher priority */ if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) { - struct iwl_lq_cmd *bfersta_lq_cmd = &bfer_mvmsta->lq_sta.lq; + struct iwl_lq_cmd *bfersta_lq_cmd = + &bfer_mvmsta->lq_sta.rs_drv.lq; u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params); bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED; @@ -3569,14 +3562,14 @@ static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) { return hw->priv; } + /* rate scale requires free function to be implemented */ static void rs_free(void *mvm_rate) { return; } -static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, - void *mvm_sta) +static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta) { struct iwl_op_mode *op_mode __maybe_unused = mvm_r; struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); @@ -3586,7 +3579,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, } #ifdef CONFIG_MAC80211_DEBUGFS -int rs_pretty_print_rate(char *buf, const u32 rate) +int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) { char *type, *bw; @@ -3597,10 +3590,10 @@ int rs_pretty_print_rate(char *buf, const u32 rate) !(rate & RATE_MCS_VHT_MSK)) { int index = iwl_hwrate_to_plcp_idx(rate); - return sprintf(buf, "Legacy | ANT: %s Rate: %s Mbps\n", - rs_pretty_ant(ant), - index == IWL_RATE_INVALID ? "BAD" : - iwl_rate_mcs[index].mbps); + return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n", + rs_pretty_ant(ant), + index == IWL_RATE_INVALID ? "BAD" : + iwl_rate_mcs[index].mbps); } if (rate & RATE_MCS_VHT_MSK) { @@ -3634,12 +3627,13 @@ int rs_pretty_print_rate(char *buf, const u32 rate) bw = "BAD BW"; } - return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n", - type, rs_pretty_ant(ant), bw, mcs, nss, - (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ", - (rate & RATE_MCS_STBC_MSK) ? "STBC " : "", - (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "", - (rate & RATE_MCS_BF_MSK) ? "BF " : ""); + return scnprintf(buf, bufsz, + "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n", + type, rs_pretty_ant(ant), bw, mcs, nss, + (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ", + (rate & RATE_MCS_STBC_MSK) ? "STBC " : "", + (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "", + (rate & RATE_MCS_BF_MSK) ? 
"BF " : ""); } /** @@ -3696,65 +3690,70 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, int desc = 0; int i = 0; ssize_t ret; + static const size_t bufsz = 2048; struct iwl_lq_sta *lq_sta = file->private_data; struct iwl_mvm_sta *mvmsta = - container_of(lq_sta, struct iwl_mvm_sta, lq_sta); + container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv); struct iwl_mvm *mvm; struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); struct rs_rate *rate = &tbl->rate; u32 ss_params; mvm = lq_sta->pers.drv; - buff = kmalloc(2048, GFP_KERNEL); + buff = kmalloc(bufsz, GFP_KERNEL); if (!buff) return -ENOMEM; - desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); - desc += sprintf(buff+desc, "failed=%d success=%d rate=0%lX\n", - lq_sta->total_failed, lq_sta->total_success, - lq_sta->active_legacy_rate); - desc += sprintf(buff+desc, "fixed rate 0x%X\n", - lq_sta->pers.dbg_fixed_rate); - desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", + desc += scnprintf(buff + desc, bufsz - desc, + "sta_id %d\n", lq_sta->lq.sta_id); + desc += scnprintf(buff + desc, bufsz - desc, + "failed=%d success=%d rate=0%lX\n", + lq_sta->total_failed, lq_sta->total_success, + lq_sta->active_legacy_rate); + desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n", + lq_sta->pers.dbg_fixed_rate); + desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s%s\n", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : ""); - desc += sprintf(buff+desc, "lq type %s\n", - (is_legacy(rate)) ? "legacy" : - is_vht(rate) ? "VHT" : "HT"); + desc += scnprintf(buff + desc, bufsz - desc, "lq type %s\n", + (is_legacy(rate)) ? "legacy" : + is_vht(rate) ? "VHT" : "HT"); if (!is_legacy(rate)) { - desc += sprintf(buff + desc, " %s", + desc += scnprintf(buff + desc, bufsz - desc, " %s", (is_siso(rate)) ? "SISO" : "MIMO2"); - desc += sprintf(buff + desc, " %s", + desc += scnprintf(buff + desc, bufsz - desc, " %s", (is_ht20(rate)) ? "20MHz" : (is_ht40(rate)) ? "40MHz" : (is_ht80(rate)) ? "80MHz" : (is_ht160(rate)) ? "160MHz" : "BAD BW"); - desc += sprintf(buff + desc, " %s %s %s %s\n", + desc += scnprintf(buff + desc, bufsz - desc, " %s %s %s %s\n", (rate->sgi) ? "SGI" : "NGI", (rate->ldpc) ? "LDPC" : "BCC", (lq_sta->is_agg) ? "AGG on" : "", (mvmsta->tlc_amsdu) ? "AMSDU on" : ""); } - desc += sprintf(buff+desc, "last tx rate=0x%X\n", + desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X\n", lq_sta->last_rate_n_flags); - desc += sprintf(buff+desc, + desc += scnprintf(buff + desc, bufsz - desc, "general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n", lq_sta->lq.flags, lq_sta->lq.mimo_delim, lq_sta->lq.single_stream_ant_msk, lq_sta->lq.dual_stream_ant_msk); - desc += sprintf(buff+desc, + desc += scnprintf(buff + desc, bufsz - desc, "agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", le16_to_cpu(lq_sta->lq.agg_time_limit), lq_sta->lq.agg_disable_start_th, lq_sta->lq.agg_frame_cnt_limit); - desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc); + desc += scnprintf(buff + desc, bufsz - desc, "reduced tpc=%d\n", + lq_sta->lq.reduced_tpc); ss_params = le32_to_cpu(lq_sta->lq.ss_params); - desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n", + desc += scnprintf(buff + desc, bufsz - desc, + "single stream params: %s%s%s%s\n", (ss_params & LQ_SS_PARAMS_VALID) ? "VALID" : "INVALID", (ss_params & LQ_SS_BFER_ALLOWED) ? 
@@ -3763,7 +3762,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, ", STBC" : "", (ss_params & LQ_SS_FORCE) ? ", FORCE" : ""); - desc += sprintf(buff+desc, + desc += scnprintf(buff + desc, bufsz - desc, "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", lq_sta->lq.initial_rate_index[0], lq_sta->lq.initial_rate_index[1], @@ -3773,8 +3772,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]); - desc += sprintf(buff+desc, " rate[%d] 0x%X ", i, r); - desc += rs_pretty_print_rate(buff+desc, r); + desc += scnprintf(buff + desc, bufsz - desc, + " rate[%d] 0x%X ", i, r); + desc += rs_pretty_print_rate(buff + desc, bufsz - desc, r); } ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); @@ -3987,12 +3987,13 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf, MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32); -static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir) +static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta, + struct dentry *dir) { struct iwl_lq_sta *lq_sta = priv_sta; struct iwl_mvm_sta *mvmsta; - mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta); + mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv); if (!mvmsta->vif) return; @@ -4014,7 +4015,7 @@ err: IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n"); } -static void rs_remove_debugfs(void *mvm, void *mvm_sta) +void rs_remove_sta_debugfs(void *mvm, void *mvm_sta) { } #endif @@ -4024,50 +4025,53 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta) * the station is added. Since mac80211 calls this function before a * station is added we ignore it. */ -static void rs_rate_init_stub(void *mvm_r, - struct ieee80211_supported_band *sband, - struct cfg80211_chan_def *chandef, - struct ieee80211_sta *sta, void *mvm_sta) +static void rs_rate_init_ops(void *mvm_r, + struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, void *mvm_sta) { } -static const struct rate_control_ops rs_mvm_ops = { +/* ops for rate scaling implemented in the driver */ +static const struct rate_control_ops rs_mvm_ops_drv = { .name = RS_NAME, - .tx_status = rs_mac80211_tx_status, - .get_rate = rs_get_rate, - .rate_init = rs_rate_init_stub, + .tx_status = rs_drv_mac80211_tx_status, + .get_rate = rs_drv_get_rate, + .rate_init = rs_rate_init_ops, .alloc = rs_alloc, .free = rs_free, - .alloc_sta = rs_alloc_sta, + .alloc_sta = rs_drv_alloc_sta, .free_sta = rs_free_sta, - .rate_update = rs_rate_update, + .rate_update = rs_drv_rate_update, #ifdef CONFIG_MAC80211_DEBUGFS - .add_sta_debugfs = rs_add_debugfs, - .remove_sta_debugfs = rs_remove_debugfs, + .add_sta_debugfs = rs_drv_add_sta_debugfs, + .remove_sta_debugfs = rs_remove_sta_debugfs, #endif }; +void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + enum nl80211_band band, bool init) +{ + if (iwl_mvm_has_tlc_offload(mvm)) + rs_fw_rate_init(mvm, sta, band); + else + rs_drv_rate_init(mvm, sta, band, init); +} + int iwl_mvm_rate_control_register(void) { - return ieee80211_rate_control_register(&rs_mvm_ops); + return ieee80211_rate_control_register(&rs_mvm_ops_drv); } void iwl_mvm_rate_control_unregister(void) { - ieee80211_rate_control_unregister(&rs_mvm_ops); + ieee80211_rate_control_unregister(&rs_mvm_ops_drv); } -/** - * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable - * Tx protection, according to this 
request and previous requests, - * and send the LQ command. - * @mvmsta: The station - * @enable: Enable Tx protection? - */ -int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, - bool enable) +static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, + bool enable) { - struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq; + struct iwl_lq_cmd *lq = &mvmsta->lq_sta.rs_drv.lq; lockdep_assert_held(&mvm->mutex); @@ -4083,3 +4087,17 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, return iwl_mvm_send_lq_cmd(mvm, lq, false); } + +/** + * iwl_mvm_tx_protection - ask FW to enable RTS/CTS protection + * @mvmsta: The station + * @enable: Enable Tx protection? + */ +int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, + bool enable) +{ + if (iwl_mvm_has_tlc_offload(mvm)) + return rs_fw_tx_protection(mvm, mvmsta, enable); + else + return rs_drv_tx_protection(mvm, mvmsta, enable); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index 32b4d66debea..fb18cb8c233d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -36,6 +36,8 @@ #include "fw-api.h" #include "iwl-trans.h" +#define RS_NAME "iwl-mvm-rs" + struct iwl_rs_rate_info { u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ u8 plcp_ht_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ @@ -218,6 +220,38 @@ struct iwl_rate_mcs_info { }; /** + * struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW + * @last_rate_n_flags: last rate reported by FW + * @sta_id: the id of the station +#ifdef CONFIG_MAC80211_DEBUGFS + * @dbg_fixed_rate: for debug, use fixed rate if not 0 + * @dbg_agg_frame_count_lim: for debug, max number of frames in A-MPDU +#endif + * @chains: bitmask of chains reported in %chain_signal + * @chain_signal: per chain signal strength + * @last_rssi: last rssi reported + * @drv: pointer back to the driver data + */ + +struct iwl_lq_sta_rs_fw { + /* last tx rate_n_flags */ + u32 last_rate_n_flags; + + /* persistent fields - initialized only once - keep last! 
*/ + struct lq_sta_pers_rs_fw { + u32 sta_id; +#ifdef CONFIG_MAC80211_DEBUGFS + u32 dbg_fixed_rate; + u16 dbg_agg_frame_count_lim; +#endif + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; + s8 last_rssi; + struct iwl_mvm *drv; + } pers; +}; + +/** * struct iwl_rate_scale_data -- tx success history for one rate */ struct iwl_rate_scale_data { @@ -407,4 +441,18 @@ struct iwl_mvm_sta; int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable); +#ifdef CONFIG_IWLWIFI_DEBUGFS +void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm); +#endif + +#ifdef CONFIG_MAC80211_DEBUGFS +void rs_remove_sta_debugfs(void *mvm, void *mvm_sta); +#endif + +void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta); +void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + enum nl80211_band band); +int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, + bool enable); +void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt); #endif /* __rs__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index d1a40688d5e1..d26833c5ce1f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -222,7 +222,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, case RX_MPDU_RES_STATUS_SEC_TKIP_ENC: /* Don't drop the frame and decrypt it in SW */ - if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK)) + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_DEPRECATE_TTAK) && + !(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK)) return 0; *crypt_len = IEEE80211_TKIP_IV_LEN; /* fall through if TTAK OK */ @@ -383,7 +385,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, false); } - rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status); + rs_update_last_rssi(mvm, mvmsta, rx_status); if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) && ieee80211_is_beacon(hdr->frame_control)) { @@ -439,7 +441,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->bw = RATE_INFO_BW_160; break; } - if (rate_n_flags & RATE_MCS_SGI_MSK) + if (!(rate_n_flags & RATE_MCS_CCK_MSK) && + rate_n_flags & RATE_MCS_SGI_MSK) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_HT_MCS_GF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 3b8d44361380..a3f7c1bf3cc8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -261,7 +261,9 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, return 0; case IWL_RX_MPDU_STATUS_SEC_TKIP: /* Don't drop the frame and decrypt it in SW */ - if (!(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK)) + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_DEPRECATE_TTAK) && + !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK)) return 0; *crypt_len = IEEE80211_TKIP_IV_LEN; @@ -943,7 +945,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, false); } - rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status); + rs_update_last_rssi(mvm, mvmsta, rx_status); if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) && ieee80211_is_beacon(hdr->frame_control)) { @@ -1020,7 +1022,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->bw = RATE_INFO_BW_160; break; } - if (rate_n_flags & RATE_MCS_SGI_MSK) + + if 
(!(rate_n_flags & RATE_MCS_CCK_MSK) && + rate_n_flags & RATE_MCS_SGI_MSK) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_HT_MCS_GF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index e4fd476e9ccb..356b16f40e78 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -664,6 +664,22 @@ static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies, return newpos; } +#define WFA_TPC_IE_LEN 9 + +static void iwl_mvm_add_tpc_report_ie(u8 *pos) +{ + pos[0] = WLAN_EID_VENDOR_SPECIFIC; + pos[1] = WFA_TPC_IE_LEN - 2; + pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff; + pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff; + pos[4] = WLAN_OUI_MICROSOFT & 0xff; + pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC; + pos[6] = 0; + /* pos[7] - tx power will be inserted by the FW */ + pos[7] = 0; + pos[8] = 0; +} + static void iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_scan_ies *ies, @@ -716,7 +732,16 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, memcpy(pos, ies->common_ies, ies->common_ie_len); params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf); - params->preq.common_data.len = cpu_to_le16(ies->common_ie_len); + + if (iwl_mvm_rrm_scan_needed(mvm) && + !fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) { + iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len); + params->preq.common_data.len = cpu_to_le16(ies->common_ie_len + + WFA_TPC_IE_LEN); + } else { + params->preq.common_data.len = cpu_to_le16(ies->common_ie_len); + } } static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, @@ -781,7 +806,9 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, if (params->type == IWL_SCAN_TYPE_FRAGMENTED) flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED; - if (iwl_mvm_rrm_scan_needed(mvm)) + if (iwl_mvm_rrm_scan_needed(mvm) && + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED; if (params->pass_all) @@ -1183,7 +1210,9 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED; } - if (iwl_mvm_rrm_scan_needed(mvm)) + if (iwl_mvm_rrm_scan_needed(mvm) && + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED; if (params->pass_all) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 1add5615fc3a..6b2674e02606 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1439,6 +1439,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, goto err; } + /* + * if rs is registered with mac80211, then "add station" will be handled + * via the corresponding ops, otherwise need to notify rate scaling here + */ + if (iwl_mvm_has_tlc_offload(mvm)) + iwl_mvm_rs_add_sta(mvm, mvm_sta); + update_fw: ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags); if (ret) @@ -1762,7 +1769,7 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) } /* - * For a000 firmware and on we cannot add queue to a station unknown + * For 22000 firmware and on we cannot add queue to a station unknown * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) @@ -1885,7 +1892,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm 
*mvm, struct ieee80211_vif *vif) return ret; /* - * For a000 firmware and on we cannot add queue to a station unknown + * For 22000 firmware and on we cannot add queue to a station unknown * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) { @@ -2064,7 +2071,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) /* * Enable cab queue after the ADD_STA command is sent. - * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG + * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG * command with unknown station id, and for FW that doesn't support * station API since the cab queue is not included in the * tfd_queue_mask. @@ -2530,7 +2537,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, tid_data->next_reclaimed); /* - * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need + * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. */ normalized_ssn = tid_data->ssn; @@ -2575,6 +2582,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, .aggregate = true, }; + /* + * When FW supports TLC_OFFLOAD, it also implements Tx aggregation + * manager, so this function should never be called in this case. + */ + if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm))) + return -EINVAL; + BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) != IWL_MAX_TID_COUNT); @@ -2672,12 +2686,12 @@ out: */ mvmsta->max_agg_bufsize = min(mvmsta->max_agg_bufsize, buf_size); - mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize; + mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize; IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", sta->addr, tid); - return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false); + return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false); } static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, @@ -3615,7 +3629,7 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data) u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); /* - * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need + * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. */ if (mvm->trans->cfg->gen2) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index aedabe101cf0..5ffd6adbc383 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -383,6 +383,8 @@ struct iwl_mvm_rxq_dup_data { * and from Tx response flow, it needs a spinlock. * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data. * @tid_to_baid: a simple map of TID to baid + * @lq_sta: holds rate scaling data, either for the case when RS is done in + * the driver - %rs_drv or in the FW - %rs_fw. * @reserved_queue: the queue reserved for this STA for DQA purposes * Every STA has is given one reserved queue to allow it to operate. If no * such queue can be guaranteed, the STA addition will fail. 
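The sta.h hunk that follows replaces the embedded struct iwl_lq_sta with a union, so a station carries either firmware-side (rs_fw, TLC offload) or driver-side (rs_drv) rate-scaling state but never both; the rs.c changes above correspondingly switch their container_of() calls to the lq_sta.rs_drv member. A self-contained sketch of that pointer arithmetic, using illustrative stand-in types rather than the real iwlwifi structures.

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct lq_sta_rs_fw { unsigned int last_rate_n_flags; };
struct lq_sta_rs_drv { int total_failed, total_success; };

struct mvm_sta {
	int sta_id;
	union {
		struct lq_sta_rs_fw rs_fw;	/* rate scaling done in FW */
		struct lq_sta_rs_drv rs_drv;	/* rate scaling done in driver */
	} lq_sta;
};

int main(void)
{
	struct mvm_sta sta = { .sta_id = 3 };
	struct lq_sta_rs_drv *lq = &sta.lq_sta.rs_drv;

	/* Rate-control callbacks only see 'lq'; walk back to the station. */
	struct mvm_sta *owner = container_of(lq, struct mvm_sta, lq_sta.rs_drv);

	printf("recovered sta_id %d\n", owner->sta_id);
	return 0;
}

A union is safe here because the choice between rs_fw and rs_drv is fixed per NIC by the TLC_OFFLOAD firmware capability and never changes at runtime.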
@@ -417,7 +419,10 @@ struct iwl_mvm_sta { spinlock_t lock; struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1]; u8 tid_to_baid[IWL_MAX_TID_COUNT]; - struct iwl_lq_sta lq_sta; + union { + struct iwl_lq_sta_rs_fw rs_fw; + struct iwl_lq_sta rs_drv; + } lq_sta; struct ieee80211_vif *vif; struct iwl_mvm_key_pn __rcu *ptk_pn[4]; struct iwl_mvm_rxq_dup_data *dup_data; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index e25cda9fbf6c..200ab50ec86b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -101,7 +101,6 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, void iwl_mvm_roc_done_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk); - u32 queues = 0; /* * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit. @@ -110,14 +109,10 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) * in the case that the time event actually completed in the firmware * (which is handled in iwl_mvm_te_handle_notif). */ - if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) { - queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE); + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) iwl_mvm_unref(mvm, IWL_MVM_REF_ROC); - } - if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) { - queues |= BIT(mvm->aux_queue); + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX); - } synchronize_net(); @@ -777,12 +772,6 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return -EBUSY; } - /* - * Flush the done work, just in case it's still pending, so that - * the work it does can complete and we can accept new frames. - */ - flush_work(&mvm->roc_done_wk); - time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 333bcb75b8af..dda77b327c98 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -888,10 +888,9 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm, /* * The first deferred frame should've stopped the MAC queues, so we * should never get a second deferred frame for the RA/TID. + * In case of GSO the first packet may have been split, so don't warn. */ - if (!WARN(skb_queue_len(deferred_tx_frames) != 1, - "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid, - skb_queue_len(deferred_tx_frames))) { + if (skb_queue_len(deferred_tx_frames) == 1) { iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue)); schedule_work(&mvm->add_stream_wk); } @@ -1132,7 +1131,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, } /* - * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need + * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. 
*/ normalized_ssn = tid_data->ssn; @@ -1624,7 +1623,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, int freed; if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT || - tid >= IWL_MAX_TID_COUNT, + tid > IWL_MAX_TID_COUNT, "sta_id %d tid %d", sta_id, tid)) return; @@ -1679,7 +1678,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, if (ieee80211_is_data_qos(hdr->frame_control)) freed++; else - WARN_ON_ONCE(1); + WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT); iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); @@ -1719,8 +1718,11 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, ba_info->band = chanctx_conf->def.chan->band; iwl_mvm_hwrate_to_tx_status(rate, ba_info); - IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n"); - iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false); + if (!iwl_mvm_has_tlc_offload(mvm)) { + IWL_DEBUG_TX_REPLY(mvm, + "No reclaim. Update rs directly\n"); + iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false); + } } out: @@ -1771,8 +1773,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) struct iwl_mvm_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i]; + tid = ba_tfd->tid; + if (tid == IWL_MGMT_TID) + tid = IWL_MAX_TID_COUNT; + mvmsta->tid_data[i].lq_color = lq_color; - iwl_mvm_tx_reclaim(mvm, sta_id, ba_tfd->tid, + iwl_mvm_tx_reclaim(mvm, sta_id, tid, (int)(le16_to_cpu(ba_tfd->q_num)), le16_to_cpu(ba_tfd->tfd_index), &ba_info, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 03ffd84786ca..d65e1db7c097 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -278,8 +278,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) u8 ind = last_idx; int i; - for (i = 0; i < RATE_MCS_ANT_NUM; i++) { - ind = (ind + 1) % RATE_MCS_ANT_NUM; + for (i = 0; i < MAX_RS_ANT_NUM; i++) { + ind = (ind + 1) % MAX_RS_ANT_NUM; if (valid & BIT(ind)) return ind; } @@ -516,8 +516,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) IWL_ERR(trans, "HW error, resetting before reading\n"); /* reset the device */ - iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - usleep_range(5000, 6000); + iwl_trans_sw_reset(trans); /* set INIT_DONE flag */ iwl_set_bit(trans, CSR_GP_CNTRL, @@ -595,6 +594,12 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) { + if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) { + IWL_ERR(mvm, + "DEVICE_ENABLED bit is not set. 
Aborting dump.\n"); + return; + } + iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]); if (mvm->error_event_table[1]) @@ -906,7 +911,8 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init) .data = { lq, }, }; - if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA || + iwl_mvm_has_tlc_offload(mvm))) return -EINVAL; return iwl_mvm_send_cmd(mvm, &cmd); @@ -1025,12 +1031,34 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int res; + bool low_latency; lockdep_assert_held(&mvm->mutex); - if (iwl_mvm_vif_low_latency(mvmvif) == prev) + low_latency = iwl_mvm_vif_low_latency(mvmvif); + + if (low_latency == prev) return 0; + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) { + struct iwl_mac_low_latency_cmd cmd = { + .mac_id = cpu_to_le32(mvmvif->id) + }; + + if (low_latency) { + /* currently we don't care about the direction */ + cmd.low_latency_rx = 1; + cmd.low_latency_tx = 1; + } + res = iwl_mvm_send_cmd_pdu(mvm, + iwl_cmd_id(LOW_LATENCY_CMD, + MAC_CONF_GROUP, 0), + 0, sizeof(cmd), &cmd); + if (res) + IWL_ERR(mvm, "Failed to send low latency command\n"); + } + res = iwl_mvm_update_quotas(mvm, false, NULL); if (res) return res; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index ccd7c33c4c28..56fc28750a41 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -652,20 +652,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)}, -/* a000 Series */ - {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)}, - {IWL_PCI_DEVICE(0x34F0, 0x0310, iwla000_2ac_cfg_jf)}, - {IWL_PCI_DEVICE(0x2720, 0x0000, iwla000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x34F0, 0x0070, iwla000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x2720, 0x0078, iwla000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ac_cfg_hr_cdb)}, - {IWL_PCI_DEVICE(0x2720, 0x0030, iwla000_2ac_cfg_hr_cdb)}, - {IWL_PCI_DEVICE(0x2720, 0x1080, iwla000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x2720, 0x0090, iwla000_2ac_cfg_hr_cdb)}, - {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)}, - {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)}, +/* 22000 Series */ + {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)}, + {IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)}, #endif /* CONFIG_IWLMVM */ @@ -707,7 +707,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) iwl_trans->cfg = cfg_7265d; } - if (iwl_trans->cfg->rf_id && cfg == &iwla000_2ac_cfg_hr_cdb && + if 
(iwl_trans->cfg->rf_id && cfg == &iwl22000_2ac_cfg_hr_cdb && iwl_trans->hw_rev != CSR_HW_REV_TYPE_HR_CDB) { u32 rf_id_chp = CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id); u32 jf_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF); @@ -715,14 +715,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (rf_id_chp == jf_chp_id) { if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ) - cfg = &iwla000_2ax_cfg_qnj_jf_b0; + cfg = &iwl22000_2ax_cfg_qnj_jf_b0; else - cfg = &iwla000_2ac_cfg_jf; + cfg = &iwl22000_2ac_cfg_jf; } else if (rf_id_chp == hr_chp_id) { if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ) - cfg = &iwla000_2ax_cfg_qnj_hr_a0; + cfg = &iwl22000_2ax_cfg_qnj_hr_a0; else - cfg = &iwla000_2ac_cfg_hr; + cfg = &iwl22000_2ac_cfg_hr; } iwl_trans->cfg = cfg; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 403e65c309d0..ca3b64ff4dad 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -658,13 +658,6 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) } } -static inline void iwl_pcie_sw_reset(struct iwl_trans *trans) -{ - /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ - iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - usleep_range(5000, 6000); -} - static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index) { return index & (q->n_window - 1); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index ac05fd1e74c4..cb4012541f45 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -137,7 +137,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) /* Stop device's DMA activity */ iwl_pcie_apm_stop_master(trans); - iwl_pcie_sw_reset(trans); + iwl_trans_sw_reset(trans); /* * Clear "initialization complete" bit to move adapter from @@ -192,7 +192,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) /* Stop the device, and put it in low power state */ iwl_pcie_gen2_apm_stop(trans, false); - iwl_pcie_sw_reset(trans); + iwl_trans_sw_reset(trans); /* * Upon stop, the IVAR table gets erased, so msi-x won't diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 4541c86881d6..b406b536c850 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -176,6 +176,13 @@ out: kfree(buf); } +static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans) +{ + /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ + iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + usleep_range(5000, 6000); +} + static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -446,7 +453,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_XTAL_ON); - iwl_pcie_sw_reset(trans); + iwl_trans_pcie_sw_reset(trans); /* * Set "initialization complete" bit to move adapter from @@ -487,7 +494,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) apmg_xtal_cfg_reg | SHR_APMG_XTAL_CFG_XTAL_ON_REQ); - iwl_pcie_sw_reset(trans); + iwl_trans_pcie_sw_reset(trans); /* Enable LP XTAL by indirect 
access through CSR */ apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG); @@ -580,7 +587,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) return; } - iwl_pcie_sw_reset(trans); + iwl_trans_pcie_sw_reset(trans); /* * Clear "initialization complete" bit to move adapter from @@ -915,14 +922,9 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, void iwl_pcie_apply_destination(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv; + const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv; int i; - if (dest->version) - IWL_ERR(trans, - "DBG DEST version is %d - expect issues\n", - dest->version); - IWL_INFO(trans, "Applying debug destination %s\n", get_fw_dbg_mode_string(dest->monitor_mode)); @@ -1270,7 +1272,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) /* Stop the device, and put it in low power state */ iwl_pcie_apm_stop(trans, false); - iwl_pcie_sw_reset(trans); + iwl_trans_pcie_sw_reset(trans); /* * Upon stop, the IVAR table gets erased, so msi-x won't @@ -1744,7 +1746,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) return err; } - iwl_pcie_sw_reset(trans); + iwl_trans_pcie_sw_reset(trans); err = iwl_pcie_apm_init(trans); if (err) @@ -2816,8 +2818,17 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, * Update pointers to reflect actual values after * shifting */ - base = iwl_read_prph(trans, base) << - trans->dbg_dest_tlv->base_shift; + if (trans->dbg_dest_tlv->version) { + base = (iwl_read_prph(trans, base) & + IWL_LDBG_M2S_BUF_BA_MSK) << + trans->dbg_dest_tlv->base_shift; + base *= IWL_M2S_UNIT_SIZE; + base += trans->cfg->smem_offset; + } else { + base = iwl_read_prph(trans, base) << + trans->dbg_dest_tlv->base_shift; + } + iwl_trans_read_mem(trans, base, fw_mon_data->data, monitor_len / sizeof(u32)); } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) { @@ -2865,21 +2876,36 @@ static struct iwl_trans_dump_data trans_pcie->fw_mon_size; monitor_len = trans_pcie->fw_mon_size; } else if (trans->dbg_dest_tlv) { - u32 base, end; + u32 base, end, cfg_reg; - base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); - end = le32_to_cpu(trans->dbg_dest_tlv->end_reg); + if (trans->dbg_dest_tlv->version == 1) { + cfg_reg = le32_to_cpu(trans->dbg_dest_tlv->base_reg); + cfg_reg = iwl_read_prph(trans, cfg_reg); + base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) << + trans->dbg_dest_tlv->base_shift; + base *= IWL_M2S_UNIT_SIZE; + base += trans->cfg->smem_offset; - base = iwl_read_prph(trans, base) << - trans->dbg_dest_tlv->base_shift; - end = iwl_read_prph(trans, end) << - trans->dbg_dest_tlv->end_shift; + monitor_len = + (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> + trans->dbg_dest_tlv->end_shift; + monitor_len *= IWL_M2S_UNIT_SIZE; + } else { + base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); + end = le32_to_cpu(trans->dbg_dest_tlv->end_reg); - /* Make "end" point to the actual end */ - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000 || - trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) - end += (1 << trans->dbg_dest_tlv->end_shift); - monitor_len = end - base; + base = iwl_read_prph(trans, base) << + trans->dbg_dest_tlv->base_shift; + end = iwl_read_prph(trans, end) << + trans->dbg_dest_tlv->end_shift; + + /* Make "end" point to the actual end */ + if (trans->cfg->device_family >= + IWL_DEVICE_FAMILY_8000 || + trans->dbg_dest_tlv->monitor_mode == 
MARBH_MODE) + end += (1 << trans->dbg_dest_tlv->end_shift); + monitor_len = end - base; + } len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + monitor_len; } else { @@ -3025,6 +3051,7 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans) .write_mem = iwl_trans_pcie_write_mem, \ .configure = iwl_trans_pcie_configure, \ .set_pmi = iwl_trans_pcie_set_pmi, \ + .sw_reset = iwl_trans_pcie_sw_reset, \ .grab_nic_access = iwl_trans_pcie_grab_nic_access, \ .release_nic_access = iwl_trans_pcie_release_nic_access, \ .set_bits_mask = iwl_trans_pcie_set_bits_mask, \ @@ -3250,9 +3277,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); if (hw_status & UMAG_GEN_HW_IS_FPGA) - trans->cfg = &iwla000_2ax_cfg_qnj_hr_f0; + trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0; else - trans->cfg = &iwla000_2ac_cfg_hr; + trans->cfg = &iwl22000_2ac_cfg_hr; } #endif diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index f6d4a50f1bdb..1cf22e62e3dd 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -32,6 +32,7 @@ #include <net/genetlink.h> #include <net/net_namespace.h> #include <net/netns/generic.h> +#include <linux/rhashtable.h> #include "mac80211_hwsim.h" #define WARN_QUEUE 100 @@ -490,6 +491,7 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = { static spinlock_t hwsim_radio_lock; static LIST_HEAD(hwsim_radios); static struct workqueue_struct *hwsim_wq; +static struct rhashtable hwsim_radios_rht; static int hwsim_radio_idx; static struct platform_driver mac80211_hwsim_driver = { @@ -500,6 +502,7 @@ static struct platform_driver mac80211_hwsim_driver = { struct mac80211_hwsim_data { struct list_head list; + struct rhash_head rht; struct ieee80211_hw *hw; struct device *dev; struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; @@ -574,6 +577,13 @@ struct mac80211_hwsim_data { u64 tx_failed; }; +static const struct rhashtable_params hwsim_rht_params = { + .nelem_hint = 2, + .automatic_shrinking = true, + .key_len = ETH_ALEN, + .key_offset = offsetof(struct mac80211_hwsim_data, addresses[1]), + .head_offset = offsetof(struct mac80211_hwsim_data, rht), +}; struct hwsim_radiotap_hdr { struct ieee80211_radiotap_header hdr; @@ -729,16 +739,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val) val != PS_MANUAL_POLL) return -EINVAL; - old_ps = data->ps; - data->ps = val; - - local_bh_disable(); if (val == PS_MANUAL_POLL) { + if (data->ps != PS_ENABLED) + return -EINVAL; + local_bh_disable(); ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_ps_poll, data); - data->ps_poll_pending = true; - } else if (old_ps == PS_DISABLED && val != PS_DISABLED) { + local_bh_enable(); + return 0; + } + old_ps = data->ps; + data->ps = val; + + local_bh_disable(); + if (old_ps == PS_DISABLED && val != PS_DISABLED) { ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_nullfunc_ps, data); @@ -1004,6 +1019,36 @@ static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data, return res; } +static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate) +{ + u16 result = 0; + + if (rate->flags & IEEE80211_TX_RC_USE_RTS_CTS) + result |= MAC80211_HWSIM_TX_RC_USE_RTS_CTS; + if (rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) + result |= MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT; + if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + result |= 
MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE; + if (rate->flags & IEEE80211_TX_RC_MCS) + result |= MAC80211_HWSIM_TX_RC_MCS; + if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) + result |= MAC80211_HWSIM_TX_RC_GREEN_FIELD; + if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + result |= MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH; + if (rate->flags & IEEE80211_TX_RC_DUP_DATA) + result |= MAC80211_HWSIM_TX_RC_DUP_DATA; + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + result |= MAC80211_HWSIM_TX_RC_SHORT_GI; + if (rate->flags & IEEE80211_TX_RC_VHT_MCS) + result |= MAC80211_HWSIM_TX_RC_VHT_MCS; + if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + result |= MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH; + if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) + result |= MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH; + + return result; +} + static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, struct sk_buff *my_skb, int dst_portid) @@ -1016,6 +1061,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, unsigned int hwsim_flags = 0; int i; struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES]; + struct hwsim_tx_rate_flag tx_attempts_flags[IEEE80211_TX_MAX_RATES]; uintptr_t cookie; if (data->ps != PS_DISABLED) @@ -1067,7 +1113,11 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { tx_attempts[i].idx = info->status.rates[i].idx; + tx_attempts_flags[i].idx = info->status.rates[i].idx; tx_attempts[i].count = info->status.rates[i].count; + tx_attempts_flags[i].flags = + trans_tx_rate_flags_ieee2hwsim( + &info->status.rates[i]); } if (nla_put(skb, HWSIM_ATTR_TX_INFO, @@ -1075,6 +1125,11 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, tx_attempts)) goto nla_put_failure; + if (nla_put(skb, HWSIM_ATTR_TX_INFO_FLAGS, + sizeof(struct hwsim_tx_rate_flag) * IEEE80211_TX_MAX_RATES, + tx_attempts_flags)) + goto nla_put_failure; + /* We create a cookie to identify this skb */ data->pending_cookie++; cookie = data->pending_cookie; @@ -2727,6 +2782,15 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); spin_lock_bh(&hwsim_radio_lock); + err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht, + hwsim_rht_params); + if (err < 0) { + pr_debug("mac80211_hwsim: radio index %d already present\n", + idx); + spin_unlock_bh(&hwsim_radio_lock); + goto failed_final_insert; + } + list_add_tail(&data->list, &hwsim_radios); spin_unlock_bh(&hwsim_radio_lock); @@ -2735,6 +2799,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, return idx; +failed_final_insert: + debugfs_remove_recursive(data->debugfs); + ieee80211_unregister_hw(data->hw); failed_hw: device_release_driver(data->dev); failed_bind: @@ -2870,22 +2937,9 @@ static void hwsim_mon_setup(struct net_device *dev) static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr) { - struct mac80211_hwsim_data *data; - bool _found = false; - - spin_lock_bh(&hwsim_radio_lock); - list_for_each_entry(data, &hwsim_radios, list) { - if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) { - _found = true; - break; - } - } - spin_unlock_bh(&hwsim_radio_lock); - - if (!_found) - return NULL; - - return data; + return rhashtable_lookup_fast(&hwsim_radios_rht, + addr, + hwsim_rht_params); } static void hwsim_register_wmediumd(struct net *net, u32 portid) @@ -2970,7 +3024,6 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { txi->status.rates[i].idx = tx_attempts[i].idx; 
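
Stepping back from the hunk above: the hwsim changes in this file replace the linear search over hwsim_radios with an rhashtable keyed on the radio's second MAC address (addresses[1]), so get_hwsim_data_ref_from_addr() becomes an O(1) RCU-safe hash lookup instead of a list walk under the spinlock. A minimal, self-contained sketch of that pattern; the struct and table names here are illustrative, not the driver's:

#include <linux/rhashtable.h>
#include <linux/etherdevice.h>

struct radio {
	u8 addr[ETH_ALEN];		/* hash key: raw 6-byte MAC */
	struct rhash_head rht;		/* table linkage */
};

static struct rhashtable radio_ht;

static const struct rhashtable_params radio_rht_params = {
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct radio, addr),
	.head_offset = offsetof(struct radio, rht),
	.automatic_shrinking = true,
};

/* Publish a radio; fails with -EEXIST if the MAC is already present,
 * which is what lets mac80211_hwsim_new_radio() detect duplicates. */
static int radio_publish(struct radio *r)
{
	return rhashtable_insert_fast(&radio_ht, &r->rht, radio_rht_params);
}

/* Lookup by MAC; safe on the read side without taking the spinlock. */
static struct radio *radio_find(const u8 *mac)
{
	return rhashtable_lookup_fast(&radio_ht, mac, radio_rht_params);
}

This is also why the new-radio path must unwind with rhashtable_remove_fast() (and why every teardown path in the hunks above now removes from the table before queueing the destroy work).
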
txi->status.rates[i].count = tx_attempts[i].count; - /*txi->status.rates[i].flags = 0;*/ } txi->status.ack_signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); @@ -3150,8 +3203,10 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) { u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]); - if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) + if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) { + kfree(hwname); return -EINVAL; + } param.regd = hwsim_world_regdom_custom[idx]; } @@ -3192,6 +3247,8 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) continue; list_del(&data->list); + rhashtable_remove_fast(&hwsim_radios_rht, &data->rht, + hwsim_rht_params); spin_unlock_bh(&hwsim_radio_lock); mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), info); @@ -3347,6 +3404,8 @@ static void remove_user_radios(u32 portid) list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) { if (entry->destroy_on_close && entry->portid == portid) { list_del(&entry->list); + rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht, + hwsim_rht_params); INIT_WORK(&entry->destroy_work, destroy_radio); queue_work(hwsim_wq, &entry->destroy_work); } @@ -3422,6 +3481,8 @@ static void __net_exit hwsim_exit_net(struct net *net) continue; list_del(&data->list); + rhashtable_remove_fast(&hwsim_radios_rht, &data->rht, + hwsim_rht_params); INIT_WORK(&data->destroy_work, destroy_radio); queue_work(hwsim_wq, &data->destroy_work); } @@ -3458,6 +3519,7 @@ static int __init init_mac80211_hwsim(void) hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0); if (!hwsim_wq) return -ENOMEM; + rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params); err = register_pernet_device(&hwsim_net_ops); if (err) @@ -3599,6 +3661,7 @@ static void __exit exit_mac80211_hwsim(void) mac80211_hwsim_free(); flush_workqueue(hwsim_wq); + rhashtable_destroy(&hwsim_radios_rht); unregister_netdev(hwsim_mon); platform_driver_unregister(&mac80211_hwsim_driver); unregister_pernet_device(&hwsim_net_ops); diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h index 3f5eda591dba..a96a79c1eff5 100644 --- a/drivers/net/wireless/mac80211_hwsim.h +++ b/drivers/net/wireless/mac80211_hwsim.h @@ -64,7 +64,8 @@ enum hwsim_tx_control_flags { * @HWSIM_CMD_TX_INFO_FRAME: Transmission info report from user space to * kernel, uses: * %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS, - * %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE + * %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_TX_INFO_FLAGS, + * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE * @HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters, * returns the radio ID (>= 0) or negative on errors, if successful * then multicast the result @@ -123,6 +124,8 @@ enum { * @HWSIM_ATTR_RADIO_NAME: Name of radio, e.g. phy666 * @HWSIM_ATTR_NO_VIF: Do not create vif (wlanX) when creating radio. * @HWSIM_ATTR_FREQ: Frequency at which packet is transmitted or received. + * @HWSIM_ATTR_TX_INFO_FLAGS: additional flags for corresponding + * rates of %HWSIM_ATTR_TX_INFO + * @__HWSIM_ATTR_MAX: enum limit */ @@ -149,6 +152,7 @@ enum { HWSIM_ATTR_NO_VIF, HWSIM_ATTR_FREQ, HWSIM_ATTR_PAD, + HWSIM_ATTR_TX_INFO_FLAGS, __HWSIM_ATTR_MAX, }; #define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1) @@ -171,4 +175,66 @@ struct hwsim_tx_rate { u8 count; } __packed; +/** + * enum hwsim_tx_rate_flags - per-rate flags set by the rate control algorithm. + * Inspired by enum mac80211_rate_control_flags.
New flags may be + * appended, but old flags not deleted, to keep compatibility for + * userspace. + * + * These flags are set by the rate control algorithm for each rate during tx, + * in the @flags member of struct ieee80211_tx_rate. + * + * @MAC80211_HWSIM_TX_RC_USE_RTS_CTS: Use RTS/CTS exchange for this rate. + * @MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT: CTS-to-self protection is required. + * This is set if the current BSS requires ERP protection. + * @MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE: Use short preamble. + * @MAC80211_HWSIM_TX_RC_MCS: HT rate. + * @MAC80211_HWSIM_TX_RC_VHT_MCS: VHT MCS rate, in this case the idx field is + * split into a higher 4 bits (Nss) and lower 4 bits (MCS number) + * @MAC80211_HWSIM_TX_RC_GREEN_FIELD: Indicates whether this rate should be used + * in Greenfield mode. + * @MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH: Indicates if the Channel Width should be + * 40 MHz. + * @MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH: Indicates 80 MHz transmission + * @MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH: Indicates 160 MHz transmission + * (80+80 isn't supported yet) + * @MAC80211_HWSIM_TX_RC_DUP_DATA: The frame should be transmitted on both of + * the adjacent 20 MHz channels, if the current channel type is + * NL80211_CHAN_HT40MINUS or NL80211_CHAN_HT40PLUS. + * @MAC80211_HWSIM_TX_RC_SHORT_GI: Short Guard interval should be used for this + * rate. + */ +enum hwsim_tx_rate_flags { + MAC80211_HWSIM_TX_RC_USE_RTS_CTS = BIT(0), + MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT = BIT(1), + MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE = BIT(2), + + /* rate index is an HT/VHT MCS instead of an index */ + MAC80211_HWSIM_TX_RC_MCS = BIT(3), + MAC80211_HWSIM_TX_RC_GREEN_FIELD = BIT(4), + MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH = BIT(5), + MAC80211_HWSIM_TX_RC_DUP_DATA = BIT(6), + MAC80211_HWSIM_TX_RC_SHORT_GI = BIT(7), + MAC80211_HWSIM_TX_RC_VHT_MCS = BIT(8), + MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH = BIT(9), + MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH = BIT(10), +}; + +/** + * struct hwsim_tx_rate_flag - per-rate flags for TX status reporting + * + * @idx: rate index to attempt to send with + * @flags: the rate flags according to &enum hwsim_tx_rate_flags + * + * A value of -1 for @idx indicates an invalid rate and, if used + * in an array of retry rates, that no more rates should be tried. + * + * When used for transmit status reporting, the driver should + * always report the flags used for each attempted rate.
+ * + */ +struct hwsim_tx_rate_flag { + s8 idx; + u16 flags; +} __packed; #endif /* __MAC80211_HWSIM_H */ diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 6e0d9a9c5cfb..ce4432c535f0 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1116,6 +1116,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); enum nl80211_iftype curr_iftype = dev->ieee80211_ptr->iftype; + if (priv->scan_request) { + mwifiex_dbg(priv->adapter, ERROR, + "change virtual interface: scan in process\n"); + return -EBUSY; + } + switch (curr_iftype) { case NL80211_IFTYPE_ADHOC: switch (type) { @@ -1180,7 +1186,6 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_AP: switch (type) { case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_STATION: return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype, type, params); break; diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index dcc529e9c0ef..874660052055 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -290,13 +290,16 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] = get_unaligned_le16((u8 *)host_cmd + S_DS_GEN); + /* Setup the timer after transmit command, except that specific + * command might not have command response. + */ + if (cmd_code != HostCmd_CMD_FW_DUMP_EVENT) + mod_timer(&adapter->cmd_timer, + jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S)); + /* Clear BSS_NO_BITS from HostCmd */ cmd_code &= HostCmd_CMD_ID_MASK; - /* Setup the timer after transmit command */ - mod_timer(&adapter->cmd_timer, - jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S)); - return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index 6f4239be609d..db2872daae97 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -168,10 +168,15 @@ mwifiex_device_dump_read(struct file *file, char __user *ubuf, { struct mwifiex_private *priv = file->private_data; - if (!priv->adapter->if_ops.device_dump) - return -EIO; - - priv->adapter->if_ops.device_dump(priv->adapter); + /* For command timeouts, USB firmware will automatically emit + * firmware dump events, so we don't implement device_dump(). + * For user-initiated dumps, we trigger it ourselves. 
+ */ + if (priv->adapter->iface_type == MWIFIEX_USB) + mwifiex_send_cmd(priv, HostCmd_CMD_FW_DUMP_EVENT, + HostCmd_ACT_GEN_SET, 0, NULL, true); + else + priv->adapter->if_ops.device_dump(priv->adapter); return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 13cd58e963b3..9c2cdef54074 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -56,6 +56,15 @@ struct mwifiex_fw_data { u8 data[1]; } __packed; +struct mwifiex_fw_dump_header { + __le16 seq_num; + __le16 reserved; + __le16 type; + __le16 len; +} __packed; + +#define FW_DUMP_INFO_ENDED 0x0002 + #define MWIFIEX_FW_DNLD_CMD_1 0x1 #define MWIFIEX_FW_DNLD_CMD_5 0x5 #define MWIFIEX_FW_DNLD_CMD_6 0x6 @@ -400,6 +409,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_TDLS_CONFIG 0x0100 #define HostCmd_CMD_MC_POLICY 0x0121 #define HostCmd_CMD_TDLS_OPER 0x0122 +#define HostCmd_CMD_FW_DUMP_EVENT 0x0125 #define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG 0x0223 #define HostCmd_CMD_CHAN_REGION_CFG 0x0242 #define HostCmd_CMD_PACKET_AGGR_CTRL 0x0251 @@ -570,6 +580,7 @@ enum mwifiex_channel_flags { #define EVENT_BG_SCAN_STOPPED 0x00000065 #define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f #define EVENT_MULTI_CHAN_INFO 0x0000006a +#define EVENT_FW_DUMP_INFO 0x00000073 #define EVENT_TX_STATUS_REPORT 0x00000074 #define EVENT_BT_COEX_WLAN_PARA_CHANGE 0X00000076 diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index e1aa86042469..d239e9248c05 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -64,6 +64,13 @@ static void wakeup_timer_fn(struct timer_list *t) adapter->if_ops.card_reset(adapter); } +static void fw_dump_timer_fn(struct timer_list *t) +{ + struct mwifiex_adapter *adapter = from_timer(adapter, t, devdump_timer); + + mwifiex_upload_device_dump(adapter); +} + /* * This function initializes the private structure and sets default * values to the members. 
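
Each USB dump event carries the mwifiex_fw_dump_header added to fw.h above at the start of its body, and the type field marks the final chunk. A minimal sketch of that classification step, with the header repeated so the fragment stands alone; the helper name is illustrative. Note that the byte-order conversion wraps only the field, and the host-order value is then compared (the sta_event.c handler later in this section performs the same check):

#include <linux/kernel.h>
#include <linux/types.h>

struct mwifiex_fw_dump_header {
	__le16 seq_num;
	__le16 reserved;
	__le16 type;
	__le16 len;
} __packed;

#define FW_DUMP_INFO_ENDED 0x0002

/* True when the event chunk announces the end of the transfer. */
static bool fw_dump_chunk_is_last(const void *event_body)
{
	const struct mwifiex_fw_dump_header *hdr = event_body;

	return le16_to_cpu(hdr->type) == FW_DUMP_INFO_ENDED;
}
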
@@ -314,6 +321,8 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM; adapter->active_scan_triggered = false; timer_setup(&adapter->wakeup_timer, wakeup_timer_fn, 0); + adapter->devdump_len = 0; + timer_setup(&adapter->devdump_timer, fw_dump_timer_fn, 0); } /* @@ -396,6 +405,7 @@ static void mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) { del_timer(&adapter->wakeup_timer); + del_timer_sync(&adapter->devdump_timer); mwifiex_cancel_all_pending_cmd(adapter); wake_up_interruptible(&adapter->cmd_wait_q.wait); wake_up_interruptible(&adapter->hs_activate_wait_q); diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index a96bd7e653bf..12e739950332 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1051,9 +1051,30 @@ void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter) } EXPORT_SYMBOL_GPL(mwifiex_multi_chan_resync); -int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info) +void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter) { - void *p; + /* Dump all the memory data into single file, a userspace script will + * be used to split all the memory data to multiple files + */ + mwifiex_dbg(adapter, MSG, + "== mwifiex dump information to /sys/class/devcoredump start\n"); + dev_coredumpv(adapter->dev, adapter->devdump_data, adapter->devdump_len, + GFP_KERNEL); + mwifiex_dbg(adapter, MSG, + "== mwifiex dump information to /sys/class/devcoredump end\n"); + + /* Device dump data will be freed in device coredump release function + * after 5 min. Here reset adapter->devdump_data and ->devdump_len + * to avoid it been accidentally reused. + */ + adapter->devdump_data = NULL; + adapter->devdump_len = 0; +} +EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump); + +void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter) +{ + char *p; char drv_version[64]; struct usb_card_rec *cardp; struct sdio_mmc_card *sdio_card; @@ -1061,17 +1082,12 @@ int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info) int i, idx; struct netdev_queue *txq; struct mwifiex_debug_info *debug_info; - void *drv_info_dump; mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n"); - /* memory allocate here should be free in mwifiex_upload_device_dump*/ - drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX); - - if (!drv_info_dump) - return 0; - - p = (char *)(drv_info_dump); + p = adapter->devdump_data; + strcpy(p, "========Start dump driverinfo========\n"); + p += strlen("========Start dump driverinfo========\n"); p += sprintf(p, "driver_name = " "\"mwifiex\"\n"); mwifiex_drv_get_driver_version(adapter, drv_version, @@ -1155,21 +1171,18 @@ int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info) kfree(debug_info); } + strcpy(p, "\n========End dump========\n"); + p += strlen("\n========End dump========\n"); mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n"); - *drv_info = drv_info_dump; - return p - drv_info_dump; + adapter->devdump_len = p - (char *)adapter->devdump_data; } EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump); -void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info, - int drv_info_size) +void mwifiex_prepare_fw_dump_info(struct mwifiex_adapter *adapter) { - u8 idx, *dump_data, *fw_dump_ptr; - u32 dump_len; - - dump_len = (strlen("========Start dump driverinfo========\n") + - drv_info_size + - strlen("\n========End 
dump========\n")); + u8 idx; + char *fw_dump_ptr; + u32 dump_len = 0; for (idx = 0; idx < adapter->num_mem_types; idx++) { struct memory_type_mapping *entry = @@ -1184,24 +1197,24 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info, } } - dump_data = vzalloc(dump_len + 1); - if (!dump_data) - goto done; - - fw_dump_ptr = dump_data; + if (dump_len + 1 + adapter->devdump_len > MWIFIEX_FW_DUMP_SIZE) { + /* Realloc in case buffer overflow */ + fw_dump_ptr = vzalloc(dump_len + 1 + adapter->devdump_len); + mwifiex_dbg(adapter, MSG, "Realloc device dump data.\n"); + if (!fw_dump_ptr) { + vfree(adapter->devdump_data); + mwifiex_dbg(adapter, ERROR, + "vzalloc devdump data failure!\n"); + return; + } - /* Dump all the memory data into single file, a userspace script will - * be used to split all the memory data to multiple files - */ - mwifiex_dbg(adapter, MSG, - "== mwifiex dump information to /sys/class/devcoredump start"); + memmove(fw_dump_ptr, adapter->devdump_data, + adapter->devdump_len); + vfree(adapter->devdump_data); + adapter->devdump_data = fw_dump_ptr; + } - strcpy(fw_dump_ptr, "========Start dump driverinfo========\n"); - fw_dump_ptr += strlen("========Start dump driverinfo========\n"); - memcpy(fw_dump_ptr, drv_info, drv_info_size); - fw_dump_ptr += drv_info_size; - strcpy(fw_dump_ptr, "\n========End dump========\n"); - fw_dump_ptr += strlen("\n========End dump========\n"); + fw_dump_ptr = (char *)adapter->devdump_data + adapter->devdump_len; for (idx = 0; idx < adapter->num_mem_types; idx++) { struct memory_type_mapping *entry = @@ -1225,14 +1238,8 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info, } } - /* device dump data will be free in device coredump release function - * after 5 min - */ - dev_coredumpv(adapter->dev, dump_data, dump_len, GFP_KERNEL); - mwifiex_dbg(adapter, MSG, - "== mwifiex dump information to /sys/class/devcoredump end"); + adapter->devdump_len = fw_dump_ptr - (char *)adapter->devdump_data; -done: for (idx = 0; idx < adapter->num_mem_types; idx++) { struct memory_type_mapping *entry = &adapter->mem_type_mapping_tbl[idx]; @@ -1241,10 +1248,8 @@ done: entry->mem_ptr = NULL; entry->mem_size = 0; } - - vfree(drv_info); } -EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump); +EXPORT_SYMBOL_GPL(mwifiex_prepare_fw_dump_info); /* * CFG802.11 network device handler for statistics retrieval. 
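
One detail worth calling out in the reworked upload path: dev_coredumpv() takes ownership of the vmalloc'ed buffer and frees it itself once the devcoredump entry times out (about five minutes), which is why the code above clears devdump_data and devdump_len immediately after the call. A minimal sketch of that hand-off, with an illustrative wrapper name:

#include <linux/device.h>
#include <linux/devcoredump.h>

static void dump_upload_example(struct device *dev, void **buf, size_t len)
{
	/* Ownership of *buf passes to the devcoredump core here; the
	 * core frees it, so the caller must not vfree() or reuse it. */
	dev_coredumpv(dev, *buf, len, GFP_KERNEL);
	*buf = NULL;
}
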
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 154c0796c0c5..6b5539b1f4d8 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -94,6 +94,8 @@ enum { #define MAX_EVENT_SIZE 2048 +#define MWIFIEX_FW_DUMP_SIZE (2 * 1024 * 1024) + #define ARP_FILTER_MAX_BUF_SIZE 68 #define MWIFIEX_KEY_BUFFER_SIZE 16 @@ -1032,6 +1034,10 @@ struct mwifiex_adapter { bool wake_by_wifi; /* Aggregation parameters*/ struct bus_aggr_params bus_aggr; + /* Device dump data/length */ + void *devdump_data; + int devdump_len; + struct timer_list devdump_timer; }; void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter); @@ -1656,9 +1662,9 @@ void mwifiex_hist_data_add(struct mwifiex_private *priv, u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv, u8 rx_rate, u8 ht_info); -int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info); -void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info, - int drv_info_size); +void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter); +void mwifiex_prepare_fw_dump_info(struct mwifiex_adapter *adapter); +void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter); void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags); void mwifiex_queue_main_work(struct mwifiex_adapter *adapter); int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action, @@ -1677,6 +1683,7 @@ void mwifiex_process_multi_chan_event(struct mwifiex_private *priv, void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter); int mwifiex_set_mac_address(struct mwifiex_private *priv, struct net_device *dev); +void mwifiex_devdump_tmo_func(unsigned long function_context); #ifdef CONFIG_DEBUG_FS void mwifiex_debugfs_init(void); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index cd314946452c..97a6199692ab 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2769,19 +2769,27 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter) { - int drv_info_size; - void *drv_info; + adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE); + if (!adapter->devdump_data) { + mwifiex_dbg(adapter, ERROR, + "vzalloc devdump data failure!\n"); + return; + } - drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info); + mwifiex_drv_info_dump(adapter); mwifiex_pcie_fw_dump(adapter); - mwifiex_upload_device_dump(adapter, drv_info, drv_info_size); + mwifiex_prepare_fw_dump_info(adapter); + mwifiex_upload_device_dump(adapter); } static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; - pci_reset_function(card->dev); + /* We can't afford to wait here; remove() might be waiting on us. If we + * can't grab the device lock, maybe we'll get another chance later. 
+ */ + pci_try_reset_function(card->dev); +} static void mwifiex_pcie_work(struct work_struct *work) diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index fd5183c10c4e..a82880132af4 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -2505,15 +2505,21 @@ done: static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter) { struct sdio_mmc_card *card = adapter->card; - int drv_info_size; - void *drv_info; - drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info); + adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE); + if (!adapter->devdump_data) { + mwifiex_dbg(adapter, ERROR, + "vzalloc devdump data failure!\n"); + return; + } + + mwifiex_drv_info_dump(adapter); if (card->fw_dump_enh) mwifiex_sdio_generic_fw_dump(adapter); else mwifiex_sdio_fw_dump(adapter); - mwifiex_upload_device_dump(adapter, drv_info, drv_info_size); + mwifiex_prepare_fw_dump_info(adapter); + mwifiex_upload_device_dump(adapter); } static void mwifiex_sdio_work(struct work_struct *work) diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index fb090144a6d8..211e47d8b318 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -2206,6 +2206,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_CHAN_REGION_CFG: ret = mwifiex_cmd_chan_region_cfg(priv, cmd_ptr, cmd_action); break; + case HostCmd_CMD_FW_DUMP_EVENT: + cmd_ptr->command = cpu_to_le16(cmd_no); + cmd_ptr->size = cpu_to_le16(S_DS_GEN); + break; default: mwifiex_dbg(priv->adapter, ERROR, "PREP_CMD: unknown cmd- %#x\n", cmd_no); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index d8db412b76c6..93dfb76cd8a6 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -584,6 +584,62 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv, adapter->coex_rx_win_size); } +static void +mwifiex_fw_dump_info_event(struct mwifiex_private *priv, + struct sk_buff *event_skb) +{ + struct mwifiex_adapter *adapter = priv->adapter; + struct mwifiex_fw_dump_header *fw_dump_hdr = + (void *)adapter->event_body; + + if (adapter->iface_type != MWIFIEX_USB) { + mwifiex_dbg(adapter, MSG, + "event is not on usb interface, ignore it\n"); + return; + } + + if (!adapter->devdump_data) { + /* When receiving the first event, allocate the device + * dump buffer and dump the driver info. + */ + adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE); + if (!adapter->devdump_data) { + mwifiex_dbg(adapter, ERROR, + "vzalloc devdump data failure!\n"); + return; + } + + mwifiex_drv_info_dump(adapter); + + /* If no subsequent event arrives within 10s, upload the + * device dump data; this is useful when the end of + * transmission event gets lost, since in this corner + * case the user still gets a partial dump.
+ */ + mod_timer(&adapter->devdump_timer, + jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S)); + } + + /* Overflow check */ + if (adapter->devdump_len + event_skb->len >= MWIFIEX_FW_DUMP_SIZE) + goto upload_dump; + + memmove(adapter->devdump_data + adapter->devdump_len, + adapter->event_skb->data, event_skb->len); + adapter->devdump_len += event_skb->len; + + if (le16_to_cpu(fw_dump_hdr->type) == FW_DUMP_INFO_ENDED) { + mwifiex_dbg(adapter, MSG, + "received end of transmission flag event!\n"); + goto upload_dump; + } + return; + +upload_dump: + del_timer_sync(&adapter->devdump_timer); + mwifiex_upload_device_dump(adapter); +} /* * This function handles events generated by firmware. * @@ -636,6 +692,7 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv, * - EVENT_DELBA * - EVENT_BA_STREAM_TIEMOUT * - EVENT_AMSDU_AGGR_CTRL + * - EVENT_FW_DUMP_INFO */ int mwifiex_process_sta_event(struct mwifiex_private *priv) { @@ -1007,6 +1064,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) adapter->event_skb->len - sizeof(eventcause)); break; + case EVENT_FW_DUMP_INFO: + mwifiex_dbg(adapter, EVENT, "event: firmware debug info\n"); + mwifiex_fw_dump_info_event(priv, adapter->event_skb); + break; /* Debugging event; not used, but let's not print an ERROR for it. */ case EVENT_UNKNOWN_DEBUG: mwifiex_dbg(adapter, EVENT, "event: debug\n"); diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index e813b2ca740c..8e4e9b6919e0 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -199,7 +199,7 @@ struct mwl8k_priv { struct ieee80211_channel channels_24[14]; struct ieee80211_rate rates_24[13]; struct ieee80211_supported_band band_50; - struct ieee80211_channel channels_50[4]; + struct ieee80211_channel channels_50[9]; struct ieee80211_rate rates_50[8]; u32 ap_macids_supported; u32 sta_macids_supported; @@ -383,6 +383,11 @@ static const struct ieee80211_channel mwl8k_channels_50[] = { { .band = NL80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, }, { .band = NL80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, }, { .band = NL80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, }, + { .band = NL80211_BAND_5GHZ, .center_freq = 5745, .hw_value = 149, }, + { .band = NL80211_BAND_5GHZ, .center_freq = 5765, .hw_value = 153, }, + { .band = NL80211_BAND_5GHZ, .center_freq = 5785, .hw_value = 157, }, + { .band = NL80211_BAND_5GHZ, .center_freq = 5805, .hw_value = 161, }, + { .band = NL80211_BAND_5GHZ, .center_freq = 5825, .hw_value = 165, }, }; static const struct ieee80211_rate mwl8k_rates_50[] = { diff --git a/drivers/net/wireless/mediatek/Kconfig b/drivers/net/wireless/mediatek/Kconfig index 28843fed750a..92ce4062f307 100644 --- a/drivers/net/wireless/mediatek/Kconfig +++ b/drivers/net/wireless/mediatek/Kconfig @@ -11,4 +11,5 @@ config WLAN_VENDOR_MEDIATEK if WLAN_VENDOR_MEDIATEK source "drivers/net/wireless/mediatek/mt7601u/Kconfig" +source "drivers/net/wireless/mediatek/mt76/Kconfig" endif # WLAN_VENDOR_MEDIATEK diff --git a/drivers/net/wireless/mediatek/Makefile b/drivers/net/wireless/mediatek/Makefile index 9d5f182fd7fd..00f945f59b38 100644 --- a/drivers/net/wireless/mediatek/Makefile +++ b/drivers/net/wireless/mediatek/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_MT7601U) += mt7601u/ +obj-$(CONFIG_MT76_CORE) += mt76/ diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig new file mode 100644 index 000000000000..fc05d79c80d0 --- /dev/null +++
b/drivers/net/wireless/mediatek/mt76/Kconfig @@ -0,0 +1,10 @@ +config MT76_CORE + tristate + +config MT76x2E + tristate "MediaTek MT76x2E (PCIe) support" + select MT76_CORE + depends on MAC80211 + depends on PCI + ---help--- + This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices. diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile new file mode 100644 index 000000000000..a0156bc01dea --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -0,0 +1,15 @@ +obj-$(CONFIG_MT76_CORE) += mt76.o +obj-$(CONFIG_MT76x2E) += mt76x2e.o + +mt76-y := \ + mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o + +CFLAGS_trace.o := -I$(src) + +mt76x2e-y := \ + mt76x2_pci.o mt76x2_dma.o \ + mt76x2_main.o mt76x2_init.o mt76x2_debugfs.o mt76x2_tx.o \ + mt76x2_core.o mt76x2_mac.o mt76x2_eeprom.o mt76x2_mcu.o mt76x2_phy.o \ + mt76x2_dfs.o mt76x2_trace.o + +CFLAGS_mt76x2_trace.o := -I$(src) diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c new file mode 100644 index 000000000000..8027bb7c03c2 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -0,0 +1,258 @@ +/* + * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
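
The receive-reorder code that follows leans on mac80211's modulo-4096 sequence-number helpers (ieee80211_sn_less(), ieee80211_sn_inc()). A standalone model of that arithmetic, for reference; the names here mirror the semantics rather than the exact mac80211 definitions:

#include <linux/types.h>

#define SN_MODULO 0x1000		/* 802.11 SNs are 12 bits */
#define SN_MASK   (SN_MODULO - 1)

/* "sn1 is older than sn2", judged by wrap-around distance: anything
 * more than half the window behind counts as less. */
static inline bool sn_less(u16 sn1, u16 sn2)
{
	return ((sn1 - sn2) & SN_MASK) > (SN_MODULO >> 1);
}

static inline u16 sn_inc(u16 sn)
{
	return (sn + 1) & SN_MASK;
}

This is what lets mt76_rx_aggr_reorder() below distinguish a late retransmission from before the window (dropped) from a jump far ahead of it (the window is advanced and earlier frames released).
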
+ */ +#include "mt76.h" + +#define REORDER_TIMEOUT (HZ / 10) + +static void +mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx) +{ + struct sk_buff *skb; + + tid->head = ieee80211_sn_inc(tid->head); + + skb = tid->reorder_buf[idx]; + if (!skb) + return; + + tid->reorder_buf[idx] = NULL; + tid->nframes--; + __skb_queue_tail(frames, skb); +} + +static void +mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid, struct sk_buff_head *frames, + u16 head) +{ + int idx; + + while (ieee80211_sn_less(tid->head, head)) { + idx = tid->head % tid->size; + mt76_aggr_release(tid, frames, idx); + } +} + +static void +mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames) +{ + int idx = tid->head % tid->size; + + while (tid->reorder_buf[idx]) { + mt76_aggr_release(tid, frames, idx); + idx = tid->head % tid->size; + } +} + +static void +mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames) +{ + struct mt76_rx_status *status; + struct sk_buff *skb; + int start, idx, nframes; + + if (!tid->nframes) + return; + + mt76_rx_aggr_release_head(tid, frames); + + start = tid->head % tid->size; + nframes = tid->nframes; + + for (idx = (tid->head + 1) % tid->size; + idx != start && nframes; + idx = (idx + 1) % tid->size) { + + skb = tid->reorder_buf[idx]; + if (!skb) + continue; + + nframes--; + status = (struct mt76_rx_status *) skb->cb; + if (!time_after(jiffies, status->reorder_time + + REORDER_TIMEOUT)) + continue; + + mt76_rx_aggr_release_frames(tid, frames, status->seqno); + } + + mt76_rx_aggr_release_head(tid, frames); +} + +static void +mt76_rx_aggr_reorder_work(struct work_struct *work) +{ + struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid, + reorder_work.work); + struct mt76_dev *dev = tid->dev; + struct sk_buff_head frames; + + __skb_queue_head_init(&frames); + + local_bh_disable(); + + spin_lock(&tid->lock); + mt76_rx_aggr_check_release(tid, &frames); + spin_unlock(&tid->lock); + + ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, REORDER_TIMEOUT); + mt76_rx_complete(dev, &frames, -1); + + local_bh_enable(); +} + +void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; + struct mt76_wcid *wcid = status->wcid; + struct ieee80211_sta *sta; + struct mt76_rx_tid *tid; + bool sn_less; + u16 seqno, head, size; + u8 idx; + + __skb_queue_tail(frames, skb); + + sta = wcid_to_sta(wcid); + if (!sta || !status->aggr) + return; + + tid = rcu_dereference(wcid->aggr[status->tid]); + if (!tid) + return; + + spin_lock_bh(&tid->lock); + + if (tid->stopped) + goto out; + + head = tid->head; + seqno = status->seqno; + size = tid->size; + sn_less = ieee80211_sn_less(seqno, head); + + if (!tid->started) { + if (sn_less) + goto out; + + tid->started = true; + } + + if (sn_less) { + __skb_unlink(skb, frames); + dev_kfree_skb(skb); + goto out; + } + + if (seqno == head) { + tid->head = ieee80211_sn_inc(head); + if (tid->nframes) + mt76_rx_aggr_release_head(tid, frames); + goto out; + } + + __skb_unlink(skb, frames); + + /* + * Frame sequence number exceeds buffering window, free up some space + * by releasing previous frames + */ + if (!ieee80211_sn_less(seqno, head + size)) { + head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size)); + mt76_rx_aggr_release_frames(tid, frames, head); + } + + idx = seqno % size; + + /* Discard if the current slot is already in use */ + if (tid->reorder_buf[idx]) { + dev_kfree_skb(skb); + goto out; + } + 
+ status->reorder_time = jiffies; + tid->reorder_buf[idx] = skb; + tid->nframes++; + mt76_rx_aggr_release_head(tid, frames); + + ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, REORDER_TIMEOUT); + +out: + spin_unlock_bh(&tid->lock); +} + +int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno, + u16 ssn, u8 size) +{ + struct mt76_rx_tid *tid; + + mt76_rx_aggr_stop(dev, wcid, tidno); + + tid = kzalloc(sizeof(*tid) + size * sizeof(tid->reorder_buf[0]), + GFP_KERNEL); + if (!tid) + return -ENOMEM; + + tid->dev = dev; + tid->head = ssn; + tid->size = size; + INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work); + spin_lock_init(&tid->lock); + + rcu_assign_pointer(wcid->aggr[tidno], tid); + + return 0; +} +EXPORT_SYMBOL_GPL(mt76_rx_aggr_start); + +static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) +{ + u8 size = tid->size; + int i; + + spin_lock_bh(&tid->lock); + + tid->stopped = true; + for (i = 0; tid->nframes && i < size; i++) { + struct sk_buff *skb = tid->reorder_buf[i]; + + if (!skb) + continue; + + tid->nframes--; + dev_kfree_skb(skb); + } + + spin_unlock_bh(&tid->lock); + + cancel_delayed_work_sync(&tid->reorder_work); +} + +void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno) +{ + struct mt76_rx_tid *tid; + + rcu_read_lock(); + + tid = rcu_dereference(wcid->aggr[tidno]); + if (tid) { + rcu_assign_pointer(wcid->aggr[tidno], NULL); + mt76_rx_aggr_shutdown(dev, tid); + kfree_rcu(tid, rcu_head); + } + + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop); diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c new file mode 100644 index 000000000000..c121b502a462 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/debugfs.c @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
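
A driver would typically consume the debugfs helper below by registering the common mt76 entries and then hanging its own files off the returned dentry; presumably this is how mt76x2_debugfs.c, added later in this series, uses it. A hedged usage sketch, with an illustrative function name and an extra entry that is purely for demonstration:

#include <linux/debugfs.h>

static int example_init_debugfs(struct mt76_dev *mdev, u32 *example_val)
{
	struct dentry *dir = mt76_register_debugfs(mdev);

	if (!dir)
		return -ENOMEM;

	/* driver-private knob next to the common regidx/regval files */
	debugfs_create_u32("example_val", 0600, dir, example_val);
	return 0;
}

Register access then works by writing an offset to regidx and reading regval back, which is convenient for bring-up and debugging without rebuilding the driver.
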
+ */ +#include "mt76.h" + +static int +mt76_reg_set(void *data, u64 val) +{ + struct mt76_dev *dev = data; + + dev->bus->wr(dev, dev->debugfs_reg, val); + return 0; +} + +static int +mt76_reg_get(void *data, u64 *val) +{ + struct mt76_dev *dev = data; + + *val = dev->bus->rr(dev, dev->debugfs_reg); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, + "0x%08llx\n"); + +static int +mt76_queues_read(struct seq_file *s, void *data) +{ + struct mt76_dev *dev = dev_get_drvdata(s->private); + int i; + + for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) { + struct mt76_queue *q = &dev->q_tx[i]; + + if (!q->ndesc) + continue; + + seq_printf(s, + "%d: queued=%d head=%d tail=%d swq_queued=%d\n", + i, q->queued, q->head, q->tail, q->swq_queued); + } + + return 0; +} + +struct dentry *mt76_register_debugfs(struct mt76_dev *dev) +{ + struct dentry *dir; + + dir = debugfs_create_dir("mt76", dev->hw->wiphy->debugfsdir); + if (!dir) + return NULL; + + debugfs_create_u8("led_pin", S_IRUSR | S_IWUSR, dir, &dev->led_pin); + debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg); + debugfs_create_file_unsafe("regval", S_IRUSR | S_IWUSR, dir, dev, + &fops_regval); + debugfs_create_blob("eeprom", S_IRUSR, dir, &dev->eeprom); + if (dev->otp.data) + debugfs_create_blob("otp", S_IRUSR, dir, &dev->otp); + debugfs_create_devm_seqfile(dev->dev, "queues", dir, mt76_queues_read); + + return dir; +} +EXPORT_SYMBOL_GPL(mt76_register_debugfs); diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c new file mode 100644 index 000000000000..3518703524e7 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -0,0 +1,459 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
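
The DMA code that follows uses the ctrl word of each descriptor as the completion handshake: the CPU pre-sets MT_DMA_CTL_DMA_DONE on idle descriptors, writes ctrl without it when posting buffers, and the hardware sets the bit again once it has finished with them. The check that mt76_dma_dequeue() open-codes, written out as a standalone helper (a sketch using the mt76_desc layout from dma.h; the helper name is not the driver's):

static bool mt76_desc_done(const struct mt76_desc *desc)
{
	/* compare in little-endian domain, as the driver itself does */
	return !!(READ_ONCE(desc->ctrl) & cpu_to_le32(MT_DMA_CTL_DMA_DONE));
}
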
+ */ + +#include <linux/dma-mapping.h> +#include "mt76.h" +#include "dma.h" + +#define DMA_DUMMY_TXWI ((void *) ~0) + +static int +mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q) +{ + int size; + int i; + + spin_lock_init(&q->lock); + INIT_LIST_HEAD(&q->swq); + + size = q->ndesc * sizeof(struct mt76_desc); + q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL); + if (!q->desc) + return -ENOMEM; + + size = q->ndesc * sizeof(*q->entry); + q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); + if (!q->entry) + return -ENOMEM; + + /* clear descriptors */ + for (i = 0; i < q->ndesc; i++) + q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); + + iowrite32(q->desc_dma, &q->regs->desc_base); + iowrite32(0, &q->regs->cpu_idx); + iowrite32(0, &q->regs->dma_idx); + iowrite32(q->ndesc, &q->regs->ring_size); + + return 0; +} + +static int +mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, + struct mt76_queue_buf *buf, int nbufs, u32 info, + struct sk_buff *skb, void *txwi) +{ + struct mt76_desc *desc; + u32 ctrl; + int i, idx = -1; + + if (txwi) + q->entry[q->head].txwi = DMA_DUMMY_TXWI; + + for (i = 0; i < nbufs; i += 2, buf += 2) { + u32 buf0 = buf[0].addr, buf1 = 0; + + ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len); + if (i < nbufs - 1) { + buf1 = buf[1].addr; + ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len); + } + + if (i == nbufs - 1) + ctrl |= MT_DMA_CTL_LAST_SEC0; + else if (i == nbufs - 2) + ctrl |= MT_DMA_CTL_LAST_SEC1; + + idx = q->head; + q->head = (q->head + 1) % q->ndesc; + + desc = &q->desc[idx]; + + WRITE_ONCE(desc->buf0, cpu_to_le32(buf0)); + WRITE_ONCE(desc->buf1, cpu_to_le32(buf1)); + WRITE_ONCE(desc->info, cpu_to_le32(info)); + WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); + + q->queued++; + } + + q->entry[idx].txwi = txwi; + q->entry[idx].skb = skb; + + return idx; +} + +static void +mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, + struct mt76_queue_entry *prev_e) +{ + struct mt76_queue_entry *e = &q->entry[idx]; + __le32 __ctrl = READ_ONCE(q->desc[idx].ctrl); + u32 ctrl = le32_to_cpu(__ctrl); + + if (!e->txwi || !e->skb) { + __le32 addr = READ_ONCE(q->desc[idx].buf0); + u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl); + + dma_unmap_single(dev->dev, le32_to_cpu(addr), len, + DMA_TO_DEVICE); + } + + if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) { + __le32 addr = READ_ONCE(q->desc[idx].buf1); + u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl); + + dma_unmap_single(dev->dev, le32_to_cpu(addr), len, + DMA_TO_DEVICE); + } + + if (e->txwi == DMA_DUMMY_TXWI) + e->txwi = NULL; + + *prev_e = *e; + memset(e, 0, sizeof(*e)); +} + +static void +mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) +{ + q->head = ioread32(&q->regs->dma_idx); + q->tail = q->head; + iowrite32(q->head, &q->regs->cpu_idx); +} + +static void +mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) +{ + struct mt76_queue *q = &dev->q_tx[qid]; + struct mt76_queue_entry entry; + bool wake = false; + int last; + + if (!q->ndesc) + return; + + spin_lock_bh(&q->lock); + if (flush) + last = -1; + else + last = ioread32(&q->regs->dma_idx); + + while (q->queued && q->tail != last) { + mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); + if (entry.schedule) + q->swq_queued--; + + if (entry.skb) + dev->drv->tx_complete_skb(dev, q, &entry, flush); + + if (entry.txwi) { + mt76_put_txwi(dev, entry.txwi); + wake = true; + } + + q->tail = (q->tail + 1) % q->ndesc; + q->queued--; + + if (!flush && q->tail == last) + last = 
ioread32(&q->regs->dma_idx); + } + + if (!flush) + mt76_txq_schedule(dev, q); + else + mt76_dma_sync_idx(dev, q); + + wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; + spin_unlock_bh(&q->lock); + + if (wake) + ieee80211_wake_queue(dev->hw, qid); +} + +static void * +mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, + int *len, u32 *info, bool *more) +{ + struct mt76_queue_entry *e = &q->entry[idx]; + struct mt76_desc *desc = &q->desc[idx]; + dma_addr_t buf_addr; + void *buf = e->buf; + int buf_len = SKB_WITH_OVERHEAD(q->buf_size); + + buf_addr = le32_to_cpu(READ_ONCE(desc->buf0)); + if (len) { + u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl)); + *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl); + *more = !(ctl & MT_DMA_CTL_LAST_SEC0); + } + + if (info) + *info = le32_to_cpu(desc->info); + + dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE); + e->buf = NULL; + + return buf; +} + +static void * +mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush, + int *len, u32 *info, bool *more) +{ + int idx = q->tail; + + *more = false; + if (!q->queued) + return NULL; + + if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) + return NULL; + + q->tail = (q->tail + 1) % q->ndesc; + q->queued--; + + return mt76_dma_get_buf(dev, q, idx, len, info, more); +} + +static void +mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) +{ + iowrite32(q->head, &q->regs->cpu_idx); +} + +static int +mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi) +{ + dma_addr_t addr; + void *buf; + int frames = 0; + int len = SKB_WITH_OVERHEAD(q->buf_size); + int offset = q->buf_offset; + int idx; + void *(*alloc)(unsigned int fragsz); + + if (napi) + alloc = napi_alloc_frag; + else + alloc = netdev_alloc_frag; + + spin_lock_bh(&q->lock); + + while (q->queued < q->ndesc - 1) { + struct mt76_queue_buf qbuf; + + buf = alloc(q->buf_size); + if (!buf) + break; + + addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE); + if (dma_mapping_error(dev->dev, addr)) { + skb_free_frag(buf); + break; + } + + qbuf.addr = addr + offset; + qbuf.len = len - offset; + idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL); + frames++; + } + + if (frames) + mt76_dma_kick_queue(dev, q); + + spin_unlock_bh(&q->lock); + + return frames; +} + +static void +mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) +{ + void *buf; + bool more; + + spin_lock_bh(&q->lock); + do { + buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more); + if (!buf) + break; + + skb_free_frag(buf); + } while (1); + spin_unlock_bh(&q->lock); +} + +static void +mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid) +{ + struct mt76_queue *q = &dev->q_rx[qid]; + int i; + + for (i = 0; i < q->ndesc; i++) + q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE); + + mt76_dma_rx_cleanup(dev, q); + mt76_dma_sync_idx(dev, q); + mt76_dma_rx_fill(dev, q, false); +} + +static void +mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, + int len, bool more) +{ + struct page *page = virt_to_head_page(data); + int offset = data - page_address(page); + struct sk_buff *skb = q->rx_head; + + offset += q->buf_offset; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len, + q->buf_size); + + if (more) + return; + + q->rx_head = NULL; + dev->drv->rx_skb(dev, q - dev->q_rx, skb); +} + +static int +mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) +{ + struct sk_buff *skb; + unsigned char *data; + int len; + int 
done = 0; + bool more; + + while (done < budget) { + u32 info; + + data = mt76_dma_dequeue(dev, q, false, &len, &info, &more); + if (!data) + break; + + if (q->rx_head) { + mt76_add_fragment(dev, q, data, len, more); + continue; + } + + skb = build_skb(data, q->buf_size); + if (!skb) { + skb_free_frag(data); + continue; + } + + skb_reserve(skb, q->buf_offset); + if (skb->tail + len > skb->end) { + dev_kfree_skb(skb); + continue; + } + + if (q == &dev->q_rx[MT_RXQ_MCU]) { + u32 *rxfce = (u32 *) skb->cb; + *rxfce = info; + } + + __skb_put(skb, len); + done++; + + if (more) { + q->rx_head = skb; + continue; + } + + dev->drv->rx_skb(dev, q - dev->q_rx, skb); + } + + mt76_dma_rx_fill(dev, q, true); + return done; +} + +static int +mt76_dma_rx_poll(struct napi_struct *napi, int budget) +{ + struct mt76_dev *dev; + int qid, done = 0, cur; + + dev = container_of(napi->dev, struct mt76_dev, napi_dev); + qid = napi - dev->napi; + + rcu_read_lock(); + + do { + cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done); + mt76_rx_poll_complete(dev, qid); + done += cur; + } while (cur && done < budget); + + rcu_read_unlock(); + + if (done < budget) { + napi_complete(napi); + dev->drv->rx_poll_complete(dev, qid); + } + + return done; +} + +static int +mt76_dma_init(struct mt76_dev *dev) +{ + int i; + + init_dummy_netdev(&dev->napi_dev); + + for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) { + netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll, + 64); + mt76_dma_rx_fill(dev, &dev->q_rx[i], false); + skb_queue_head_init(&dev->rx_skb[i]); + napi_enable(&dev->napi[i]); + } + + return 0; +} + +static const struct mt76_queue_ops mt76_dma_ops = { + .init = mt76_dma_init, + .alloc = mt76_dma_alloc_queue, + .add_buf = mt76_dma_add_buf, + .tx_cleanup = mt76_dma_tx_cleanup, + .rx_reset = mt76_dma_rx_reset, + .kick = mt76_dma_kick_queue, +}; + +int mt76_dma_attach(struct mt76_dev *dev) +{ + dev->queue_ops = &mt76_dma_ops; + return 0; +} +EXPORT_SYMBOL_GPL(mt76_dma_attach); + +void mt76_dma_cleanup(struct mt76_dev *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) + mt76_dma_tx_cleanup(dev, i, true); + + for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) { + netif_napi_del(&dev->napi[i]); + mt76_dma_rx_cleanup(dev, &dev->q_rx[i]); + } +} +EXPORT_SYMBOL_GPL(mt76_dma_cleanup); diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h new file mode 100644 index 000000000000..1dad39697929 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/dma.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
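
The SD_LEN0/SD_LEN1 fields in the descriptor ctrl word (masks defined just below) are packed and unpacked with the kernel's bitfield helpers, which derive the shift from the mask at compile time. A small self-contained illustration with hypothetical helper names:

#include <linux/bitfield.h>
#include <linux/types.h>

static u32 pack_ctrl(u16 len0, u16 len1, bool last)
{
	u32 ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, len0) |
		   FIELD_PREP(MT_DMA_CTL_SD_LEN1, len1);

	if (last)
		ctrl |= MT_DMA_CTL_LAST_SEC0;

	return ctrl;
}

static u16 unpack_len0(u32 ctrl)
{
	return FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
}
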
+ */ +#ifndef __MT76_DMA_H +#define __MT76_DMA_H + +#define MT_RING_SIZE 0x10 + +#define MT_DMA_CTL_SD_LEN1 GENMASK(13, 0) +#define MT_DMA_CTL_LAST_SEC1 BIT(14) +#define MT_DMA_CTL_BURST BIT(15) +#define MT_DMA_CTL_SD_LEN0 GENMASK(29, 16) +#define MT_DMA_CTL_LAST_SEC0 BIT(30) +#define MT_DMA_CTL_DMA_DONE BIT(31) + +struct mt76_desc { + __le32 buf0; + __le32 ctrl; + __le32 buf1; + __le32 info; +} __packed __aligned(4); + +int mt76_dma_attach(struct mt76_dev *dev); +void mt76_dma_cleanup(struct mt76_dev *dev); + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c new file mode 100644 index 000000000000..530e5593765c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#include <linux/of.h> +#include <linux/of_net.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/etherdevice.h> +#include "mt76.h" + +static int +mt76_get_of_eeprom(struct mt76_dev *dev, int len) +{ +#if defined(CONFIG_OF) && defined(CONFIG_MTD) + struct device_node *np = dev->dev->of_node; + struct mtd_info *mtd; + const __be32 *list; + const char *part; + phandle phandle; + int offset = 0; + int size; + size_t retlen; + int ret; + + if (!np) + return -ENOENT; + + list = of_get_property(np, "mediatek,mtd-eeprom", &size); + if (!list) + return -ENOENT; + + phandle = be32_to_cpup(list++); + if (!phandle) + return -ENOENT; + + np = of_find_node_by_phandle(phandle); + if (!np) + return -EINVAL; + + part = of_get_property(np, "label", NULL); + if (!part) + part = np->name; + + mtd = get_mtd_device_nm(part); + if (IS_ERR(mtd)) + return PTR_ERR(mtd); + + if (size <= sizeof(*list)) + return -EINVAL; + + offset = be32_to_cpup(list); + ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data); + put_mtd_device(mtd); + if (ret) + return ret; + + if (retlen < len) + return -EINVAL; + + return 0; +#else + return -ENOENT; +#endif +} + +void +mt76_eeprom_override(struct mt76_dev *dev) +{ +#ifdef CONFIG_OF + struct device_node *np = dev->dev->of_node; + const u8 *mac; + + if (!np) + return; + + mac = of_get_mac_address(np); + if (mac) + memcpy(dev->macaddr, mac, ETH_ALEN); +#endif + + if (!is_valid_ether_addr(dev->macaddr)) { + eth_random_addr(dev->macaddr); + dev_info(dev->dev, + "Invalid MAC address, using random address %pM\n", + dev->macaddr); + } +} +EXPORT_SYMBOL_GPL(mt76_eeprom_override); + +int +mt76_eeprom_init(struct mt76_dev *dev, int len) +{ + dev->eeprom.size = len; + dev->eeprom.data = devm_kzalloc(dev->dev, len, GFP_KERNEL); + if (!dev->eeprom.data) + return -ENOMEM; + + return !mt76_get_of_eeprom(dev, len); +} +EXPORT_SYMBOL_GPL(mt76_eeprom_init); diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c 
b/drivers/net/wireless/mediatek/mt76/mac80211.c new file mode 100644 index 000000000000..5fcb2deb89a2 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -0,0 +1,505 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#include <linux/of.h> +#include "mt76.h" + +#define CHAN2G(_idx, _freq) { \ + .band = NL80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_idx), \ + .max_power = 30, \ +} + +#define CHAN5G(_idx, _freq) { \ + .band = NL80211_BAND_5GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_idx), \ + .max_power = 30, \ +} + +static const struct ieee80211_channel mt76_channels_2ghz[] = { + CHAN2G(1, 2412), + CHAN2G(2, 2417), + CHAN2G(3, 2422), + CHAN2G(4, 2427), + CHAN2G(5, 2432), + CHAN2G(6, 2437), + CHAN2G(7, 2442), + CHAN2G(8, 2447), + CHAN2G(9, 2452), + CHAN2G(10, 2457), + CHAN2G(11, 2462), + CHAN2G(12, 2467), + CHAN2G(13, 2472), + CHAN2G(14, 2484), +}; + +static const struct ieee80211_channel mt76_channels_5ghz[] = { + CHAN5G(36, 5180), + CHAN5G(40, 5200), + CHAN5G(44, 5220), + CHAN5G(48, 5240), + + CHAN5G(52, 5260), + CHAN5G(56, 5280), + CHAN5G(60, 5300), + CHAN5G(64, 5320), + + CHAN5G(100, 5500), + CHAN5G(104, 5520), + CHAN5G(108, 5540), + CHAN5G(112, 5560), + CHAN5G(116, 5580), + CHAN5G(120, 5600), + CHAN5G(124, 5620), + CHAN5G(128, 5640), + CHAN5G(132, 5660), + CHAN5G(136, 5680), + CHAN5G(140, 5700), + + CHAN5G(149, 5745), + CHAN5G(153, 5765), + CHAN5G(157, 5785), + CHAN5G(161, 5805), + CHAN5G(165, 5825), +}; + +static const struct ieee80211_tpt_blink mt76_tpt_blink[] = { + { .throughput = 0 * 1024, .blink_time = 334 }, + { .throughput = 1 * 1024, .blink_time = 260 }, + { .throughput = 5 * 1024, .blink_time = 220 }, + { .throughput = 10 * 1024, .blink_time = 190 }, + { .throughput = 20 * 1024, .blink_time = 170 }, + { .throughput = 50 * 1024, .blink_time = 150 }, + { .throughput = 70 * 1024, .blink_time = 130 }, + { .throughput = 100 * 1024, .blink_time = 110 }, + { .throughput = 200 * 1024, .blink_time = 80 }, + { .throughput = 300 * 1024, .blink_time = 50 }, +}; + +static int mt76_led_init(struct mt76_dev *dev) +{ + struct device_node *np = dev->dev->of_node; + struct ieee80211_hw *hw = dev->hw; + int led_pin; + + if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set) + return 0; + + snprintf(dev->led_name, sizeof(dev->led_name), + "mt76-%s", wiphy_name(hw->wiphy)); + + dev->led_cdev.name = dev->led_name; + dev->led_cdev.default_trigger = + ieee80211_create_tpt_led_trigger(hw, + IEEE80211_TPT_LEDTRIG_FL_RADIO, + mt76_tpt_blink, + ARRAY_SIZE(mt76_tpt_blink)); + + np = of_get_child_by_name(np, "led"); + if (np) { + if (!of_property_read_u32(np, "led-sources", &led_pin)) + dev->led_pin = led_pin; + dev->led_al = of_property_read_bool(np, "led-active-low"); + } + + return 
devm_led_classdev_register(dev->dev, &dev->led_cdev); +} + +static int +mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband, + const struct ieee80211_channel *chan, int n_chan, + struct ieee80211_rate *rates, int n_rates, bool vht) +{ + struct ieee80211_supported_band *sband = &msband->sband; + struct ieee80211_sta_ht_cap *ht_cap; + struct ieee80211_sta_vht_cap *vht_cap; + void *chanlist; + u16 mcs_map; + int size; + + size = n_chan * sizeof(*chan); + chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL); + if (!chanlist) + return -ENOMEM; + + msband->chan = devm_kzalloc(dev->dev, n_chan * sizeof(*msband->chan), + GFP_KERNEL); + if (!msband->chan) + return -ENOMEM; + + sband->channels = chanlist; + sband->n_channels = n_chan; + sband->bitrates = rates; + sband->n_bitrates = n_rates; + dev->chandef.chan = &sband->channels[0]; + + ht_cap = &sband->ht_cap; + ht_cap->ht_supported = true; + ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_GRN_FLD | + IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_SGI_40 | + IEEE80211_HT_CAP_TX_STBC | + (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); + + ht_cap->mcs.rx_mask[0] = 0xff; + ht_cap->mcs.rx_mask[1] = 0xff; + ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4; + + if (!vht) + return 0; + + vht_cap = &sband->vht_cap; + vht_cap->vht_supported = true; + + mcs_map = (IEEE80211_VHT_MCS_SUPPORT_0_9 << (0 * 2)) | + (IEEE80211_VHT_MCS_SUPPORT_0_9 << (1 * 2)) | + (IEEE80211_VHT_MCS_NOT_SUPPORTED << (2 * 2)) | + (IEEE80211_VHT_MCS_NOT_SUPPORTED << (3 * 2)) | + (IEEE80211_VHT_MCS_NOT_SUPPORTED << (4 * 2)) | + (IEEE80211_VHT_MCS_NOT_SUPPORTED << (5 * 2)) | + (IEEE80211_VHT_MCS_NOT_SUPPORTED << (6 * 2)) | + (IEEE80211_VHT_MCS_NOT_SUPPORTED << (7 * 2)); + + vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); + vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); + vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC | + IEEE80211_VHT_CAP_TXSTBC | + IEEE80211_VHT_CAP_RXSTBC_1 | + IEEE80211_VHT_CAP_SHORT_GI_80; + + return 0; +} + +static int +mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates, + int n_rates) +{ + dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->sband_2g.sband; + + return mt76_init_sband(dev, &dev->sband_2g, + mt76_channels_2ghz, + ARRAY_SIZE(mt76_channels_2ghz), + rates, n_rates, false); +} + +static int +mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate *rates, + int n_rates, bool vht) +{ + dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->sband_5g.sband; + + return mt76_init_sband(dev, &dev->sband_5g, + mt76_channels_5ghz, + ARRAY_SIZE(mt76_channels_5ghz), + rates, n_rates, vht); +} + +static void +mt76_check_sband(struct mt76_dev *dev, int band) +{ + struct ieee80211_supported_band *sband = dev->hw->wiphy->bands[band]; + bool found = false; + int i; + + if (!sband) + return; + + for (i = 0; i < sband->n_channels; i++) { + if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED) + continue; + + found = true; + break; + } + + if (found) + return; + + sband->n_channels = 0; + dev->hw->wiphy->bands[band] = NULL; +} + +int mt76_register_device(struct mt76_dev *dev, bool vht, + struct ieee80211_rate *rates, int n_rates) +{ + struct ieee80211_hw *hw = dev->hw; + struct wiphy *wiphy = hw->wiphy; + int ret; + + dev_set_drvdata(dev->dev, dev); + + spin_lock_init(&dev->lock); + spin_lock_init(&dev->cc_lock); + INIT_LIST_HEAD(&dev->txwi_cache); + + SET_IEEE80211_DEV(hw, dev->dev); + SET_IEEE80211_PERM_ADDR(hw, 
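[Editor's note: the vht_mcs map built in mt76_init_sband() above packs two bits per spatial stream into a u16. A worked illustration; the helper name is invented for this sketch:

static u16 example_vht_mcs_map(int nss)
{
        u16 map = 0;
        int i;

        /* 2 bits per stream: 2 = MCS 0-9 supported, 3 = unsupported */
        for (i = 0; i < 8; i++)
                map |= (i < nss ? IEEE80211_VHT_MCS_SUPPORT_0_9
                                : IEEE80211_VHT_MCS_NOT_SUPPORTED) << (i * 2);

        return map;
}

example_vht_mcs_map(2) evaluates to 0xfffa, the same value the explicit shifts above produce for this two-stream device.]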
dev->macaddr); + + wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_ADHOC); + + wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; + + hw->txq_data_size = sizeof(struct mt76_txq); + hw->max_tx_fragments = 16; + + ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, PS_NULLFUNC_STACK); + ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING); + ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); + ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); + ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); + ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); + ieee80211_hw_set(hw, TX_AMSDU); + ieee80211_hw_set(hw, TX_FRAG_LIST); + ieee80211_hw_set(hw, MFP_CAPABLE); + + wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + + if (dev->cap.has_2ghz) { + ret = mt76_init_sband_2g(dev, rates, n_rates); + if (ret) + return ret; + } + + if (dev->cap.has_5ghz) { + ret = mt76_init_sband_5g(dev, rates + 4, n_rates - 4, vht); + if (ret) + return ret; + } + + wiphy_read_of_freq_limits(dev->hw->wiphy); + mt76_check_sband(dev, NL80211_BAND_2GHZ); + mt76_check_sband(dev, NL80211_BAND_5GHZ); + + ret = mt76_led_init(dev); + if (ret) + return ret; + + return ieee80211_register_hw(hw); +} +EXPORT_SYMBOL_GPL(mt76_register_device); + +void mt76_unregister_device(struct mt76_dev *dev) +{ + struct ieee80211_hw *hw = dev->hw; + + ieee80211_unregister_hw(hw); + mt76_tx_free(dev); +} +EXPORT_SYMBOL_GPL(mt76_unregister_device); + +void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb) +{ + if (!test_bit(MT76_STATE_RUNNING, &dev->state)) { + dev_kfree_skb(skb); + return; + } + + __skb_queue_tail(&dev->rx_skb[q], skb); +} +EXPORT_SYMBOL_GPL(mt76_rx); + +void mt76_set_channel(struct mt76_dev *dev) +{ + struct ieee80211_hw *hw = dev->hw; + struct cfg80211_chan_def *chandef = &hw->conf.chandef; + struct mt76_channel_state *state; + bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL; + + if (dev->drv->update_survey) + dev->drv->update_survey(dev); + + dev->chandef = *chandef; + + if (!offchannel) + dev->main_chan = chandef->chan; + + if (chandef->chan != dev->main_chan) { + state = mt76_channel_state(dev, chandef->chan); + memset(state, 0, sizeof(*state)); + } +} +EXPORT_SYMBOL_GPL(mt76_set_channel); + +int mt76_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey) +{ + struct mt76_dev *dev = hw->priv; + struct mt76_sband *sband; + struct ieee80211_channel *chan; + struct mt76_channel_state *state; + int ret = 0; + + if (idx == 0 && dev->drv->update_survey) + dev->drv->update_survey(dev); + + sband = &dev->sband_2g; + if (idx >= sband->sband.n_channels) { + idx -= sband->sband.n_channels; + sband = &dev->sband_5g; + } + + if (idx >= sband->sband.n_channels) + return -ENOENT; + + chan = &sband->sband.channels[idx]; + state = mt76_channel_state(dev, chan); + + memset(survey, 0, sizeof(*survey)); + survey->channel = chan; + survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY; + if (chan == dev->main_chan) + survey->filled |= SURVEY_INFO_IN_USE; + + spin_lock_bh(&dev->cc_lock); + survey->time = div_u64(state->cc_active, 1000); + survey->time_busy = div_u64(state->cc_busy, 1000); + spin_unlock_bh(&dev->cc_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mt76_get_survey); + +void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, + struct ieee80211_key_conf *key) +{ + struct ieee80211_key_seq seq; + int i; + + wcid->rx_check_pn = false; + + if 
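[Editor's note: mt76_get_survey() above reports SURVEY_INFO_TIME in milliseconds by dividing cc_active/cc_busy by 1000, so the per-channel counters are expected in microseconds. A hedged sketch of the drv->update_survey hook that would feed them; example_read_busy_us() and EXAMPLE_PERIOD_US are invented for the illustration:

static void example_update_survey(struct mt76_dev *dev)
{
        struct mt76_channel_state *state;
        u32 busy_us = example_read_busy_us(dev); /* hypothetical HW read */

        if (!dev->main_chan)
                return;

        state = mt76_channel_state(dev, dev->main_chan);

        /* accumulate in usec; mt76_get_survey() converts to msec */
        spin_lock_bh(&dev->cc_lock);
        state->cc_busy += busy_us;
        state->cc_active += EXAMPLE_PERIOD_US;  /* sampling interval */
        spin_unlock_bh(&dev->cc_lock);
}]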
(!key) + return; + + if (key->cipher == WLAN_CIPHER_SUITE_CCMP) + wcid->rx_check_pn = true; + + for (i = 0; i < IEEE80211_NUM_TIDS; i++) { + ieee80211_get_key_rx_seq(key, i, &seq); + memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn)); + } +} +EXPORT_SYMBOL(mt76_wcid_key_setup); + +static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct mt76_rx_status mstat; + + mstat = *((struct mt76_rx_status *) skb->cb); + memset(status, 0, sizeof(*status)); + + status->flag = mstat.flag; + status->freq = mstat.freq; + status->enc_flags = mstat.enc_flags; + status->encoding = mstat.encoding; + status->bw = mstat.bw; + status->rate_idx = mstat.rate_idx; + status->nss = mstat.nss; + status->band = mstat.band; + status->signal = mstat.signal; + status->chains = mstat.chains; + + BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb)); + BUILD_BUG_ON(sizeof(status->chain_signal) != sizeof(mstat.chain_signal)); + memcpy(status->chain_signal, mstat.chain_signal, sizeof(mstat.chain_signal)); + + return wcid_to_sta(mstat.wcid); +} + +static int +mt76_check_ccmp_pn(struct sk_buff *skb) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; + struct mt76_wcid *wcid = status->wcid; + struct ieee80211_hdr *hdr; + int ret; + + if (!(status->flag & RX_FLAG_DECRYPTED)) + return 0; + + if (!wcid || !wcid->rx_check_pn) + return 0; + + if (!(status->flag & RX_FLAG_IV_STRIPPED)) { + /* + * Validate the first fragment both here and in mac80211 + * All further fragments will be validated by mac80211 only. + */ + hdr = (struct ieee80211_hdr *) skb->data; + if (ieee80211_is_frag(hdr) && + !ieee80211_is_first_frag(hdr->frame_control)) + return 0; + } + + BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0])); + ret = memcmp(status->iv, wcid->rx_key_pn[status->tid], + sizeof(status->iv)); + if (ret <= 0) + return -EINVAL; /* replay */ + + memcpy(wcid->rx_key_pn[status->tid], status->iv, sizeof(status->iv)); + + if (status->flag & RX_FLAG_IV_STRIPPED) + status->flag |= RX_FLAG_PN_VALIDATED; + + return 0; +} + +void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, + int queue) +{ + struct napi_struct *napi = NULL; + struct ieee80211_sta *sta; + struct sk_buff *skb; + + if (queue >= 0) + napi = &dev->napi[queue]; + + while ((skb = __skb_dequeue(frames)) != NULL) { + if (mt76_check_ccmp_pn(skb)) { + dev_kfree_skb(skb); + continue; + } + + sta = mt76_rx_convert(skb); + ieee80211_rx_napi(dev->hw, sta, skb, napi); + } +} + +void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q) +{ + struct sk_buff_head frames; + struct sk_buff *skb; + + __skb_queue_head_init(&frames); + + while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) + mt76_rx_aggr_reorder(skb, &frames); + + mt76_rx_complete(dev, &frames, q); +} diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c new file mode 100644 index 000000000000..09a14dead6e3 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mmio.c @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
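[Editor's note: the memcmp() in mt76_check_ccmp_pn() above works because CCMP packet numbers are 48-bit counters stored most-significant byte first (that is how ieee80211_get_key_rx_seq() fills seq.ccmp.pn), so lexicographic byte order equals numeric order. Reduced to a helper:

static bool example_pn_is_replay(const u8 new_pn[6], const u8 last_pn[6])
{
        /* big-endian byte layout: memcmp() gives numeric comparison;
         * a PN not strictly greater than the last accepted one must
         * be discarded as a replay */
        return memcmp(new_pn, last_pn, 6) <= 0;
}]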
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "mt76.h" +#include "trace.h" + +static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset) +{ + u32 val; + + val = ioread32(dev->regs + offset); + trace_reg_rr(dev, offset, val); + + return val; +} + +static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val) +{ + trace_reg_wr(dev, offset, val); + iowrite32(val, dev->regs + offset); +} + +static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val) +{ + val |= mt76_mmio_rr(dev, offset) & ~mask; + mt76_mmio_wr(dev, offset, val); + return val; +} + +static void mt76_mmio_copy(struct mt76_dev *dev, u32 offset, const void *data, + int len) +{ + __iowrite32_copy(dev->regs + offset, data, len >> 2); +} + +void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs) +{ + static const struct mt76_bus_ops mt76_mmio_ops = { + .rr = mt76_mmio_rr, + .rmw = mt76_mmio_rmw, + .wr = mt76_mmio_wr, + .copy = mt76_mmio_copy, + }; + + dev->bus = &mt76_mmio_ops; + dev->regs = regs; +} +EXPORT_SYMBOL_GPL(mt76_mmio_init); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h new file mode 100644 index 000000000000..129015c9d116 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -0,0 +1,432 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
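[Editor's note: mt76_mmio_rmw() above clears the mask bits and then ORs in the new value, which is what makes the field helpers defined later in mt76.h work. A usage sketch through the bus ops; MT_EXAMPLE_CFG and MT_EXAMPLE_RATE are invented register/field names:

#include <linux/bitfield.h>

static void example_set_rate_field(struct mt76_dev *dev)
{
        /* read, clear the field, OR in the encoded value, write back */
        dev->bus->rmw(dev, MT_EXAMPLE_CFG, MT_EXAMPLE_RATE,
                      FIELD_PREP(MT_EXAMPLE_RATE, 5));
}]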
+ */ + +#ifndef __MT76_H +#define __MT76_H + +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/spinlock.h> +#include <linux/skbuff.h> +#include <linux/leds.h> +#include <net/mac80211.h> +#include "util.h" + +#define MT_TX_RING_SIZE 256 +#define MT_MCU_RING_SIZE 32 +#define MT_RX_BUF_SIZE 2048 + +struct mt76_dev; + +struct mt76_bus_ops { + u32 (*rr)(struct mt76_dev *dev, u32 offset); + void (*wr)(struct mt76_dev *dev, u32 offset, u32 val); + u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val); + void (*copy)(struct mt76_dev *dev, u32 offset, const void *data, + int len); +}; + +enum mt76_txq_id { + MT_TXQ_VO = IEEE80211_AC_VO, + MT_TXQ_VI = IEEE80211_AC_VI, + MT_TXQ_BE = IEEE80211_AC_BE, + MT_TXQ_BK = IEEE80211_AC_BK, + MT_TXQ_PSD, + MT_TXQ_MCU, + MT_TXQ_BEACON, + MT_TXQ_CAB, + __MT_TXQ_MAX +}; + +enum mt76_rxq_id { + MT_RXQ_MAIN, + MT_RXQ_MCU, + __MT_RXQ_MAX +}; + +struct mt76_queue_buf { + dma_addr_t addr; + int len; +}; + +struct mt76_queue_entry { + union { + void *buf; + struct sk_buff *skb; + }; + struct mt76_txwi_cache *txwi; + bool schedule; +}; + +struct mt76_queue_regs { + u32 desc_base; + u32 ring_size; + u32 cpu_idx; + u32 dma_idx; +} __packed __aligned(4); + +struct mt76_queue { + struct mt76_queue_regs __iomem *regs; + + spinlock_t lock; + struct mt76_queue_entry *entry; + struct mt76_desc *desc; + + struct list_head swq; + int swq_queued; + + u16 head; + u16 tail; + int ndesc; + int queued; + int buf_size; + + u8 buf_offset; + u8 hw_idx; + + dma_addr_t desc_dma; + struct sk_buff *rx_head; +}; + +struct mt76_queue_ops { + int (*init)(struct mt76_dev *dev); + + int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q); + + int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q, + struct mt76_queue_buf *buf, int nbufs, u32 info, + struct sk_buff *skb, void *txwi); + + void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush, + int *len, u32 *info, bool *more); + + void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid); + + void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid, + bool flush); + + void (*kick)(struct mt76_dev *dev, struct mt76_queue *q); +}; + +struct mt76_wcid { + struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS]; + + struct work_struct aggr_work; + + u8 idx; + u8 hw_key_idx; + + u8 sta:1; + + u8 rx_check_pn; + u8 rx_key_pn[IEEE80211_NUM_TIDS][6]; + + __le16 tx_rate; + bool tx_rate_set; + u8 tx_rate_nss; + s8 max_txpwr_adj; + bool sw_iv; +}; + +struct mt76_txq { + struct list_head list; + struct mt76_queue *hwq; + struct mt76_wcid *wcid; + + struct sk_buff_head retry_q; + + u16 agg_ssn; + bool send_bar; + bool aggr; +}; + +struct mt76_txwi_cache { + u32 txwi[8]; + dma_addr_t dma_addr; + struct list_head list; +}; + + +struct mt76_rx_tid { + struct rcu_head rcu_head; + + struct mt76_dev *dev; + + spinlock_t lock; + struct delayed_work reorder_work; + + u16 head; + u8 size; + u8 nframes; + + u8 started:1, stopped:1, timer_pending:1; + + struct sk_buff *reorder_buf[]; +}; + +enum { + MT76_STATE_INITIALIZED, + MT76_STATE_RUNNING, + MT76_SCANNING, + MT76_RESET, +}; + +struct mt76_hw_cap { + bool has_2ghz; + bool has_5ghz; +}; + +struct mt76_driver_ops { + u16 txwi_size; + + void (*update_survey)(struct mt76_dev *dev); + + int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, + struct ieee80211_sta *sta, u32 *tx_info); + + void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush); + 
+ void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q, + struct sk_buff *skb); + + void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q); +}; + +struct mt76_channel_state { + u64 cc_active; + u64 cc_busy; +}; + +struct mt76_sband { + struct ieee80211_supported_band sband; + struct mt76_channel_state *chan; +}; + +struct mt76_dev { + struct ieee80211_hw *hw; + struct cfg80211_chan_def chandef; + struct ieee80211_channel *main_chan; + + spinlock_t lock; + spinlock_t cc_lock; + const struct mt76_bus_ops *bus; + const struct mt76_driver_ops *drv; + void __iomem *regs; + struct device *dev; + + struct net_device napi_dev; + struct napi_struct napi[__MT_RXQ_MAX]; + struct sk_buff_head rx_skb[__MT_RXQ_MAX]; + + struct list_head txwi_cache; + struct mt76_queue q_tx[__MT_TXQ_MAX]; + struct mt76_queue q_rx[__MT_RXQ_MAX]; + const struct mt76_queue_ops *queue_ops; + + u8 macaddr[ETH_ALEN]; + u32 rev; + unsigned long state; + + struct mt76_sband sband_2g; + struct mt76_sband sband_5g; + struct debugfs_blob_wrapper eeprom; + struct debugfs_blob_wrapper otp; + struct mt76_hw_cap cap; + + u32 debugfs_reg; + + struct led_classdev led_cdev; + char led_name[32]; + bool led_al; + u8 led_pin; +}; + +enum mt76_phy_type { + MT_PHY_TYPE_CCK, + MT_PHY_TYPE_OFDM, + MT_PHY_TYPE_HT, + MT_PHY_TYPE_HT_GF, + MT_PHY_TYPE_VHT, +}; + +struct mt76_rate_power { + union { + struct { + s8 cck[4]; + s8 ofdm[8]; + s8 ht[16]; + s8 vht[10]; + }; + s8 all[38]; + }; +}; + +struct mt76_rx_status { + struct mt76_wcid *wcid; + + unsigned long reorder_time; + + u8 iv[6]; + + u8 aggr:1; + u8 tid; + u16 seqno; + + u16 freq; + u32 flag; + u8 enc_flags; + u8 encoding:2, bw:3; + u8 rate_idx; + u8 nss; + u8 band; + u8 signal; + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; +}; + +#define mt76_rr(dev, ...) (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__) +#define mt76_wr(dev, ...) (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__) +#define mt76_rmw(dev, ...) (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__) +#define mt76_wr_copy(dev, ...) (dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__) + +#define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val) +#define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0) + +#define mt76_get_field(_dev, _reg, _field) \ + FIELD_GET(_field, mt76_rr(dev, _reg)) + +#define mt76_rmw_field(_dev, _reg, _field, _val) \ + mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val)) + +#define mt76_hw(dev) (dev)->mt76.hw + +bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val, + int timeout); + +#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__) + +bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val, + int timeout); + +#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__) + +void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs); + +static inline u16 mt76_chip(struct mt76_dev *dev) +{ + return dev->rev >> 16; +} + +static inline u16 mt76_rev(struct mt76_dev *dev) +{ + return dev->rev & 0xffff; +} + +#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76)) +#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76)) + +#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76)) +#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__) +#define mt76_queue_add_buf(dev, ...) (dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__) +#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__) +#define mt76_queue_tx_cleanup(dev, ...) 
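[Editor's note: the __mt76_poll()/mt76_poll() helpers declared below wrap repeated masked register reads with a timeout and return false on expiry; the macro form resolves the bus accessor through the chip driver's embedded struct mt76_dev. A usage sketch with invented register and bit names:

static int example_wait_idle(struct mt76x2_dev *dev)
{
        /* wait for a (hypothetical) busy bit to clear before
         * reprogramming the MAC */
        if (!mt76_poll(dev, MT_EXAMPLE_MAC_STATUS, MT_EXAMPLE_MAC_BUSY,
                       0, 100))
                return -ETIMEDOUT;

        return 0;
}]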
(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__) +#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__) + +static inline struct mt76_channel_state * +mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c) +{ + struct mt76_sband *msband; + int idx; + + if (c->band == NL80211_BAND_2GHZ) + msband = &dev->sband_2g; + else + msband = &dev->sband_5g; + + idx = c - &msband->sband.channels[0]; + return &msband->chan[idx]; +} + +int mt76_register_device(struct mt76_dev *dev, bool vht, + struct ieee80211_rate *rates, int n_rates); +void mt76_unregister_device(struct mt76_dev *dev); + +struct dentry *mt76_register_debugfs(struct mt76_dev *dev); + +int mt76_eeprom_init(struct mt76_dev *dev, int len); +void mt76_eeprom_override(struct mt76_dev *dev); + +static inline struct ieee80211_txq * +mtxq_to_txq(struct mt76_txq *mtxq) +{ + void *ptr = mtxq; + + return container_of(ptr, struct ieee80211_txq, drv_priv); +} + +static inline struct ieee80211_sta * +wcid_to_sta(struct mt76_wcid *wcid) +{ + void *ptr = wcid; + + if (!wcid || !wcid->sta) + return NULL; + + return container_of(ptr, struct ieee80211_sta, drv_priv); +} + +int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta); + +void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb); +void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, + struct mt76_wcid *wcid, struct sk_buff *skb); +void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq); +void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq); +void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq); +void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta, + bool send_bar); +void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq); +void mt76_txq_schedule_all(struct mt76_dev *dev); +void mt76_release_buffered_frames(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u16 tids, int nframes, + enum ieee80211_frame_release_type reason, + bool more_data); +void mt76_set_channel(struct mt76_dev *dev); +int mt76_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey); + +int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid, + u16 ssn, u8 size); +void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid); + +void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, + struct ieee80211_key_conf *key); + +/* internal */ +void mt76_tx_free(struct mt76_dev *dev); +void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t); +void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, + int queue); +void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q); +void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames); + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h new file mode 100644 index 000000000000..17df17afd9bf --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -0,0 +1,228 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
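[Editor's note: wcid_to_sta() above relies on drivers placing their struct mt76_wcid first inside the drv_priv area of struct ieee80211_sta (mt76x2_sta, defined below, does exactly that). The inverse direction is then a plain cast; shown as a hedged sketch:

static struct mt76_wcid *example_sta_to_wcid(struct ieee80211_sta *sta)
{
        /* valid only when the per-station driver struct starts with
         * its struct mt76_wcid, as mt76x2_sta does */
        return (struct mt76_wcid *)sta->drv_priv;
}]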
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __MT76x2_H +#define __MT76x2_H + +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/spinlock.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/bitops.h> +#include <linux/kfifo.h> + +#define MT7662_FIRMWARE "mt7662.bin" +#define MT7662_ROM_PATCH "mt7662_rom_patch.bin" +#define MT7662_EEPROM_SIZE 512 + +#define MT76x2_RX_RING_SIZE 256 +#define MT_RX_HEADROOM 32 + +#define MT_MAX_CHAINS 2 + +#define MT_CALIBRATE_INTERVAL HZ + +#include "mt76.h" +#include "mt76x2_regs.h" +#include "mt76x2_mac.h" +#include "mt76x2_dfs.h" + +struct mt76x2_mcu { + struct mutex mutex; + + wait_queue_head_t wait; + struct sk_buff_head res_q; + + u32 msg_seq; +}; + +struct mt76x2_rx_freq_cal { + s8 high_gain[MT_MAX_CHAINS]; + s8 rssi_offset[MT_MAX_CHAINS]; + s8 lna_gain; + u32 mcu_gain; +}; + +struct mt76x2_calibration { + struct mt76x2_rx_freq_cal rx; + + u8 agc_gain_init[MT_MAX_CHAINS]; + u8 agc_gain_cur[MT_MAX_CHAINS]; + + int avg_rssi[MT_MAX_CHAINS]; + int avg_rssi_all; + + s8 agc_gain_adjust; + s8 low_gain; + + u8 temp; + + bool init_cal_done; + bool tssi_cal_done; + bool tssi_comp_pending; + bool dpd_cal_done; + bool channel_cal_done; +}; + +struct mt76x2_dev { + struct mt76_dev mt76; /* must be first */ + + struct mac_address macaddr_list[8]; + + struct mutex mutex; + + const u16 *beacon_offsets; + unsigned long wcid_mask[128 / BITS_PER_LONG]; + + int txpower_conf; + int txpower_cur; + + u8 txdone_seq; + DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x2_tx_status); + + struct mt76x2_mcu mcu; + struct sk_buff *rx_head; + + struct tasklet_struct tx_tasklet; + struct tasklet_struct pre_tbtt_tasklet; + struct delayed_work cal_work; + struct delayed_work mac_work; + + u32 aggr_stats[32]; + + struct mt76_wcid global_wcid; + struct mt76_wcid __rcu *wcid[128]; + + spinlock_t irq_lock; + u32 irqmask; + + struct sk_buff *beacons[8]; + u8 beacon_mask; + u8 beacon_data_mask; + + u32 rev; + u32 rxfilter; + + u16 chainmask; + + struct mt76x2_calibration cal; + + s8 target_power; + s8 target_power_delta[2]; + struct mt76_rate_power rate_power; + bool enable_tpc; + + u8 coverage_class; + u8 slottime; + + struct mt76x2_dfs_pattern_detector dfs_pd; +}; + +struct mt76x2_vif { + u8 idx; + + struct mt76_wcid group_wcid; +}; + +struct mt76x2_sta { + struct mt76_wcid wcid; /* must be first */ + + struct mt76x2_vif *vif; + struct mt76x2_tx_status status; + int n_frames; +}; + +static inline bool is_mt7612(struct mt76x2_dev *dev) +{ + return (dev->rev >> 16) == 0x7612; +} + +void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set); + +static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask) +{ + mt76x2_set_irq_mask(dev, 0, mask); +} + +static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask) +{ + mt76x2_set_irq_mask(dev, mask, 0); +} + +extern const struct ieee80211_ops mt76x2_ops; + +struct mt76x2_dev *mt76x2_alloc_device(struct device 
*pdev); +int mt76x2_register_device(struct mt76x2_dev *dev); +void mt76x2_init_debugfs(struct mt76x2_dev *dev); + +irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance); +void mt76x2_phy_power_on(struct mt76x2_dev *dev); +int mt76x2_init_hardware(struct mt76x2_dev *dev); +void mt76x2_stop_hardware(struct mt76x2_dev *dev); +int mt76x2_eeprom_init(struct mt76x2_dev *dev); +int mt76x2_apply_calibration_data(struct mt76x2_dev *dev, int channel); +void mt76x2_set_tx_ackto(struct mt76x2_dev *dev); + +int mt76x2_phy_start(struct mt76x2_dev *dev); +int mt76x2_phy_set_channel(struct mt76x2_dev *dev, + struct cfg80211_chan_def *chandef); +int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain); +void mt76x2_phy_calibrate(struct work_struct *work); +void mt76x2_phy_set_txpower(struct mt76x2_dev *dev); + +int mt76x2_mcu_init(struct mt76x2_dev *dev); +int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw, + u8 bw_index, bool scan); +int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on); +int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level, + u8 channel); +int mt76x2_mcu_cleanup(struct mt76x2_dev *dev); + +int mt76x2_dma_init(struct mt76x2_dev *dev); +void mt76x2_dma_cleanup(struct mt76x2_dev *dev); + +void mt76x2_cleanup(struct mt76x2_dev *dev); + +int mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid, + struct sk_buff *skb, int cmd, int seq); +void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb); +void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb); +int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, struct ieee80211_sta *sta, + u32 *tx_info); +void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush); + +void mt76x2_pre_tbtt_tasklet(unsigned long arg); + +void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q); +void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb); + +void mt76x2_update_channel(struct mt76_dev *mdev); + +s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev, + const struct ieee80211_tx_rate *rate); +s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj); +void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr); + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c new file mode 100644 index 000000000000..2629779e8d3e --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/delay.h> +#include "mt76x2.h" +#include "mt76x2_trace.h" + +void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->irq_lock, flags); + dev->irqmask &= ~clear; + dev->irqmask |= set; + mt76_wr(dev, MT_INT_MASK_CSR, dev->irqmask); + spin_unlock_irqrestore(&dev->irq_lock, flags); +} + +void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) +{ + struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + + mt76x2_irq_enable(dev, MT_INT_RX_DONE(q)); +} + +irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance) +{ + struct mt76x2_dev *dev = dev_instance; + u32 intr; + + intr = mt76_rr(dev, MT_INT_SOURCE_CSR); + mt76_wr(dev, MT_INT_SOURCE_CSR, intr); + + if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state)) + return IRQ_NONE; + + trace_dev_irq(dev, intr, dev->irqmask); + + intr &= dev->irqmask; + + if (intr & MT_INT_TX_DONE_ALL) { + mt76x2_irq_disable(dev, MT_INT_TX_DONE_ALL); + tasklet_schedule(&dev->tx_tasklet); + } + + if (intr & MT_INT_RX_DONE(0)) { + mt76x2_irq_disable(dev, MT_INT_RX_DONE(0)); + napi_schedule(&dev->mt76.napi[0]); + } + + if (intr & MT_INT_RX_DONE(1)) { + mt76x2_irq_disable(dev, MT_INT_RX_DONE(1)); + napi_schedule(&dev->mt76.napi[1]); + } + + if (intr & MT_INT_PRE_TBTT) + tasklet_schedule(&dev->pre_tbtt_tasklet); + + /* send buffered multicast frames now */ + if (intr & MT_INT_TBTT) + mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]); + + if (intr & MT_INT_TX_STAT) { + mt76x2_mac_poll_tx_status(dev, true); + tasklet_schedule(&dev->tx_tasklet); + } + + if (intr & MT_INT_GPTIMER) { + mt76x2_irq_disable(dev, MT_INT_GPTIMER); + tasklet_schedule(&dev->dfs_pd.dfs_tasklet); + } + + return IRQ_HANDLED; +} + diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c new file mode 100644 index 000000000000..612feb593d7d --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c @@ -0,0 +1,133 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
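[Editor's note: mt76x2_irq_handler() above masks each rx source before scheduling NAPI so a still-pending interrupt cannot refire while the softirq drains the ring; the source is unmasked again from mt76x2_rx_poll_complete() once polling finishes. Condensed to its essence; the function name is invented:

static void example_handle_rx_irq(struct mt76x2_dev *dev, int q)
{
        /* mask first, then defer: rx_poll_complete() re-enables
         * MT_INT_RX_DONE(q) after NAPI has drained the queue */
        mt76x2_irq_disable(dev, MT_INT_RX_DONE(q));
        napi_schedule(&dev->mt76.napi[q]);
}]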
+ */ + +#include <linux/debugfs.h> +#include "mt76x2.h" + +static int +mt76x2_ampdu_stat_read(struct seq_file *file, void *data) +{ + struct mt76x2_dev *dev = file->private; + int i, j; + + for (i = 0; i < 4; i++) { + seq_puts(file, "Length: "); + for (j = 0; j < 8; j++) + seq_printf(file, "%8d | ", i * 8 + j + 1); + seq_puts(file, "\n"); + seq_puts(file, "Count: "); + for (j = 0; j < 8; j++) + seq_printf(file, "%8d | ", dev->aggr_stats[i * 8 + j]); + seq_puts(file, "\n"); + seq_puts(file, "--------"); + for (j = 0; j < 8; j++) + seq_puts(file, "-----------"); + seq_puts(file, "\n"); + } + + return 0; +} + +static int +mt76x2_ampdu_stat_open(struct inode *inode, struct file *f) +{ + return single_open(f, mt76x2_ampdu_stat_read, inode->i_private); +} + +static void +seq_puts_array(struct seq_file *file, const char *str, s8 *val, int len) +{ + int i; + + seq_printf(file, "%10s:", str); + for (i = 0; i < len; i++) + seq_printf(file, " %2d", val[i]); + seq_puts(file, "\n"); +} + +static int read_txpower(struct seq_file *file, void *data) +{ + struct mt76x2_dev *dev = dev_get_drvdata(file->private); + + seq_printf(file, "Target power: %d\n", dev->target_power); + + seq_puts_array(file, "Delta", dev->target_power_delta, + ARRAY_SIZE(dev->target_power_delta)); + seq_puts_array(file, "CCK", dev->rate_power.cck, + ARRAY_SIZE(dev->rate_power.cck)); + seq_puts_array(file, "OFDM", dev->rate_power.ofdm, + ARRAY_SIZE(dev->rate_power.ofdm)); + seq_puts_array(file, "HT", dev->rate_power.ht, + ARRAY_SIZE(dev->rate_power.ht)); + seq_puts_array(file, "VHT", dev->rate_power.vht, + ARRAY_SIZE(dev->rate_power.vht)); + return 0; +} + +static const struct file_operations fops_ampdu_stat = { + .open = mt76x2_ampdu_stat_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int +mt76x2_dfs_stat_read(struct seq_file *file, void *data) +{ + int i; + struct mt76x2_dev *dev = file->private; + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + + for (i = 0; i < MT_DFS_NUM_ENGINES; i++) { + seq_printf(file, "engine: %d\n", i); + seq_printf(file, " hw pattern detected:\t%d\n", + dfs_pd->stats[i].hw_pattern); + seq_printf(file, " hw pulse discarded:\t%d\n", + dfs_pd->stats[i].hw_pulse_discarded); + } + + return 0; +} + +static int +mt76x2_dfs_stat_open(struct inode *inode, struct file *f) +{ + return single_open(f, mt76x2_dfs_stat_read, inode->i_private); +} + +static const struct file_operations fops_dfs_stat = { + .open = mt76x2_dfs_stat_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void mt76x2_init_debugfs(struct mt76x2_dev *dev) +{ + struct dentry *dir; + + dir = mt76_register_debugfs(&dev->mt76); + if (!dir) + return; + + debugfs_create_u8("temperature", S_IRUSR, dir, &dev->cal.temp); + debugfs_create_bool("tpc", S_IRUSR | S_IWUSR, dir, &dev->enable_tpc); + + debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat); + debugfs_create_file("dfs_stats", S_IRUSR, dir, dev, &fops_dfs_stat); + debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir, + read_txpower); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c new file mode 100644 index 000000000000..f936dc9a5476 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c @@ -0,0 +1,506 @@ +/* + * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, 
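[Editor's note: the ampdu_stat file above prints a 32-bucket histogram of A-MPDU lengths, one counter per aggregate size from 1 to 32. A purely illustrative sketch of how such a histogram could be fed; the helper is not part of this patch:

static void example_count_ampdu(struct mt76x2_dev *dev, int n_frames)
{
        /* bucket 0 holds aggregates of length 1, bucket 31 length 32 */
        int idx = clamp(n_frames, 1, 32) - 1;

        dev->aggr_stats[idx]++;
}]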
provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "mt76x2.h" + +#define RADAR_SPEC(m, len, el, eh, wl, wh, \ + w_tolerance, tl, th, t_tolerance, \ + bl, bh, event_exp, power_jmp) \ +{ \ + .mode = m, \ + .avg_len = len, \ + .e_low = el, \ + .e_high = eh, \ + .w_low = wl, \ + .w_high = wh, \ + .w_margin = w_tolerance, \ + .t_low = tl, \ + .t_high = th, \ + .t_margin = t_tolerance, \ + .b_low = bl, \ + .b_high = bh, \ + .event_expiration = event_exp, \ + .pwr_jmp = power_jmp \ +} + +static const struct mt76x2_radar_specs etsi_radar_specs[] = { + /* 20MHz */ + RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0, + 0x7fffffff, 0x155cc0, 0x19cc), + RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0, + 0x7fffffff, 0x155cc0, 0x19cc), + RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0, + 0x7fffffff, 0x155cc0, 0x19dd), + RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0, + 0x7fffffff, 0x2191c0, 0x15cc), + /* 40MHz */ + RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0, + 0x7fffffff, 0x155cc0, 0x19cc), + RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0, + 0x7fffffff, 0x155cc0, 0x19cc), + RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0, + 0x7fffffff, 0x155cc0, 0x19dd), + RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0, + 0x7fffffff, 0x2191c0, 0x15cc), + /* 80MHz */ + RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0, + 0x7fffffff, 0x155cc0, 0x19cc), + RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0, + 0x7fffffff, 0x155cc0, 0x19cc), + RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0, + 0x7fffffff, 0x155cc0, 0x19dd), + RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0, + 0x7fffffff, 0x2191c0, 0x15cc) +}; + +static const struct mt76x2_radar_specs fcc_radar_specs[] = { + /* 20MHz */ + RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0, + 0x7fffffff, 0xfe808, 0x13dc), + RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0, + 0x7fffffff, 0xfe808, 0x19dd), + RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0, + 0x7fffffff, 0xfe808, 0x12cc), + RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0, + 0x3938700, 0x57bcf00, 0x1289), + /* 40MHz */ + RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0, + 0x7fffffff, 0xfe808, 0x13dc), + RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0, + 0x7fffffff, 0xfe808, 0x19dd), + RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0, + 0x7fffffff, 0xfe808, 0x12cc), + RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0, + 0x3938700, 0x57bcf00, 0x1289), + /* 80MHz */ + RADAR_SPEC(0, 8, 2, 14, 106, 150, 15, 2900, 80100, 15, 0, + 0x7fffffff, 0xfe808, 0x16cc), + RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0, + 0x7fffffff, 0xfe808, 0x19dd), + RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0, + 0x7fffffff, 0xfe808, 0x12cc), + RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0, + 0x3938700, 0x57bcf00, 0x1289) +}; + +static const struct 
mt76x2_radar_specs jp_w56_radar_specs[] = { + /* 20MHz */ + RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0, + 0x7fffffff, 0x14c080, 0x13dc), + RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0, + 0x7fffffff, 0x14c080, 0x19dd), + RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0, + 0x7fffffff, 0x14c080, 0x12cc), + RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0, + 0x3938700, 0X57bcf00, 0x1289), + /* 40MHz */ + RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0, + 0x7fffffff, 0x14c080, 0x13dc), + RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0, + 0x7fffffff, 0x14c080, 0x19dd), + RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0, + 0x7fffffff, 0x14c080, 0x12cc), + RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0, + 0x3938700, 0X57bcf00, 0x1289), + /* 80MHz */ + RADAR_SPEC(0, 8, 2, 9, 106, 150, 15, 2900, 80100, 15, 0, + 0x7fffffff, 0x14c080, 0x16cc), + RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0, + 0x7fffffff, 0x14c080, 0x19dd), + RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0, + 0x7fffffff, 0x14c080, 0x12cc), + RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0, + 0x3938700, 0X57bcf00, 0x1289) +}; + +static const struct mt76x2_radar_specs jp_w53_radar_specs[] = { + /* 20MHz */ + RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0, + 0x7fffffff, 0x14c080, 0x16cc), + { 0 }, + RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0, + 0x7fffffff, 0x14c080, 0x16cc), + { 0 }, + /* 40MHz */ + RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0, + 0x7fffffff, 0x14c080, 0x16cc), + { 0 }, + RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0, + 0x7fffffff, 0x14c080, 0x16cc), + { 0 }, + /* 80MHz */ + RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0, + 0x7fffffff, 0x14c080, 0x16cc), + { 0 }, + RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0, + 0x7fffffff, 0x14c080, 0x16cc), + { 0 } +}; + +static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev, + u8 enable) +{ + u32 data; + + data = (1 << 1) | enable; + mt76_wr(dev, MT_BBP(DFS, 36), data); +} + +static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev) +{ + bool ret = false; + u32 current_ts, delta_ts; + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + + current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER); + delta_ts = current_ts - dfs_pd->chirp_pulse_ts; + dfs_pd->chirp_pulse_ts = current_ts; + + /* 12 sec */ + if (delta_ts <= (12 * (1 << 20))) { + if (++dfs_pd->chirp_pulse_cnt > 8) + ret = true; + } else { + dfs_pd->chirp_pulse_cnt = 1; + } + + return ret; +} + +static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev, + struct mt76x2_dfs_hw_pulse *pulse) +{ + u32 data; + + /* select channel */ + data = (MT_DFS_CH_EN << 16) | pulse->engine; + mt76_wr(dev, MT_BBP(DFS, 0), data); + + /* reported period */ + pulse->period = mt76_rr(dev, MT_BBP(DFS, 19)); + + /* reported width */ + pulse->w1 = mt76_rr(dev, MT_BBP(DFS, 20)); + pulse->w2 = mt76_rr(dev, MT_BBP(DFS, 23)); + + /* reported burst number */ + pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22)); +} + +static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev, + struct mt76x2_dfs_hw_pulse *pulse) +{ + bool ret = false; + + if (!pulse->period || !pulse->w1) + return false; + + switch (dev->dfs_pd.region) { + case NL80211_DFS_FCC: + if (pulse->engine > 3) + break; + + if (pulse->engine == 3) { + ret = mt76x2_dfs_check_chirp(dev); + break; + } + + /* check short pulse*/ + if (pulse->w1 < 120) + ret = (pulse->period >= 2900 && + (pulse->period <= 4700 || 
+ pulse->period >= 6400) && + (pulse->period <= 6800 || + pulse->period >= 10200) && + pulse->period <= 61600); + else if (pulse->w1 < 130) /* 120 - 130 */ + ret = (pulse->period >= 2900 && + pulse->period <= 61600); + else + ret = (pulse->period >= 3500 && + pulse->period <= 10100); + break; + case NL80211_DFS_ETSI: + if (pulse->engine >= 3) + break; + + ret = (pulse->period >= 4900 && + (pulse->period <= 10200 || + pulse->period >= 12400) && + pulse->period <= 100100); + break; + case NL80211_DFS_JP: + if (dev->mt76.chandef.chan->center_freq >= 5250 && + dev->mt76.chandef.chan->center_freq <= 5350) { + /* JPW53 */ + if (pulse->w1 <= 130) + ret = (pulse->period >= 28360 && + (pulse->period <= 28700 || + pulse->period >= 76900) && + pulse->period <= 76940); + break; + } + + if (pulse->engine > 3) + break; + + if (pulse->engine == 3) { + ret = mt76x2_dfs_check_chirp(dev); + break; + } + + /* check short pulse*/ + if (pulse->w1 < 120) + ret = (pulse->period >= 2900 && + (pulse->period <= 4700 || + pulse->period >= 6400) && + (pulse->period <= 6800 || + pulse->period >= 27560) && + (pulse->period <= 27960 || + pulse->period >= 28360) && + (pulse->period <= 28700 || + pulse->period >= 79900) && + pulse->period <= 80100); + else if (pulse->w1 < 130) /* 120 - 130 */ + ret = (pulse->period >= 2900 && + (pulse->period <= 10100 || + pulse->period >= 27560) && + (pulse->period <= 27960 || + pulse->period >= 28360) && + (pulse->period <= 28700 || + pulse->period >= 79900) && + pulse->period <= 80100); + else + ret = (pulse->period >= 3900 && + pulse->period <= 10100); + break; + case NL80211_DFS_UNSET: + default: + return false; + } + + return ret; +} + +static void mt76x2_dfs_tasklet(unsigned long arg) +{ + struct mt76x2_dev *dev = (struct mt76x2_dev *)arg; + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + u32 engine_mask; + int i; + + if (test_bit(MT76_SCANNING, &dev->mt76.state)) + goto out; + + engine_mask = mt76_rr(dev, MT_BBP(DFS, 1)); + if (!(engine_mask & 0xf)) + goto out; + + for (i = 0; i < MT_DFS_NUM_ENGINES; i++) { + struct mt76x2_dfs_hw_pulse pulse; + + if (!(engine_mask & (1 << i))) + continue; + + pulse.engine = i; + mt76x2_dfs_get_hw_pulse(dev, &pulse); + + if (!mt76x2_dfs_check_hw_pulse(dev, &pulse)) { + dfs_pd->stats[i].hw_pulse_discarded++; + continue; + } + + /* hw detector rx radar pattern */ + dfs_pd->stats[i].hw_pattern++; + ieee80211_radar_detected(dev->mt76.hw); + + /* reset hw detector */ + mt76_wr(dev, MT_BBP(DFS, 1), 0xf); + + return; + } + + /* reset hw detector */ + mt76_wr(dev, MT_BBP(DFS, 1), 0xf); + +out: + mt76x2_irq_enable(dev, MT_INT_GPTIMER); +} + +static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev) +{ + u32 data; + u8 i, shift; + const struct mt76x2_radar_specs *radar_specs; + + switch (dev->mt76.chandef.width) { + case NL80211_CHAN_WIDTH_40: + shift = MT_DFS_NUM_ENGINES; + break; + case NL80211_CHAN_WIDTH_80: + shift = 2 * MT_DFS_NUM_ENGINES; + break; + default: + shift = 0; + break; + } + + switch (dev->dfs_pd.region) { + case NL80211_DFS_FCC: + radar_specs = &fcc_radar_specs[shift]; + break; + case NL80211_DFS_ETSI: + radar_specs = &etsi_radar_specs[shift]; + break; + case NL80211_DFS_JP: + if (dev->mt76.chandef.chan->center_freq >= 5250 && + dev->mt76.chandef.chan->center_freq <= 5350) + radar_specs = &jp_w53_radar_specs[shift]; + else + radar_specs = &jp_w56_radar_specs[shift]; + break; + case NL80211_DFS_UNSET: + default: + return; + } + + data = (MT_DFS_VGA_MASK << 16) | + (MT_DFS_PWR_GAIN_OFFSET << 12) | + (MT_DFS_PWR_DOWN_TIME << 
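[Editor's note: the chained range tests in mt76x2_dfs_check_hw_pulse() read more naturally as an accepted interval with excluded gaps. The FCC short-pulse case above (w1 < 120), restated as an equivalent helper; periods are in hardware timer ticks:

static bool example_fcc_short_pulse(u32 period)
{
        /* accept periods in [2900, 61600] ... */
        if (period < 2900 || period > 61600)
                return false;

        /* ... excluding the gaps (4700, 6400) and (6800, 10200) */
        if (period > 4700 && period < 6400)
                return false;

        return !(period > 6800 && period < 10200);
}]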
8) | + (MT_DFS_SYM_ROUND << 4) | + (MT_DFS_DELTA_DELAY & 0xf); + mt76_wr(dev, MT_BBP(DFS, 2), data); + + data = (MT_DFS_RX_PE_MASK << 16) | MT_DFS_PKT_END_MASK; + mt76_wr(dev, MT_BBP(DFS, 3), data); + + for (i = 0; i < MT_DFS_NUM_ENGINES; i++) { + /* configure engine */ + mt76_wr(dev, MT_BBP(DFS, 0), i); + + /* detection mode + avg_len */ + data = ((radar_specs[i].avg_len & 0x1ff) << 16) | + (radar_specs[i].mode & 0xf); + mt76_wr(dev, MT_BBP(DFS, 4), data); + + /* dfs energy */ + data = ((radar_specs[i].e_high & 0x0fff) << 16) | + (radar_specs[i].e_low & 0x0fff); + mt76_wr(dev, MT_BBP(DFS, 5), data); + + /* dfs period */ + mt76_wr(dev, MT_BBP(DFS, 7), radar_specs[i].t_low); + mt76_wr(dev, MT_BBP(DFS, 9), radar_specs[i].t_high); + + /* dfs burst */ + mt76_wr(dev, MT_BBP(DFS, 11), radar_specs[i].b_low); + mt76_wr(dev, MT_BBP(DFS, 13), radar_specs[i].b_high); + + /* dfs width */ + data = ((radar_specs[i].w_high & 0x0fff) << 16) | + (radar_specs[i].w_low & 0x0fff); + mt76_wr(dev, MT_BBP(DFS, 14), data); + + /* dfs margins */ + data = (radar_specs[i].w_margin << 16) | + radar_specs[i].t_margin; + mt76_wr(dev, MT_BBP(DFS, 15), data); + + /* dfs event expiration */ + mt76_wr(dev, MT_BBP(DFS, 17), radar_specs[i].event_expiration); + + /* dfs pwr adj */ + mt76_wr(dev, MT_BBP(DFS, 30), radar_specs[i].pwr_jmp); + } + + /* reset status */ + mt76_wr(dev, MT_BBP(DFS, 1), 0xf); + mt76_wr(dev, MT_BBP(DFS, 36), 0x3); + + /* enable detection*/ + mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16); + mt76_wr(dev, 0x212c, 0x0c350001); +} + +void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev) +{ + u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31; + + agc_r8 = mt76_rr(dev, MT_BBP(AGC, 8)); + agc_r4 = mt76_rr(dev, MT_BBP(AGC, 4)); + + val_r8 = (agc_r8 & 0x00007e00) >> 9; + val_r4 = agc_r4 & ~0x1f000000; + val_r4 += (((val_r8 + 1) >> 1) << 24); + mt76_wr(dev, MT_BBP(AGC, 4), val_r4); + + dfs_r31 = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, val_r4); + dfs_r31 += val_r8; + dfs_r31 -= (agc_r8 & 0x00000038) >> 3; + dfs_r31 = (dfs_r31 << 16) | 0x00000307; + mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31); + + mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071); +} + +void mt76x2_dfs_init_params(struct mt76x2_dev *dev) +{ + struct cfg80211_chan_def *chandef = &dev->mt76.chandef; + + if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && + dev->dfs_pd.region != NL80211_DFS_UNSET) { + mt76x2_dfs_set_bbp_params(dev); + /* enable debug mode */ + mt76x2_dfs_set_capture_mode_ctrl(dev, true); + + mt76x2_irq_enable(dev, MT_INT_GPTIMER); + mt76_rmw_field(dev, MT_INT_TIMER_EN, + MT_INT_TIMER_EN_GP_TIMER_EN, 1); + } else { + /* disable hw detector */ + mt76_wr(dev, MT_BBP(DFS, 0), 0); + /* clear detector status */ + mt76_wr(dev, MT_BBP(DFS, 1), 0xf); + mt76_wr(dev, 0x212c, 0); + + mt76x2_irq_disable(dev, MT_INT_GPTIMER); + mt76_rmw_field(dev, MT_INT_TIMER_EN, + MT_INT_TIMER_EN_GP_TIMER_EN, 0); + } +} + +void mt76x2_dfs_init_detector(struct mt76x2_dev *dev) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + + dfs_pd->region = NL80211_DFS_UNSET; + tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet, + (unsigned long)dev); +} + +void mt76x2_dfs_set_domain(struct mt76x2_dev *dev, + enum nl80211_dfs_regions region) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + + if (dfs_pd->region != region) { + tasklet_disable(&dfs_pd->dfs_tasklet); + dfs_pd->region = region; + mt76x2_dfs_init_params(dev); + tasklet_enable(&dfs_pd->dfs_tasklet); + } +} + diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h 
b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h new file mode 100644 index 000000000000..8dbc783cc6bc --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __MT76x2_DFS_H +#define __MT76x2_DFS_H + +#include <linux/types.h> +#include <linux/nl80211.h> + +#define MT_DFS_GP_INTERVAL (10 << 4) /* 64 us unit */ +#define MT_DFS_NUM_ENGINES 4 + +/* bbp params */ +#define MT_DFS_SYM_ROUND 0 +#define MT_DFS_DELTA_DELAY 2 +#define MT_DFS_VGA_MASK 0 +#define MT_DFS_PWR_GAIN_OFFSET 3 +#define MT_DFS_PWR_DOWN_TIME 0xf +#define MT_DFS_RX_PE_MASK 0xff +#define MT_DFS_PKT_END_MASK 0 +#define MT_DFS_CH_EN 0xf + +struct mt76x2_radar_specs { + u8 mode; + u16 avg_len; + u16 e_low; + u16 e_high; + u16 w_low; + u16 w_high; + u16 w_margin; + u32 t_low; + u32 t_high; + u16 t_margin; + u32 b_low; + u32 b_high; + u32 event_expiration; + u16 pwr_jmp; +}; + +struct mt76x2_dfs_hw_pulse { + u8 engine; + u32 period; + u32 w1; + u32 w2; + u32 burst; +}; + +struct mt76x2_dfs_engine_stats { + u32 hw_pattern; + u32 hw_pulse_discarded; +}; + +struct mt76x2_dfs_pattern_detector { + enum nl80211_dfs_regions region; + + u8 chirp_pulse_cnt; + u32 chirp_pulse_ts; + + struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES]; + struct tasklet_struct dfs_tasklet; +}; + +void mt76x2_dfs_init_params(struct mt76x2_dev *dev); +void mt76x2_dfs_init_detector(struct mt76x2_dev *dev); +void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev); +void mt76x2_dfs_set_domain(struct mt76x2_dev *dev, + enum nl80211_dfs_regions region); + +#endif /* __MT76x2_DFS_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c new file mode 100644 index 000000000000..fd1ec4743e0b --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
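[Editor's note: each radar spec table earlier in this patch holds three blocks of MT_DFS_NUM_ENGINES entries, one block per channel width; mt76x2_dfs_set_bbp_params() selects the block with its 'shift' offset and then indexes engines within it. As an explicit lookup helper, the layout would be:

static const struct mt76x2_radar_specs *
example_radar_spec(const struct mt76x2_radar_specs *tbl,
                   int width_block, int engine)
{
        /* width_block: 0 = 20 MHz, 1 = 40 MHz, 2 = 80 MHz */
        return &tbl[width_block * MT_DFS_NUM_ENGINES + engine];
}]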
+ */ + +#include "mt76x2.h" +#include "mt76x2_dma.h" + +int +mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid, + struct sk_buff *skb, int cmd, int seq) +{ + struct mt76_queue *q = &dev->mt76.q_tx[qid]; + struct mt76_queue_buf buf; + dma_addr_t addr; + u32 tx_info; + + tx_info = MT_MCU_MSG_TYPE_CMD | + FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) | + FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) | + FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) | + FIELD_PREP(MT_MCU_MSG_LEN, skb->len); + + addr = dma_map_single(dev->mt76.dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev->mt76.dev, addr)) + return -ENOMEM; + + buf.addr = addr; + buf.len = skb->len; + spin_lock_bh(&q->lock); + mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); + mt76_queue_kick(dev, q); + spin_unlock_bh(&q->lock); + + return 0; +} + +static int +mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q, + int idx, int n_desc) +{ + int ret; + + q->regs = dev->mt76.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE; + q->ndesc = n_desc; + q->hw_idx = idx; + + ret = mt76_queue_alloc(dev, q); + if (ret) + return ret; + + mt76x2_irq_enable(dev, MT_INT_TX_DONE(idx)); + + return 0; +} + +void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb) +{ + struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + void *rxwi = skb->data; + + if (q == MT_RXQ_MCU) { + skb_queue_tail(&dev->mcu.res_q, skb); + wake_up(&dev->mcu.wait); + return; + } + + skb_pull(skb, sizeof(struct mt76x2_rxwi)); + if (mt76x2_mac_process_rx(dev, skb, rxwi)) { + dev_kfree_skb(skb); + return; + } + + mt76_rx(&dev->mt76, q, skb); +} + +static int +mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q, + int idx, int n_desc, int bufsize) +{ + int ret; + + q->regs = dev->mt76.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE; + q->ndesc = n_desc; + q->buf_size = bufsize; + + ret = mt76_queue_alloc(dev, q); + if (ret) + return ret; + + mt76x2_irq_enable(dev, MT_INT_RX_DONE(idx)); + + return 0; +} + +static void +mt76x2_tx_tasklet(unsigned long data) +{ + struct mt76x2_dev *dev = (struct mt76x2_dev *) data; + int i; + + mt76x2_mac_process_tx_status_fifo(dev); + + for (i = MT_TXQ_MCU; i >= 0; i--) + mt76_queue_tx_cleanup(dev, i, false); + + mt76x2_mac_poll_tx_status(dev, false); + mt76x2_irq_enable(dev, MT_INT_TX_DONE_ALL); +} + +int mt76x2_dma_init(struct mt76x2_dev *dev) +{ + static const u8 wmm_queue_map[] = { + [IEEE80211_AC_BE] = 0, + [IEEE80211_AC_BK] = 1, + [IEEE80211_AC_VI] = 2, + [IEEE80211_AC_VO] = 3, + }; + int ret; + int i; + struct mt76_txwi_cache __maybe_unused *t; + struct mt76_queue *q; + + BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x2_txwi)); + BUILD_BUG_ON(sizeof(struct mt76x2_rxwi) > MT_RX_HEADROOM); + + mt76_dma_attach(&dev->mt76); + + init_waitqueue_head(&dev->mcu.wait); + skb_queue_head_init(&dev->mcu.res_q); + + tasklet_init(&dev->tx_tasklet, mt76x2_tx_tasklet, (unsigned long) dev); + + mt76_wr(dev, MT_WPDMA_RST_IDX, ~0); + + for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) { + ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[i], + wmm_queue_map[i], MT_TX_RING_SIZE); + if (ret) + return ret; + } + + ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD], + MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE); + if (ret) + return ret; + + ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU], + MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE); + if (ret) + return ret; + + ret = mt76x2_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1, + MT_MCU_RING_SIZE, MT_RX_BUF_SIZE); + if (ret) + 
return ret; + + q = &dev->mt76.q_rx[MT_RXQ_MAIN]; + q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x2_rxwi); + ret = mt76x2_init_rx_queue(dev, q, 0, MT76x2_RX_RING_SIZE, MT_RX_BUF_SIZE); + if (ret) + return ret; + + return mt76_init_queues(dev); +} + +void mt76x2_dma_cleanup(struct mt76x2_dev *dev) +{ + tasklet_kill(&dev->tx_tasklet); + mt76_dma_cleanup(&dev->mt76); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h new file mode 100644 index 000000000000..47f79d83fcb4 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __MT76x2_DMA_H +#define __MT76x2_DMA_H + +#include "dma.h" + +#define MT_TXD_INFO_LEN GENMASK(13, 0) +#define MT_TXD_INFO_NEXT_VLD BIT(16) +#define MT_TXD_INFO_TX_BURST BIT(17) +#define MT_TXD_INFO_80211 BIT(19) +#define MT_TXD_INFO_TSO BIT(20) +#define MT_TXD_INFO_CSO BIT(21) +#define MT_TXD_INFO_WIV BIT(24) +#define MT_TXD_INFO_QSEL GENMASK(26, 25) +#define MT_TXD_INFO_TCO BIT(29) +#define MT_TXD_INFO_UCO BIT(30) +#define MT_TXD_INFO_ICO BIT(31) + +#define MT_RX_FCE_INFO_LEN GENMASK(13, 0) +#define MT_RX_FCE_INFO_SELF_GEN BIT(15) +#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16) +#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20) +#define MT_RX_FCE_INFO_PCIE_INTR BIT(24) +#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25) +#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27) +#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30) + +/* MCU request message header */ +#define MT_MCU_MSG_LEN GENMASK(15, 0) +#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16) +#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20) +#define MT_MCU_MSG_PORT GENMASK(29, 27) +#define MT_MCU_MSG_TYPE GENMASK(31, 30) +#define MT_MCU_MSG_TYPE_CMD BIT(30) + +enum mt76x2_qsel { + MT_QSEL_MGMT, + MT_QSEL_HCCA, + MT_QSEL_EDCA, + MT_QSEL_EDCA_2, +}; + +enum dma_msg_port { + WLAN_PORT, + CPU_RX_PORT, + CPU_TX_PORT, + HOST_PORT, + VIRTUAL_CPU_RX_PORT, + VIRTUAL_CPU_TX_PORT, + DISCARD, +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c new file mode 100644 index 000000000000..9c9bf3e785ba --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c @@ -0,0 +1,664 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <asm/unaligned.h> +#include "mt76x2.h" +#include "mt76x2_eeprom.h" + +#define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1 + +static int +mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field, + void *dest, int len) +{ + if (field + len > dev->mt76.eeprom.size) + return -1; + + memcpy(dest, dev->mt76.eeprom.data + field, len); + return 0; +} + +static int +mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev) +{ + void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR; + + memcpy(dev->mt76.macaddr, src, ETH_ALEN); + return 0; +} + +static void +mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev) +{ + u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0); + + switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) { + case BOARD_TYPE_5GHZ: + dev->mt76.cap.has_5ghz = true; + break; + case BOARD_TYPE_2GHZ: + dev->mt76.cap.has_2ghz = true; + break; + default: + dev->mt76.cap.has_2ghz = true; + dev->mt76.cap.has_5ghz = true; + break; + } +} + +static int +mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data) +{ + u32 val; + int i; + + val = mt76_rr(dev, MT_EFUSE_CTRL); + val &= ~(MT_EFUSE_CTRL_AIN | + MT_EFUSE_CTRL_MODE); + val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf); + val |= MT_EFUSE_CTRL_KICK; + mt76_wr(dev, MT_EFUSE_CTRL, val); + + if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000)) + return -ETIMEDOUT; + + udelay(2); + + val = mt76_rr(dev, MT_EFUSE_CTRL); + if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) { + memset(data, 0xff, 16); + return 0; + } + + for (i = 0; i < 4; i++) { + val = mt76_rr(dev, MT_EFUSE_DATA(i)); + put_unaligned_le32(val, data + 4 * i); + } + + return 0; +} + +static int +mt76x2_get_efuse_data(struct mt76x2_dev *dev, void *buf, int len) +{ + int ret, i; + + for (i = 0; i + 16 <= len; i += 16) { + ret = mt76x2_efuse_read(dev, i, buf + i); + if (ret) + return ret; + } + + return 0; +} + +static bool +mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse) +{ + u16 *efuse_w = (u16 *) efuse; + + if (efuse_w[MT_EE_NIC_CONF_0] != 0) + return false; + + if (efuse_w[MT_EE_XTAL_TRIM_1] == 0xffff) + return false; + + if (efuse_w[MT_EE_TX_POWER_DELTA_BW40] != 0) + return false; + + if (efuse_w[MT_EE_TX_POWER_0_START_2G] == 0xffff) + return false; + + if (efuse_w[MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA] != 0) + return false; + + if (efuse_w[MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE] == 0xffff) + return false; + + return true; +} + +static void +mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse) +{ +#define GROUP_5G(_id) \ + MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id), \ + MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1, \ + MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id), \ + MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1 + + static const u8 cal_free_bytes[] = { + MT_EE_XTAL_TRIM_1, + MT_EE_TX_POWER_EXT_PA_5G + 1, + MT_EE_TX_POWER_0_START_2G, + MT_EE_TX_POWER_0_START_2G + 1, + MT_EE_TX_POWER_1_START_2G, + MT_EE_TX_POWER_1_START_2G + 1, + GROUP_5G(0), + GROUP_5G(1), + GROUP_5G(2), + GROUP_5G(3), + GROUP_5G(4), + GROUP_5G(5), + MT_EE_RF_2G_TSSI_OFF_TXPOWER, + MT_EE_RF_2G_RX_HIGH_GAIN + 1, + 
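+ /* low and high byte of each 5 GHz per-group RX high-gain word */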
MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN, + MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN + 1, + MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN, + MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN + 1, + MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN, + MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN + 1, + }; + u8 *eeprom = dev->mt76.eeprom.data; + u8 prev_grp0[4] = { + eeprom[MT_EE_TX_POWER_0_START_5G], + eeprom[MT_EE_TX_POWER_0_START_5G + 1], + eeprom[MT_EE_TX_POWER_1_START_5G], + eeprom[MT_EE_TX_POWER_1_START_5G + 1] + }; + u16 val; + int i; + + if (!mt76x2_has_cal_free_data(dev, efuse)) + return; + + for (i = 0; i < ARRAY_SIZE(cal_free_bytes); i++) { + int offset = cal_free_bytes[i]; + + eeprom[offset] = efuse[offset]; + } + + if (!(efuse[MT_EE_TX_POWER_0_START_5G] | + efuse[MT_EE_TX_POWER_0_START_5G + 1])) + memcpy(eeprom + MT_EE_TX_POWER_0_START_5G, prev_grp0, 2); + if (!(efuse[MT_EE_TX_POWER_1_START_5G] | + efuse[MT_EE_TX_POWER_1_START_5G + 1])) + memcpy(eeprom + MT_EE_TX_POWER_1_START_5G, prev_grp0 + 2, 2); + + val = get_unaligned_le16(efuse + MT_EE_BT_RCAL_RESULT); + if (val != 0xffff) + eeprom[MT_EE_BT_RCAL_RESULT] = val & 0xff; + + val = get_unaligned_le16(efuse + MT_EE_BT_VCDL_CALIBRATION); + if (val != 0xffff) + eeprom[MT_EE_BT_VCDL_CALIBRATION + 1] = val >> 8; + + val = get_unaligned_le16(efuse + MT_EE_BT_PMUCFG); + if (val != 0xffff) + eeprom[MT_EE_BT_PMUCFG] = val & 0xff; +} + +static int mt76x2_check_eeprom(struct mt76x2_dev *dev) +{ + u16 val = get_unaligned_le16(dev->mt76.eeprom.data); + + if (!val) + val = get_unaligned_le16(dev->mt76.eeprom.data + MT_EE_PCI_ID); + + switch (val) { + case 0x7662: + case 0x7612: + return 0; + default: + dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n", val); + return -EINVAL; + } +} + +static int +mt76x2_eeprom_load(struct mt76x2_dev *dev) +{ + void *efuse; + int len = MT7662_EEPROM_SIZE; + bool found; + int ret; + + ret = mt76_eeprom_init(&dev->mt76, len); + if (ret < 0) + return ret; + + found = ret; + if (found) + found = !mt76x2_check_eeprom(dev); + + dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL); + dev->mt76.otp.size = len; + if (!dev->mt76.otp.data) + return -ENOMEM; + + efuse = dev->mt76.otp.data; + + if (mt76x2_get_efuse_data(dev, efuse, len)) + goto out; + + if (found) { + mt76x2_apply_cal_free_data(dev, efuse); + } else { + /* FIXME: check if efuse data is complete */ + found = true; + memcpy(dev->mt76.eeprom.data, efuse, len); + } + +out: + if (!found) + return -ENOENT; + + return 0; +} + +static inline int +mt76x2_sign_extend(u32 val, unsigned int size) +{ + bool sign = val & BIT(size - 1); + + val &= BIT(size - 1) - 1; + + return sign ? val : -val; +} + +static inline int +mt76x2_sign_extend_optional(u32 val, unsigned int size) +{ + bool enable = val & BIT(size); + + return enable ? 
mt76x2_sign_extend(val, size) : 0; +} + +static bool +field_valid(u8 val) +{ + return val != 0 && val != 0xff; +} + +static void +mt76x2_set_rx_gain_group(struct mt76x2_dev *dev, u8 val) +{ + s8 *dest = dev->cal.rx.high_gain; + + if (!field_valid(val)) { + dest[0] = 0; + dest[1] = 0; + return; + } + + dest[0] = mt76x2_sign_extend(val, 4); + dest[1] = mt76x2_sign_extend(val >> 4, 4); +} + +static void +mt76x2_set_rssi_offset(struct mt76x2_dev *dev, int chain, u8 val) +{ + s8 *dest = dev->cal.rx.rssi_offset; + + if (!field_valid(val)) { + dest[chain] = 0; + return; + } + + dest[chain] = mt76x2_sign_extend_optional(val, 7); +} + +static enum mt76x2_cal_channel_group +mt76x2_get_cal_channel_group(int channel) +{ + if (channel >= 184 && channel <= 196) + return MT_CH_5G_JAPAN; + if (channel <= 48) + return MT_CH_5G_UNII_1; + if (channel <= 64) + return MT_CH_5G_UNII_2; + if (channel <= 114) + return MT_CH_5G_UNII_2E_1; + if (channel <= 144) + return MT_CH_5G_UNII_2E_2; + return MT_CH_5G_UNII_3; +} + +static u8 +mt76x2_get_5g_rx_gain(struct mt76x2_dev *dev, u8 channel) +{ + enum mt76x2_cal_channel_group group; + + group = mt76x2_get_cal_channel_group(channel); + switch (group) { + case MT_CH_5G_JAPAN: + return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN); + case MT_CH_5G_UNII_1: + return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8; + case MT_CH_5G_UNII_2: + return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN); + case MT_CH_5G_UNII_2E_1: + return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8; + case MT_CH_5G_UNII_2E_2: + return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN); + default: + return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8; + } +} + +void mt76x2_read_rx_gain(struct mt76x2_dev *dev) +{ + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + int channel = chan->hw_value; + s8 lna_5g[3], lna_2g; + u8 lna; + u16 val; + + if (chan->band == NL80211_BAND_2GHZ) + val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8; + else + val = mt76x2_get_5g_rx_gain(dev, channel); + + mt76x2_set_rx_gain_group(dev, val); + + if (chan->band == NL80211_BAND_2GHZ) { + val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_0); + mt76x2_set_rssi_offset(dev, 0, val); + mt76x2_set_rssi_offset(dev, 1, val >> 8); + } else { + val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_0); + mt76x2_set_rssi_offset(dev, 0, val); + mt76x2_set_rssi_offset(dev, 1, val >> 8); + } + + val = mt76x2_eeprom_get(dev, MT_EE_LNA_GAIN); + lna_2g = val & 0xff; + lna_5g[0] = val >> 8; + + val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_1); + lna_5g[1] = val >> 8; + + val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_1); + lna_5g[2] = val >> 8; + + if (!field_valid(lna_5g[1])) + lna_5g[1] = lna_5g[0]; + + if (!field_valid(lna_5g[2])) + lna_5g[2] = lna_5g[0]; + + dev->cal.rx.mcu_gain = (lna_2g & 0xff); + dev->cal.rx.mcu_gain |= (lna_5g[0] & 0xff) << 8; + dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16; + dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24; + + val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1); + if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G) + lna_2g = 0; + if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G) + memset(lna_5g, 0, sizeof(lna_5g)); + + if (chan->band == NL80211_BAND_2GHZ) + lna = lna_2g; + else if (channel <= 64) + lna = lna_5g[0]; + else if (channel <= 128) + lna = lna_5g[1]; + else + lna = lna_5g[2]; + + if (lna == 0xff) + lna = 0; + + dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8); +} + +static s8 +mt76x2_rate_power_val(u8 val) +{ + if 
(!field_valid(val)) + return 0; + + return mt76x2_sign_extend_optional(val, 7); +} + +void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t, + struct ieee80211_channel *chan) +{ + bool is_5ghz; + u16 val; + + is_5ghz = chan->band == NL80211_BAND_5GHZ; + + memset(t, 0, sizeof(*t)); + + val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_CCK); + t->cck[0] = t->cck[1] = mt76x2_rate_power_val(val); + t->cck[2] = t->cck[3] = mt76x2_rate_power_val(val >> 8); + + if (is_5ghz) + val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M); + else + val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M); + t->ofdm[0] = t->ofdm[1] = mt76x2_rate_power_val(val); + t->ofdm[2] = t->ofdm[3] = mt76x2_rate_power_val(val >> 8); + + if (is_5ghz) + val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M); + else + val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M); + t->ofdm[4] = t->ofdm[5] = mt76x2_rate_power_val(val); + t->ofdm[6] = t->ofdm[7] = mt76x2_rate_power_val(val >> 8); + + val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0); + t->ht[0] = t->ht[1] = mt76x2_rate_power_val(val); + t->ht[2] = t->ht[3] = mt76x2_rate_power_val(val >> 8); + + val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4); + t->ht[4] = t->ht[5] = mt76x2_rate_power_val(val); + t->ht[6] = t->ht[7] = mt76x2_rate_power_val(val >> 8); + + val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8); + t->ht[8] = t->ht[9] = mt76x2_rate_power_val(val); + t->ht[10] = t->ht[11] = mt76x2_rate_power_val(val >> 8); + + val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12); + t->ht[12] = t->ht[13] = mt76x2_rate_power_val(val); + t->ht[14] = t->ht[15] = mt76x2_rate_power_val(val >> 8); + + val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0); + t->vht[0] = t->vht[1] = mt76x2_rate_power_val(val); + t->vht[2] = t->vht[3] = mt76x2_rate_power_val(val >> 8); + + val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4); + t->vht[4] = t->vht[5] = mt76x2_rate_power_val(val); + t->vht[6] = t->vht[7] = mt76x2_rate_power_val(val >> 8); + + val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8); + if (!is_5ghz) + val >>= 8; + t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8); +} + +int mt76x2_get_max_rate_power(struct mt76_rate_power *r) +{ + int i; + s8 ret = 0; + + for (i = 0; i < sizeof(r->all); i++) + ret = max(ret, r->all[i]); + + return ret; +} + +static void +mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t, + struct ieee80211_channel *chan, int chain, int offset) +{ + int channel = chan->hw_value; + int delta_idx; + u8 data[6]; + u16 val; + + if (channel < 6) + delta_idx = 3; + else if (channel < 11) + delta_idx = 4; + else + delta_idx = 5; + + mt76x2_eeprom_copy(dev, offset, data, sizeof(data)); + + t->chain[chain].tssi_slope = data[0]; + t->chain[chain].tssi_offset = data[1]; + t->chain[chain].target_power = data[2]; + t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7); + + val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER); + t->target_power = val >> 8; +} + +static void +mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t, + struct ieee80211_channel *chan, int chain, int offset) +{ + int channel = chan->hw_value; + enum mt76x2_cal_channel_group group; + int delta_idx; + u16 val; + u8 data[5]; + + group = mt76x2_get_cal_channel_group(channel); + offset += group * MT_TX_POWER_GROUP_SIZE_5G; + + if (channel >= 192) + delta_idx = 4; + else if (channel >= 184) + delta_idx = 3; + else if (channel < 44) + delta_idx = 3; + else if (channel < 52) + 
delta_idx = 4; + else if (channel < 58) + delta_idx = 3; + else if (channel < 98) + delta_idx = 4; + else if (channel < 106) + delta_idx = 3; + else if (channel < 116) + delta_idx = 4; + else if (channel < 130) + delta_idx = 3; + else if (channel < 149) + delta_idx = 4; + else if (channel < 157) + delta_idx = 3; + else + delta_idx = 4; + + mt76x2_eeprom_copy(dev, offset, data, sizeof(data)); + + t->chain[chain].tssi_slope = data[0]; + t->chain[chain].tssi_offset = data[1]; + t->chain[chain].target_power = data[2]; + t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7); + + val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN); + t->target_power = val & 0xff; +} + +void mt76x2_get_power_info(struct mt76x2_dev *dev, + struct mt76x2_tx_power_info *t, + struct ieee80211_channel *chan) +{ + u16 bw40, bw80; + + memset(t, 0, sizeof(*t)); + + bw40 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40); + bw80 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80); + + if (chan->band == NL80211_BAND_5GHZ) { + bw40 >>= 8; + mt76x2_get_power_info_5g(dev, t, chan, 0, + MT_EE_TX_POWER_0_START_5G); + mt76x2_get_power_info_5g(dev, t, chan, 1, + MT_EE_TX_POWER_1_START_5G); + } else { + mt76x2_get_power_info_2g(dev, t, chan, 0, + MT_EE_TX_POWER_0_START_2G); + mt76x2_get_power_info_2g(dev, t, chan, 1, + MT_EE_TX_POWER_1_START_2G); + } + + if (mt76x2_tssi_enabled(dev) || !field_valid(t->target_power)) + t->target_power = t->chain[0].target_power; + + t->delta_bw40 = mt76x2_rate_power_val(bw40); + t->delta_bw80 = mt76x2_rate_power_val(bw80); +} + +int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t) +{ + enum nl80211_band band = dev->mt76.chandef.chan->band; + u16 val, slope; + u8 bounds; + + memset(t, 0, sizeof(*t)); + + val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1); + if (!(val & MT_EE_NIC_CONF_1_TEMP_TX_ALC)) + return -EINVAL; + + if (!mt76x2_ext_pa_enabled(dev, band)) + return -EINVAL; + + val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8; + if (!(val & BIT(7))) + return -EINVAL; + + t->temp_25_ref = val & 0x7f; + if (band == NL80211_BAND_5GHZ) { + slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G); + bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G); + } else { + slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G); + bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80) >> 8; + } + + t->high_slope = slope & 0xff; + t->low_slope = slope >> 8; + t->lower_bound = 0 - (bounds & 0xf); + t->upper_bound = (bounds >> 4) & 0xf; + + return 0; +} + +bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band) +{ + u16 conf0 = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0); + + if (band == NL80211_BAND_5GHZ) + return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_5G); + else + return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G); +} + +int mt76x2_eeprom_init(struct mt76x2_dev *dev) +{ + int ret; + + ret = mt76x2_eeprom_load(dev); + if (ret) + return ret; + + mt76x2_eeprom_parse_hw_cap(dev); + mt76x2_eeprom_get_macaddr(dev); + mt76_eeprom_override(&dev->mt76); + dev->mt76.macaddr[0] &= ~BIT(1); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h new file mode 100644 index 000000000000..d79122728dca --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h @@ -0,0 +1,185 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, 
provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __MT76x2_EEPROM_H +#define __MT76x2_EEPROM_H + +#include "mt76x2.h" + +enum mt76x2_eeprom_field { + MT_EE_CHIP_ID = 0x000, + MT_EE_VERSION = 0x002, + MT_EE_MAC_ADDR = 0x004, + MT_EE_PCI_ID = 0x00A, + MT_EE_NIC_CONF_0 = 0x034, + MT_EE_NIC_CONF_1 = 0x036, + MT_EE_NIC_CONF_2 = 0x042, + + MT_EE_XTAL_TRIM_1 = 0x03a, + MT_EE_XTAL_TRIM_2 = 0x09e, + + MT_EE_LNA_GAIN = 0x044, + MT_EE_RSSI_OFFSET_2G_0 = 0x046, + MT_EE_RSSI_OFFSET_2G_1 = 0x048, + MT_EE_RSSI_OFFSET_5G_0 = 0x04a, + MT_EE_RSSI_OFFSET_5G_1 = 0x04c, + + MT_EE_TX_POWER_DELTA_BW40 = 0x050, + MT_EE_TX_POWER_DELTA_BW80 = 0x052, + + MT_EE_TX_POWER_EXT_PA_5G = 0x054, + + MT_EE_TX_POWER_0_START_2G = 0x056, + MT_EE_TX_POWER_1_START_2G = 0x05c, + + /* used as byte arrays */ +#define MT_TX_POWER_GROUP_SIZE_5G 5 +#define MT_TX_POWER_GROUPS_5G 6 + MT_EE_TX_POWER_0_START_5G = 0x062, + + MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA = 0x074, + MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE = 0x076, + + MT_EE_TX_POWER_1_START_5G = 0x080, + + MT_EE_TX_POWER_CCK = 0x0a0, + MT_EE_TX_POWER_OFDM_2G_6M = 0x0a2, + MT_EE_TX_POWER_OFDM_2G_24M = 0x0a4, + MT_EE_TX_POWER_OFDM_5G_6M = 0x0b2, + MT_EE_TX_POWER_OFDM_5G_24M = 0x0b4, + MT_EE_TX_POWER_HT_MCS0 = 0x0a6, + MT_EE_TX_POWER_HT_MCS4 = 0x0a8, + MT_EE_TX_POWER_HT_MCS8 = 0x0aa, + MT_EE_TX_POWER_HT_MCS12 = 0x0ac, + MT_EE_TX_POWER_VHT_MCS0 = 0x0ba, + MT_EE_TX_POWER_VHT_MCS4 = 0x0bc, + MT_EE_TX_POWER_VHT_MCS8 = 0x0be, + + MT_EE_RF_TEMP_COMP_SLOPE_5G = 0x0f2, + MT_EE_RF_TEMP_COMP_SLOPE_2G = 0x0f4, + + MT_EE_RF_2G_TSSI_OFF_TXPOWER = 0x0f6, + MT_EE_RF_2G_RX_HIGH_GAIN = 0x0f8, + MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN = 0x0fa, + MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN = 0x0fc, + MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN = 0x0fe, + + MT_EE_BT_RCAL_RESULT = 0x138, + MT_EE_BT_VCDL_CALIBRATION = 0x13c, + MT_EE_BT_PMUCFG = 0x13e, + + __MT_EE_MAX +}; + +#define MT_EE_NIC_CONF_0_PA_INT_2G BIT(8) +#define MT_EE_NIC_CONF_0_PA_INT_5G BIT(9) +#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12) + +#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1) +#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2) +#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3) +#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13) + +#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0) +#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4) +#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8) +#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9) +#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11) +#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13) + +enum mt76x2_board_type { + BOARD_TYPE_2GHZ = 1, + BOARD_TYPE_5GHZ = 2, +}; + +enum mt76x2_cal_channel_group { + MT_CH_5G_JAPAN, + MT_CH_5G_UNII_1, + MT_CH_5G_UNII_2, + MT_CH_5G_UNII_2E_1, + MT_CH_5G_UNII_2E_2, + MT_CH_5G_UNII_3, + __MT_CH_MAX +}; + +struct mt76x2_tx_power_info { + u8 target_power; + + s8 delta_bw40; + s8 delta_bw80; + + struct { + s8 tssi_slope; + s8 tssi_offset; + s8 target_power; + s8 delta; + } chain[MT_MAX_CHAINS]; +}; + +struct mt76x2_temp_comp { + u8 temp_25_ref; + int lower_bound; 
/* J */ + int upper_bound; /* J */ + unsigned int high_slope; /* J / dB */ + unsigned int low_slope; /* J / dB */ +}; + +static inline int +mt76x2_eeprom_get(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field) +{ + if ((field & 1) || field >= __MT_EE_MAX) + return -1; + + return get_unaligned_le16(dev->mt76.eeprom.data + field); +} + +void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t, + struct ieee80211_channel *chan); +int mt76x2_get_max_rate_power(struct mt76_rate_power *r); +void mt76x2_get_power_info(struct mt76x2_dev *dev, + struct mt76x2_tx_power_info *t, + struct ieee80211_channel *chan); +int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t); +bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band); +void mt76x2_read_rx_gain(struct mt76x2_dev *dev); + +static inline bool +mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev) +{ + return mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) & + MT_EE_NIC_CONF_1_TEMP_TX_ALC; +} + +static inline bool +mt76x2_tssi_enabled(struct mt76x2_dev *dev) +{ + return !mt76x2_temp_tx_alc_enabled(dev) && + (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) & + MT_EE_NIC_CONF_1_TX_ALC_EN); +} + +static inline bool +mt76x2_has_ext_lna(struct mt76x2_dev *dev) +{ + u32 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1); + + if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) + return val & MT_EE_NIC_CONF_1_LNA_EXT_2G; + else + return val & MT_EE_NIC_CONF_1_LNA_EXT_5G; +} + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c new file mode 100644 index 000000000000..1b00ae4465a2 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -0,0 +1,875 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/delay.h> +#include "mt76x2.h" +#include "mt76x2_eeprom.h" +#include "mt76x2_mcu.h" + +struct mt76x2_reg_pair { + u32 reg; + u32 value; +}; + +static bool +mt76x2_wait_for_mac(struct mt76x2_dev *dev) +{ + int i; + + for (i = 0; i < 500; i++) { + switch (mt76_rr(dev, MT_MAC_CSR0)) { + case 0: + case ~0: + break; + default: + return true; + } + usleep_range(5000, 10000); + } + + return false; +} + +static bool +wait_for_wpdma(struct mt76x2_dev *dev) +{ + return mt76_poll(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, + 0, 1000); +} + +static void +mt76x2_mac_pbf_init(struct mt76x2_dev *dev) +{ + u32 val; + + val = MT_PBF_SYS_CTRL_MCU_RESET | + MT_PBF_SYS_CTRL_DMA_RESET | + MT_PBF_SYS_CTRL_MAC_RESET | + MT_PBF_SYS_CTRL_PBF_RESET | + MT_PBF_SYS_CTRL_ASY_RESET; + + mt76_set(dev, MT_PBF_SYS_CTRL, val); + mt76_clear(dev, MT_PBF_SYS_CTRL, val); + + mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f); + mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf); +} + +static void +mt76x2_write_reg_pairs(struct mt76x2_dev *dev, + const struct mt76x2_reg_pair *data, int len) +{ + while (len > 0) { + mt76_wr(dev, data->reg, data->value); + len--; + data++; + } +} + +static void +mt76_write_mac_initvals(struct mt76x2_dev *dev) +{ +#define DEFAULT_PROT_CFG \ + (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \ + FIELD_PREP(MT_PROT_CFG_NAV, 1) | \ + FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \ + MT_PROT_CFG_RTS_THRESH) + +#define DEFAULT_PROT_CFG_20 \ + (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \ + FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \ + FIELD_PREP(MT_PROT_CFG_NAV, 1) | \ + FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17)) + +#define DEFAULT_PROT_CFG_40 \ + (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \ + FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \ + FIELD_PREP(MT_PROT_CFG_NAV, 1) | \ + FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f)) + + static const struct mt76x2_reg_pair vals[] = { + /* Copied from MediaTek reference source */ + { MT_PBF_SYS_CTRL, 0x00080c00 }, + { MT_PBF_CFG, 0x1efebcff }, + { MT_FCE_PSE_CTRL, 0x00000001 }, + { MT_MAC_SYS_CTRL, 0x0000000c }, + { MT_MAX_LEN_CFG, 0x003e3f00 }, + { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 }, + { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa }, + { MT_XIFS_TIME_CFG, 0x33a40d0a }, + { MT_BKOFF_SLOT_CFG, 0x00000209 }, + { MT_TBTT_SYNC_CFG, 0x00422010 }, + { MT_PWR_PIN_CFG, 0x00000000 }, + { 0x1238, 0x001700c8 }, + { MT_TX_SW_CFG0, 0x00101001 }, + { MT_TX_SW_CFG1, 0x00010000 }, + { MT_TX_SW_CFG2, 0x00000000 }, + { MT_TXOP_CTRL_CFG, 0x0400583f }, + { MT_TX_RTS_CFG, 0x00100020 }, + { MT_TX_TIMEOUT_CFG, 0x000a2290 }, + { MT_TX_RETRY_CFG, 0x47f01f0f }, + { MT_EXP_ACK_TIME, 0x002c00dc }, + { MT_TX_PROT_CFG6, 0xe3f42004 }, + { MT_TX_PROT_CFG7, 0xe3f42084 }, + { MT_TX_PROT_CFG8, 0xe3f42104 }, + { MT_PIFS_TX_CFG, 0x00060fff }, + { MT_RX_FILTR_CFG, 0x00015f97 }, + { MT_LEGACY_BASIC_RATE, 0x0000017f }, + { MT_HT_BASIC_RATE, 0x00004003 }, + { MT_PN_PAD_MODE, 0x00000003 }, + { MT_TXOP_HLDR_ET, 0x00000002 }, + { 0xa44, 0x00000000 }, + { MT_HEADER_TRANS_CTRL_REG, 0x00000000 }, + { MT_TSO_CTRL, 0x00000000 }, + { MT_AUX_CLK_CFG, 0x00000000 }, + { MT_DACCLK_EN_DLY_CFG, 0x00000000 }, + { MT_TX_ALC_CFG_4, 0x00000000 }, + { MT_TX_ALC_VGA3, 0x00000000 }, + { MT_TX_PWR_CFG_0, 0x3a3a3a3a }, + { MT_TX_PWR_CFG_1, 0x3a3a3a3a }, + { MT_TX_PWR_CFG_2, 0x3a3a3a3a }, + { MT_TX_PWR_CFG_3, 0x3a3a3a3a }, + { MT_TX_PWR_CFG_4, 0x3a3a3a3a }, + { MT_TX_PWR_CFG_7, 0x3a3a3a3a }, + { MT_TX_PWR_CFG_8, 0x0000003a }, + { MT_TX_PWR_CFG_9, 0x0000003a }, + { MT_EFUSE_CTRL, 0x0000d000 }, + { MT_PAUSE_ENABLE_CONTROL1, 
0x0000000a }, + { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 }, + { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 }, + { MT_TX_SW_CFG3, 0x00000004 }, + { MT_HT_FBK_TO_LEGACY, 0x00001818 }, + { MT_VHT_HT_FBK_CFG1, 0xedcba980 }, + { MT_PROT_AUTO_TX_CFG, 0x00830083 }, + { MT_HT_CTRL_CFG, 0x000001ff }, + }; + struct mt76x2_reg_pair prot_vals[] = { + { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG }, + { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG }, + { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 }, + { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 }, + { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 }, + { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 }, + }; + + mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals)); + mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals)); +} + +static void +mt76x2_fixup_xtal(struct mt76x2_dev *dev) +{ + u16 eep_val; + s8 offset = 0; + + eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2); + + offset = eep_val & 0x7f; + if ((eep_val & 0xff) == 0xff) + offset = 0; + else if (eep_val & 0x80) + offset = 0 - offset; + + eep_val >>= 8; + if (eep_val == 0x00 || eep_val == 0xff) { + eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1); + eep_val &= 0xff; + + if (eep_val == 0x00 || eep_val == 0xff) + eep_val = 0x14; + } + + eep_val &= 0x7f; + mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset); + mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL); + + eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2); + switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) { + case 0: + mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80); + break; + case 1: + mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0); + break; + default: + break; + } +} + +static void +mt76x2_init_beacon_offsets(struct mt76x2_dev *dev) +{ + u16 base = MT_BEACON_BASE; + u32 regs[4] = {}; + int i; + + for (i = 0; i < 16; i++) { + u16 addr = dev->beacon_offsets[i]; + + regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4)); + } + + for (i = 0; i < 4; i++) + mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]); +} + +int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard) +{ + static const u8 null_addr[ETH_ALEN] = {}; + const u8 *macaddr = dev->mt76.macaddr; + u32 val; + int i, k; + + if (!mt76x2_wait_for_mac(dev)) + return -ETIMEDOUT; + + val = mt76_rr(dev, MT_WPDMA_GLO_CFG); + + val &= ~(MT_WPDMA_GLO_CFG_TX_DMA_EN | + MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_EN | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY | + MT_WPDMA_GLO_CFG_DMA_BURST_SIZE); + val |= FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3); + + mt76_wr(dev, MT_WPDMA_GLO_CFG, val); + + mt76x2_mac_pbf_init(dev); + mt76_write_mac_initvals(dev); + mt76x2_fixup_xtal(dev); + + mt76_clear(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_RESET_CSR | + MT_MAC_SYS_CTRL_RESET_BBP); + + if (is_mt7612(dev)) + mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN); + + mt76_set(dev, MT_EXT_CCA_CFG, 0x0000f000); + mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31)); + + mt76_wr(dev, MT_RF_BYPASS_0, 0x06000000); + mt76_wr(dev, MT_RF_SETTING_0, 0x08800000); + usleep_range(5000, 10000); + mt76_wr(dev, MT_RF_BYPASS_0, 0x00000000); + + mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401); + mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN); + + mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(macaddr)); + mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(macaddr + 4)); + + mt76_wr(dev, MT_MAC_BSSID_DW0, get_unaligned_le32(macaddr)); + mt76_wr(dev, MT_MAC_BSSID_DW1, get_unaligned_le16(macaddr + 4) | + FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */ + MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT); + + /* Fire a pre-TBTT interrupt 8 ms before TBTT */ + mt76_rmw_field(dev, 
MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT, + 8 << 4); + mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER, + MT_DFS_GP_INTERVAL); + mt76_wr(dev, MT_INT_TIMER_EN, 0); + + mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff); + if (!hard) + return 0; + + for (i = 0; i < 256 / 32; i++) + mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0); + + for (i = 0; i < 256; i++) + mt76x2_mac_wcid_setup(dev, i, 0, NULL); + + for (i = 0; i < 16; i++) + for (k = 0; k < 4; k++) + mt76x2_mac_shared_key_setup(dev, i, k, NULL); + + for (i = 0; i < 8; i++) { + mt76x2_mac_set_bssid(dev, i, null_addr); + mt76x2_mac_set_beacon(dev, i, NULL); + } + + for (i = 0; i < 16; i++) + mt76_rr(dev, MT_TX_STAT_FIFO); + + mt76_wr(dev, MT_CH_TIME_CFG, + MT_CH_TIME_CFG_TIMER_EN | + MT_CH_TIME_CFG_TX_AS_BUSY | + MT_CH_TIME_CFG_RX_AS_BUSY | + MT_CH_TIME_CFG_NAV_AS_BUSY | + MT_CH_TIME_CFG_EIFS_AS_BUSY | + FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1)); + + mt76x2_init_beacon_offsets(dev); + + mt76x2_set_tx_ackto(dev); + + return 0; +} + +int mt76x2_mac_start(struct mt76x2_dev *dev) +{ + int i; + + for (i = 0; i < 16; i++) + mt76_rr(dev, MT_TX_AGG_CNT(i)); + + for (i = 0; i < 16; i++) + mt76_rr(dev, MT_TX_STAT_FIFO); + + memset(dev->aggr_stats, 0, sizeof(dev->aggr_stats)); + + mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); + wait_for_wpdma(dev); + usleep_range(50, 100); + + mt76_set(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_EN | + MT_WPDMA_GLO_CFG_RX_DMA_EN); + + mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); + + mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + + mt76_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_TX | + MT_MAC_SYS_CTRL_ENABLE_RX); + + mt76x2_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | + MT_INT_TX_STAT); + + return 0; +} + +void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force) +{ + bool stopped = false; + u32 rts_cfg; + int i; + + mt76_wr(dev, MT_MAC_SYS_CTRL, 0); + + rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG); + mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT); + + /* Wait for MAC to become idle */ + for (i = 0; i < 300; i++) { + if (mt76_rr(dev, MT_MAC_STATUS) & + (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) + continue; + + if (mt76_rr(dev, MT_BBP(IBI, 12))) + continue; + + stopped = true; + break; + } + + if (force && !stopped) { + mt76_set(dev, MT_BBP(CORE, 4), BIT(1)); + mt76_clear(dev, MT_BBP(CORE, 4), BIT(1)); + + mt76_set(dev, MT_BBP(CORE, 4), BIT(0)); + mt76_clear(dev, MT_BBP(CORE, 4), BIT(0)); + } + + mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg); +} + +void mt76x2_mac_resume(struct mt76x2_dev *dev) +{ + mt76_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_TX | + MT_MAC_SYS_CTRL_ENABLE_RX); +} + +static void +mt76x2_power_on_rf_patch(struct mt76x2_dev *dev) +{ + mt76_set(dev, 0x10130, BIT(0) | BIT(16)); + udelay(1); + + mt76_clear(dev, 0x1001c, 0xff); + mt76_set(dev, 0x1001c, 0x30); + + mt76_wr(dev, 0x10014, 0x484f); + udelay(1); + + mt76_set(dev, 0x10130, BIT(17)); + udelay(125); + + mt76_clear(dev, 0x10130, BIT(16)); + udelay(50); + + mt76_set(dev, 0x1014c, BIT(19) | BIT(20)); +} + +static void +mt76x2_power_on_rf(struct mt76x2_dev *dev, int unit) +{ + int shift = unit ? 
8 : 0; + + /* Enable RF BG */ + mt76_set(dev, 0x10130, BIT(0) << shift); + udelay(10); + + /* Enable RFDIG LDO/AFE/ABB/ADDA */ + mt76_set(dev, 0x10130, (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift); + udelay(10); + + /* Switch RFDIG power to internal LDO */ + mt76_clear(dev, 0x10130, BIT(2) << shift); + udelay(10); + + mt76x2_power_on_rf_patch(dev); + + mt76_set(dev, 0x530, 0xf); +} + +static void +mt76x2_power_on(struct mt76x2_dev *dev) +{ + u32 val; + + /* Turn on WL MTCMOS */ + mt76_set(dev, MT_WLAN_MTC_CTRL, MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP); + + val = MT_WLAN_MTC_CTRL_STATE_UP | + MT_WLAN_MTC_CTRL_PWR_ACK | + MT_WLAN_MTC_CTRL_PWR_ACK_S; + + mt76_poll(dev, MT_WLAN_MTC_CTRL, val, val, 1000); + + mt76_clear(dev, MT_WLAN_MTC_CTRL, 0x7f << 16); + udelay(10); + + mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xf << 24); + udelay(10); + + mt76_set(dev, MT_WLAN_MTC_CTRL, 0xf << 24); + mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xfff); + + /* Turn on AD/DA power down */ + mt76_clear(dev, 0x11204, BIT(3)); + + /* WLAN function enable */ + mt76_set(dev, 0x10080, BIT(0)); + + /* Release BBP software reset */ + mt76_clear(dev, 0x10064, BIT(18)); + + mt76x2_power_on_rf(dev, 0); + mt76x2_power_on_rf(dev, 1); +} + +void mt76x2_set_tx_ackto(struct mt76x2_dev *dev) +{ + u8 ackto, sifs, slottime = dev->slottime; + + slottime += 3 * dev->coverage_class; + + sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG, + MT_XIFS_TIME_CFG_OFDM_SIFS); + + ackto = slottime + sifs; + mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG, + MT_TX_TIMEOUT_CFG_ACKTO, ackto); +} + +static void +mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable) +{ + u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL); + + if (enable) + val |= (MT_WLAN_FUN_CTRL_WLAN_EN | + MT_WLAN_FUN_CTRL_WLAN_CLK_EN); + else + val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN | + MT_WLAN_FUN_CTRL_WLAN_CLK_EN); + + mt76_wr(dev, MT_WLAN_FUN_CTRL, val); + udelay(20); +} + +static void +mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable) +{ + u32 val; + + val = mt76_rr(dev, MT_WLAN_FUN_CTRL); + + val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL; + + if (val & MT_WLAN_FUN_CTRL_WLAN_EN) { + val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF; + mt76_wr(dev, MT_WLAN_FUN_CTRL, val); + udelay(20); + + val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF; + } + + mt76_wr(dev, MT_WLAN_FUN_CTRL, val); + udelay(20); + + mt76x2_set_wlan_state(dev, enable); +} + +int mt76x2_init_hardware(struct mt76x2_dev *dev) +{ + static const u16 beacon_offsets[16] = { + /* 1024 byte per beacon */ + 0xc000, + 0xc400, + 0xc800, + 0xcc00, + 0xd000, + 0xd400, + 0xd800, + 0xdc00, + + /* BSS idx 8-15 not used for beacons */ + 0xc000, + 0xc000, + 0xc000, + 0xc000, + 0xc000, + 0xc000, + 0xc000, + 0xc000, + }; + u32 val; + int ret; + + dev->beacon_offsets = beacon_offsets; + tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet, + (unsigned long) dev); + + dev->chainmask = 0x202; + dev->global_wcid.idx = 255; + dev->global_wcid.hw_key_idx = -1; + dev->slottime = 9; + + val = mt76_rr(dev, MT_WPDMA_GLO_CFG); + val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE | + MT_WPDMA_GLO_CFG_BIG_ENDIAN | + MT_WPDMA_GLO_CFG_HDR_SEG_LEN; + val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE; + mt76_wr(dev, MT_WPDMA_GLO_CFG, val); + + mt76x2_reset_wlan(dev, true); + mt76x2_power_on(dev); + + ret = mt76x2_eeprom_init(dev); + if (ret) + return ret; + + ret = mt76x2_mac_reset(dev, true); + if (ret) + return ret; + + dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG); + + ret = mt76x2_dma_init(dev); + if (ret) + return ret; + + set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); + ret = mt76x2_mac_start(dev); + if (ret) + return 
ret; + + ret = mt76x2_mcu_init(dev); + if (ret) + return ret; + + mt76x2_mac_stop(dev, false); + + return 0; +} + +void mt76x2_stop_hardware(struct mt76x2_dev *dev) +{ + cancel_delayed_work_sync(&dev->cal_work); + cancel_delayed_work_sync(&dev->mac_work); + mt76x2_mcu_set_radio_state(dev, false); + mt76x2_mac_stop(dev, false); +} + +void mt76x2_cleanup(struct mt76x2_dev *dev) +{ + tasklet_disable(&dev->dfs_pd.dfs_tasklet); + tasklet_disable(&dev->pre_tbtt_tasklet); + mt76x2_stop_hardware(dev); + mt76x2_dma_cleanup(dev); + mt76x2_mcu_cleanup(dev); +} + +struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev) +{ + static const struct mt76_driver_ops drv_ops = { + .txwi_size = sizeof(struct mt76x2_txwi), + .update_survey = mt76x2_update_channel, + .tx_prepare_skb = mt76x2_tx_prepare_skb, + .tx_complete_skb = mt76x2_tx_complete_skb, + .rx_skb = mt76x2_queue_rx_skb, + .rx_poll_complete = mt76x2_rx_poll_complete, + }; + struct ieee80211_hw *hw; + struct mt76x2_dev *dev; + + hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x2_ops); + if (!hw) + return NULL; + + dev = hw->priv; + dev->mt76.dev = pdev; + dev->mt76.hw = hw; + dev->mt76.drv = &drv_ops; + mutex_init(&dev->mutex); + spin_lock_init(&dev->irq_lock); + + return dev; +} + +static void mt76x2_regd_notifier(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct mt76x2_dev *dev = hw->priv; + + mt76x2_dfs_set_domain(dev, request->dfs_region); +} + +#define CCK_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ + .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \ + .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \ +} + +#define OFDM_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \ + .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \ +} + +static struct ieee80211_rate mt76x2_rates[] = { + CCK_RATE(0, 10), + CCK_RATE(1, 20), + CCK_RATE(2, 55), + CCK_RATE(3, 110), + OFDM_RATE(0, 60), + OFDM_RATE(1, 90), + OFDM_RATE(2, 120), + OFDM_RATE(3, 180), + OFDM_RATE(4, 240), + OFDM_RATE(5, 360), + OFDM_RATE(6, 480), + OFDM_RATE(7, 540), +}; + +static const struct ieee80211_iface_limit if_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_ADHOC) + }, { + .max = 8, + .types = BIT(NL80211_IFTYPE_STATION) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_AP) + }, +}; + +static const struct ieee80211_iface_combination if_comb[] = { + { + .limits = if_limits, + .n_limits = ARRAY_SIZE(if_limits), + .max_interfaces = 8, + .num_different_channels = 1, + .beacon_int_infra_match = true, + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80), + } +}; + +static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on, + u8 delay_off) +{ + struct mt76x2_dev *dev = container_of(mt76, struct mt76x2_dev, + mt76); + u32 val; + + val = MT_LED_STATUS_DURATION(0xff) | + MT_LED_STATUS_OFF(delay_off) | + MT_LED_STATUS_ON(delay_on); + + mt76_wr(dev, MT_LED_S0(mt76->led_pin), val); + mt76_wr(dev, MT_LED_S1(mt76->led_pin), val); + + val = MT_LED_CTRL_REPLAY(mt76->led_pin) | + MT_LED_CTRL_KICK(mt76->led_pin); + if (mt76->led_al) + val |= MT_LED_CTRL_POLARITY(mt76->led_pin); + mt76_wr(dev, MT_LED_CTRL, val); +} + +static int mt76x2_led_set_blink(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct mt76_dev *mt76 = container_of(led_cdev, 
struct mt76_dev, + led_cdev); + u8 delta_on, delta_off; + + delta_off = max_t(u8, *delay_off / 10, 1); + delta_on = max_t(u8, *delay_on / 10, 1); + + mt76x2_led_set_config(mt76, delta_on, delta_off); + return 0; +} + +static void mt76x2_led_set_brightness(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev, + led_cdev); + + if (!brightness) + mt76x2_led_set_config(mt76, 0, 0xff); + else + mt76x2_led_set_config(mt76, 0xff, 0); +} + +static void +mt76x2_init_txpower(struct mt76x2_dev *dev, + struct ieee80211_supported_band *sband) +{ + struct ieee80211_channel *chan; + struct mt76x2_tx_power_info txp; + struct mt76_rate_power t = {}; + int target_power; + int i; + + for (i = 0; i < sband->n_channels; i++) { + chan = &sband->channels[i]; + + mt76x2_get_power_info(dev, &txp, chan); + + target_power = max_t(int, (txp.chain[0].target_power + + txp.chain[0].delta), + (txp.chain[1].target_power + + txp.chain[1].delta)); + + mt76x2_get_rate_power(dev, &t, chan); + + chan->max_power = mt76x2_get_max_rate_power(&t) + + target_power; + chan->max_power /= 2; + + /* convert to combined output power on 2x2 devices */ + chan->max_power += 3; + } +} + +int mt76x2_register_device(struct mt76x2_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + struct wiphy *wiphy = hw->wiphy; + void *status_fifo; + int fifo_size; + int i, ret; + + fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x2_tx_status)); + status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL); + if (!status_fifo) + return -ENOMEM; + + kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size); + + ret = mt76x2_init_hardware(dev); + if (ret) + return ret; + + hw->queues = 4; + hw->max_rates = 1; + hw->max_report_rates = 7; + hw->max_rate_tries = 1; + hw->extra_tx_headroom = 2; + + hw->sta_data_size = sizeof(struct mt76x2_sta); + hw->vif_data_size = sizeof(struct mt76x2_vif); + + for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) { + u8 *addr = dev->macaddr_list[i].addr; + + memcpy(addr, dev->mt76.macaddr, ETH_ALEN); + + if (!i) + continue; + + addr[0] |= BIT(1); + addr[0] ^= ((i - 1) << 2); + } + wiphy->addresses = dev->macaddr_list; + wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list); + + wiphy->iface_combinations = if_comb; + wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); + + wiphy->reg_notifier = mt76x2_regd_notifier; + + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + + ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); + ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); + + INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate); + INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work); + + dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; + + mt76x2_dfs_init_detector(dev); + + /* init led callbacks */ + dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; + dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; + + ret = mt76_register_device(&dev->mt76, true, mt76x2_rates, + ARRAY_SIZE(mt76x2_rates)); + if (ret) + goto fail; + + mt76x2_init_debugfs(dev); + mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband); + mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband); + + return 0; + +fail: + mt76x2_stop_hardware(dev); + return ret; +} + + diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c new file mode 100644 index 000000000000..6c30b5eaa9ca --- /dev/null +++ 
b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c @@ -0,0 +1,839 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/delay.h> +#include "mt76x2.h" +#include "mt76x2_mcu.h" +#include "mt76x2_eeprom.h" +#include "mt76x2_trace.h" + +void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr) +{ + idx &= 7; + mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr)); + mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR, + get_unaligned_le16(addr + 4)); +} + +static int +mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate) +{ + u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); + + switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) { + case MT_PHY_TYPE_OFDM: + if (idx >= 8) + idx = 0; + + if (status->band == NL80211_BAND_2GHZ) + idx += 4; + + status->rate_idx = idx; + return 0; + case MT_PHY_TYPE_CCK: + if (idx >= 8) { + idx -= 8; + status->enc_flags |= RX_ENC_FLAG_SHORTPRE; + } + + if (idx >= 4) + idx = 0; + + status->rate_idx = idx; + return 0; + case MT_PHY_TYPE_HT_GF: + status->enc_flags |= RX_ENC_FLAG_HT_GF; + /* fall through */ + case MT_PHY_TYPE_HT: + status->encoding = RX_ENC_HT; + status->rate_idx = idx; + break; + case MT_PHY_TYPE_VHT: + status->encoding = RX_ENC_VHT; + status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx); + status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1; + break; + default: + return -EINVAL; + } + + if (rate & MT_RXWI_RATE_LDPC) + status->enc_flags |= RX_ENC_FLAG_LDPC; + + if (rate & MT_RXWI_RATE_SGI) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + + if (rate & MT_RXWI_RATE_STBC) + status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT; + + switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) { + case MT_PHY_BW_20: + break; + case MT_PHY_BW_40: + status->bw = RATE_INFO_BW_40; + break; + case MT_PHY_BW_80: + status->bw = RATE_INFO_BW_80; + break; + default: + break; + } + + return 0; +} + +static __le16 +mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev, + const struct ieee80211_tx_rate *rate, u8 *nss_val) +{ + u16 rateval; + u8 phy, rate_idx; + u8 nss = 1; + u8 bw = 0; + + if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { + rate_idx = rate->idx; + nss = 1 + (rate->idx >> 4); + phy = MT_PHY_TYPE_VHT; + if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + bw = 2; + else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + bw = 1; + } else if (rate->flags & IEEE80211_TX_RC_MCS) { + rate_idx = rate->idx; + nss = 1 + (rate->idx >> 3); + phy = MT_PHY_TYPE_HT; + if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) + phy = MT_PHY_TYPE_HT_GF; + if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + bw = 1; + } else { + const struct ieee80211_rate *r; + int band = dev->mt76.chandef.chan->band; + u16 val; + + r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx]; + if (rate->flags & 
IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + val = r->hw_value_short; + else + val = r->hw_value; + + phy = val >> 8; + rate_idx = val & 0xff; + bw = 0; + } + + rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx); + rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy); + rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw); + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + rateval |= MT_RXWI_RATE_SGI; + + *nss_val = nss; + return cpu_to_le16(rateval); +} + +void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop) +{ + u32 val = mt76_rr(dev, MT_WCID_DROP(idx)); + u32 bit = MT_WCID_DROP_MASK(idx); + + /* prevent unnecessary writes */ + if ((val & bit) != (bit * drop)) + mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop)); +} + +void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid, + const struct ieee80211_tx_rate *rate) +{ + spin_lock_bh(&dev->mt76.lock); + wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss); + wcid->tx_rate_set = true; + spin_unlock_bh(&dev->mt76.lock); +} + +void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_rate *rate = &info->control.rates[0]; + struct ieee80211_key_conf *key = info->control.hw_key; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2)); + u16 txwi_flags = 0; + u8 nss; + s8 txpwr_adj, max_txpwr_adj; + u8 ccmp_pn[8]; + + memset(txwi, 0, sizeof(*txwi)); + + if (wcid) + txwi->wcid = wcid->idx; + else + txwi->wcid = 0xff; + + txwi->pktid = 1; + + if (wcid && wcid->sw_iv && key) { + u64 pn = atomic64_inc_return(&key->tx_pn); + ccmp_pn[0] = pn; + ccmp_pn[1] = pn >> 8; + ccmp_pn[2] = 0; + ccmp_pn[3] = 0x20 | (key->keyidx << 6); + ccmp_pn[4] = pn >> 16; + ccmp_pn[5] = pn >> 24; + ccmp_pn[6] = pn >> 32; + ccmp_pn[7] = pn >> 40; + txwi->iv = *((u32 *) &ccmp_pn[0]); + txwi->eiv = *((u32 *) &ccmp_pn[1]); + } + + spin_lock_bh(&dev->mt76.lock); + if (wcid && (rate->idx < 0 || !rate->count)) { + txwi->rate = wcid->tx_rate; + max_txpwr_adj = wcid->max_txpwr_adj; + nss = wcid->tx_rate_nss; + } else { + txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss); + max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate); + } + spin_unlock_bh(&dev->mt76.lock); + + txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf, + max_txpwr_adj); + txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj); + + if (mt76xx_rev(dev) >= MT76XX_REV_E4) + txwi->txstream = 0x13; + else if (mt76xx_rev(dev) >= MT76XX_REV_E3 && + !(txwi->rate & cpu_to_le16(rate_ht_mask))) + txwi->txstream = 0x93; + + if (info->flags & IEEE80211_TX_CTL_LDPC) + txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC); + if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1) + txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC); + if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC) + txwi_flags |= MT_TXWI_FLAGS_MMPS; + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + txwi->pktid |= MT_TXWI_PKTID_PROBE; + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { + u8 ba_size = IEEE80211_MIN_AMPDU_BUF; + + ba_size <<= sta->ht_cap.ampdu_factor; + ba_size = min_t(int, 63, ba_size - 1); + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + ba_size = 0; + 
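+ /* + * Worked example: IEEE80211_MIN_AMPDU_BUF is 8, so a peer advertising + * the HT-maximum ampdu_factor of 3 yields ba_size = min_t(int, 63, + * (8 << 3) - 1) == 63, the largest window MT_TXWI_ACK_CTL_BA_WINDOW + * can encode. + */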
txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size); + + txwi_flags |= MT_TXWI_FLAGS_AMPDU | + FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, + sta->ht_cap.ampdu_density); + } + + if (ieee80211_is_probe_resp(hdr->frame_control) || + ieee80211_is_beacon(hdr->frame_control)) + txwi_flags |= MT_TXWI_FLAGS_TS; + + txwi->flags |= cpu_to_le16(txwi_flags); + txwi->len_ctl = cpu_to_le16(skb->len); +} + +static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len) +{ + int hdrlen; + + if (!len) + return; + + hdrlen = ieee80211_get_hdrlen_from_skb(skb); + memmove(skb->data + len, skb->data, hdrlen); + skb_pull(skb, len); +} + +static struct mt76_wcid * +mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, u8 idx, bool unicast) +{ + struct mt76x2_sta *sta; + struct mt76_wcid *wcid; + + if (idx >= ARRAY_SIZE(dev->wcid)) + return NULL; + + wcid = rcu_dereference(dev->wcid[idx]); + if (unicast || !wcid) + return wcid; + + sta = container_of(wcid, struct mt76x2_sta, wcid); + return &sta->vif->group_wcid; +} + +int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, + void *rxi) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; + struct mt76x2_rxwi *rxwi = rxi; + u32 rxinfo = le32_to_cpu(rxwi->rxinfo); + u32 ctl = le32_to_cpu(rxwi->ctl); + u16 rate = le16_to_cpu(rxwi->rate); + u16 tid_sn = le16_to_cpu(rxwi->tid_sn); + bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST); + int pad_len = 0; + u8 pn_len; + u8 wcid; + int len; + + if (rxinfo & MT_RXINFO_L2PAD) + pad_len += 2; + + if (rxinfo & MT_RXINFO_DECRYPT) { + status->flag |= RX_FLAG_DECRYPTED; + status->flag |= RX_FLAG_MMIC_STRIPPED; + status->flag |= RX_FLAG_MIC_STRIPPED; + status->flag |= RX_FLAG_IV_STRIPPED; + } + + wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl); + status->wcid = mt76x2_rx_get_sta_wcid(dev, wcid, unicast); + + len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl); + pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo); + if (pn_len) { + int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len; + u8 *data = skb->data + offset; + + status->iv[0] = data[7]; + status->iv[1] = data[6]; + status->iv[2] = data[5]; + status->iv[3] = data[4]; + status->iv[4] = data[1]; + status->iv[5] = data[0]; + + /* + * Driver CCMP validation can't deal with fragments. + * Let mac80211 take care of it. 
+ */ + if (rxinfo & MT_RXINFO_FRAG) { + status->flag &= ~RX_FLAG_IV_STRIPPED; + } else { + pad_len += pn_len << 2; + len -= pn_len << 2; + } + } + + mt76x2_remove_hdr_pad(skb, pad_len); + + if (rxinfo & MT_RXINFO_BA) + status->aggr = true; + + if (WARN_ON_ONCE(len > skb->len)) + return -EINVAL; + + pskb_trim(skb, len); + status->chains = BIT(0) | BIT(1); + status->chain_signal[0] = mt76x2_phy_get_rssi(dev, rxwi->rssi[0], 0); + status->chain_signal[1] = mt76x2_phy_get_rssi(dev, rxwi->rssi[1], 1); + status->signal = max(status->chain_signal[0], status->chain_signal[1]); + status->freq = dev->mt76.chandef.chan->center_freq; + status->band = dev->mt76.chandef.chan->band; + + status->tid = FIELD_GET(MT_RXWI_TID, tid_sn); + status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn); + + return mt76x2_mac_process_rate(status, rate); +} + +static int +mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate, + enum nl80211_band band) +{ + u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); + + txrate->idx = 0; + txrate->flags = 0; + txrate->count = 1; + + switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) { + case MT_PHY_TYPE_OFDM: + if (band == NL80211_BAND_2GHZ) + idx += 4; + + txrate->idx = idx; + return 0; + case MT_PHY_TYPE_CCK: + if (idx >= 8) + idx -= 8; + + txrate->idx = idx; + return 0; + case MT_PHY_TYPE_HT_GF: + txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD; + /* fall through */ + case MT_PHY_TYPE_HT: + txrate->flags |= IEEE80211_TX_RC_MCS; + txrate->idx = idx; + break; + case MT_PHY_TYPE_VHT: + txrate->flags |= IEEE80211_TX_RC_VHT_MCS; + txrate->idx = idx; + break; + default: + return -EINVAL; + } + + switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) { + case MT_PHY_BW_20: + break; + case MT_PHY_BW_40: + txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + break; + case MT_PHY_BW_80: + txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; + break; + default: + return -EINVAL; + break; + } + + if (rate & MT_RXWI_RATE_SGI) + txrate->flags |= IEEE80211_TX_RC_SHORT_GI; + + return 0; +} + +static void +mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev, + struct ieee80211_tx_info *info, + struct mt76x2_tx_status *st, int n_frames) +{ + struct ieee80211_tx_rate *rate = info->status.rates; + int cur_idx, last_rate; + int i; + + if (!n_frames) + return; + + last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1); + mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate, + dev->mt76.chandef.chan->band); + if (last_rate < IEEE80211_TX_MAX_RATES - 1) + rate[last_rate + 1].idx = -1; + + cur_idx = rate[last_rate].idx + st->retry; + for (i = 0; i <= last_rate; i++) { + rate[i].flags = rate[last_rate].flags; + rate[i].idx = max_t(int, 0, cur_idx - i); + rate[i].count = 1; + } + + if (last_rate > 0) + rate[last_rate - 1].count = st->retry + 1 - last_rate; + + info->status.ampdu_len = n_frames; + info->status.ampdu_ack_len = st->success ? 
n_frames : 0; + + if (st->pktid & MT_TXWI_PKTID_PROBE) + info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; + + if (st->aggr) + info->flags |= IEEE80211_TX_CTL_AMPDU | + IEEE80211_TX_STAT_AMPDU; + + if (!st->ack_req) + info->flags |= IEEE80211_TX_CTL_NO_ACK; + else if (st->success) + info->flags |= IEEE80211_TX_STAT_ACK; +} + +static void +mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat, + u8 *update) +{ + struct ieee80211_tx_info info = {}; + struct ieee80211_sta *sta = NULL; + struct mt76_wcid *wcid = NULL; + struct mt76x2_sta *msta = NULL; + + rcu_read_lock(); + if (stat->wcid < ARRAY_SIZE(dev->wcid)) + wcid = rcu_dereference(dev->wcid[stat->wcid]); + + if (wcid) { + void *priv; + + priv = msta = container_of(wcid, struct mt76x2_sta, wcid); + sta = container_of(priv, struct ieee80211_sta, + drv_priv); + } + + if (msta && stat->aggr) { + u32 stat_val, stat_cache; + + stat_val = stat->rate; + stat_val |= ((u32) stat->retry) << 16; + stat_cache = msta->status.rate; + stat_cache |= ((u32) msta->status.retry) << 16; + + if (*update == 0 && stat_val == stat_cache && + stat->wcid == msta->status.wcid && msta->n_frames < 32) { + msta->n_frames++; + goto out; + } + + mt76x2_mac_fill_tx_status(dev, &info, &msta->status, + msta->n_frames); + + msta->status = *stat; + msta->n_frames = 1; + *update = 0; + } else { + mt76x2_mac_fill_tx_status(dev, &info, stat, 1); + *update = 1; + } + + ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info); + +out: + rcu_read_unlock(); +} + +void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq) +{ + struct mt76x2_tx_status stat = {}; + unsigned long flags; + u8 update = 1; + + if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) + return; + + trace_mac_txstat_poll(dev); + + while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) { + u32 stat1, stat2; + + spin_lock_irqsave(&dev->irq_lock, flags); + stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT); + stat1 = mt76_rr(dev, MT_TX_STAT_FIFO); + if (!(stat1 & MT_TX_STAT_FIFO_VALID)) { + spin_unlock_irqrestore(&dev->irq_lock, flags); + break; + } + + spin_unlock_irqrestore(&dev->irq_lock, flags); + + stat.valid = 1; + stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS); + stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR); + stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ); + stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1); + stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1); + stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2); + stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2); + trace_mac_txstat_fetch(dev, &stat); + + if (!irq) { + mt76x2_send_tx_status(dev, &stat, &update); + continue; + } + + kfifo_put(&dev->txstatus_fifo, stat); + } +} + +static void +mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb, + void *txwi_ptr) +{ + struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb); + struct mt76x2_txwi *txwi = txwi_ptr; + + mt76x2_mac_poll_tx_status(dev, false); + + txi->tries = 0; + txi->jiffies = jiffies; + txi->wcid = txwi->wcid; + txi->pktid = txwi->pktid; + trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid); + mt76x2_tx_complete(dev, skb); +} + +void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev) +{ + struct mt76x2_tx_status stat; + u8 update = 1; + + while (kfifo_get(&dev->txstatus_fifo, &stat)) + mt76x2_send_tx_status(dev, &stat, &update); +} + +void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush) +{ + struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + + if (e->txwi) + 
mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi); + else + dev_kfree_skb_any(e->skb); +} + +static enum mt76x2_cipher_type +mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) +{ + memset(key_data, 0, 32); + if (!key) + return MT_CIPHER_NONE; + + if (key->keylen > 32) + return MT_CIPHER_NONE; + + memcpy(key_data, key->key, key->keylen); + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + return MT_CIPHER_WEP40; + case WLAN_CIPHER_SUITE_WEP104: + return MT_CIPHER_WEP104; + case WLAN_CIPHER_SUITE_TKIP: + return MT_CIPHER_TKIP; + case WLAN_CIPHER_SUITE_CCMP: + return MT_CIPHER_AES_CCMP; + default: + return MT_CIPHER_NONE; + } +} + +void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac) +{ + struct mt76_wcid_addr addr = {}; + u32 attr; + + attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) | + FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8)); + + mt76_wr(dev, MT_WCID_ATTR(idx), attr); + + mt76_wr(dev, MT_WCID_TX_RATE(idx), 0); + mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0); + + if (idx >= 128) + return; + + if (mac) + memcpy(addr.macaddr, mac, ETH_ALEN); + + mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr)); +} + +int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx, + struct ieee80211_key_conf *key) +{ + enum mt76x2_cipher_type cipher; + u8 key_data[32]; + u8 iv_data[8]; + + cipher = mt76x2_mac_get_key_info(key, key_data); + if (cipher == MT_CIPHER_NONE && key) + return -EOPNOTSUPP; + + mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher); + mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); + + memset(iv_data, 0, sizeof(iv_data)); + if (key) { + mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, + !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); + iv_data[3] = key->keyidx << 6; + if (cipher >= MT_CIPHER_TKIP) + iv_data[3] |= 0x20; + } + + mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); + + return 0; +} + +int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx, + struct ieee80211_key_conf *key) +{ + enum mt76x2_cipher_type cipher; + u8 key_data[32]; + u32 val; + + cipher = mt76x2_mac_get_key_info(key, key_data); + if (cipher == MT_CIPHER_NONE && key) + return -EOPNOTSUPP; + + val = mt76_rr(dev, MT_SKEY_MODE(vif_idx)); + val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx)); + val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx); + mt76_wr(dev, MT_SKEY_MODE(vif_idx), val); + + mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data, + sizeof(key_data)); + + return 0; +} + +static int +mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb) +{ + int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0]; + struct mt76x2_txwi txwi; + + if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi))) + return -ENOSPC; + + mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL); + + mt76_wr_copy(dev, offset, &txwi, sizeof(txwi)); + offset += sizeof(txwi); + + mt76_wr_copy(dev, offset, skb->data, skb->len); + return 0; +} + +static int +__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb) +{ + int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0]; + int beacon_addr = dev->beacon_offsets[bcn_idx]; + int ret = 0; + int i; + + /* Prevent corrupt transmissions during update */ + mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx)); + + if (skb) { + ret = mt76_write_beacon(dev, beacon_addr, skb); + if (!ret) + dev->beacon_data_mask |= BIT(bcn_idx) & + dev->beacon_mask; + } else { + 
dev->beacon_data_mask &= ~BIT(bcn_idx); + for (i = 0; i < beacon_len; i += 4) + mt76_wr(dev, beacon_addr + i, 0); + } + + mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask); + + return ret; +} + +int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx, + struct sk_buff *skb) +{ + bool force_update = false; + int bcn_idx = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) { + if (vif_idx == i) { + force_update = !!dev->beacons[i] ^ !!skb; + + if (dev->beacons[i]) + dev_kfree_skb(dev->beacons[i]); + + dev->beacons[i] = skb; + __mt76x2_mac_set_beacon(dev, bcn_idx, skb); + } else if (force_update && dev->beacons[i]) { + __mt76x2_mac_set_beacon(dev, bcn_idx, dev->beacons[i]); + } + + bcn_idx += !!dev->beacons[i]; + } + + for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) { + if (!(dev->beacon_data_mask & BIT(i))) + break; + + __mt76x2_mac_set_beacon(dev, i, NULL); + } + + mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N, + bcn_idx - 1); + return 0; +} + +void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val) +{ + u8 old_mask = dev->beacon_mask; + bool en; + u32 reg; + + if (val) { + dev->beacon_mask |= BIT(vif_idx); + } else { + dev->beacon_mask &= ~BIT(vif_idx); + mt76x2_mac_set_beacon(dev, vif_idx, NULL); + } + + if (!!old_mask == !!dev->beacon_mask) + return; + + en = dev->beacon_mask; + + mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en); + reg = MT_BEACON_TIME_CFG_BEACON_TX | + MT_BEACON_TIME_CFG_TBTT_EN | + MT_BEACON_TIME_CFG_TIMER_EN; + mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en); + + if (en) + mt76x2_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); + else + mt76x2_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); +} + +void mt76x2_update_channel(struct mt76_dev *mdev) +{ + struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + struct mt76_channel_state *state; + u32 active, busy; + + state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan); + + busy = mt76_rr(dev, MT_CH_BUSY); + active = busy + mt76_rr(dev, MT_CH_IDLE); + + spin_lock_bh(&dev->mt76.cc_lock); + state->cc_busy += busy; + state->cc_active += active; + spin_unlock_bh(&dev->mt76.cc_lock); +} + +void mt76x2_mac_work(struct work_struct *work) +{ + struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev, + mac_work.work); + int i, idx; + + mt76x2_update_channel(&dev->mt76); + for (i = 0, idx = 0; i < 16; i++) { + u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i)); + + dev->aggr_stats[idx++] += val & 0xffff; + dev->aggr_stats[idx++] += val >> 16; + } + + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work, + MT_CALIBRATE_INTERVAL); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h new file mode 100644 index 000000000000..8a8a25e32d5f --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h @@ -0,0 +1,190 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __MT76x2_MAC_H +#define __MT76x2_MAC_H + +#include "mt76.h" + +struct mt76x2_dev; +struct mt76x2_sta; +struct mt76x2_vif; +struct mt76x2_txwi; + +struct mt76x2_tx_status { + u8 valid:1; + u8 success:1; + u8 aggr:1; + u8 ack_req:1; + u8 wcid; + u8 pktid; + u8 retry; + u16 rate; +} __packed __aligned(2); + +struct mt76x2_tx_info { + unsigned long jiffies; + u8 tries; + + u8 wcid; + u8 pktid; + u8 retry; +}; + +struct mt76x2_rxwi { + __le32 rxinfo; + + __le32 ctl; + + __le16 tid_sn; + __le16 rate; + + u8 rssi[4]; + + __le32 bbp_rxinfo[4]; +}; + +#define MT_RXINFO_BA BIT(0) +#define MT_RXINFO_DATA BIT(1) +#define MT_RXINFO_NULL BIT(2) +#define MT_RXINFO_FRAG BIT(3) +#define MT_RXINFO_UNICAST BIT(4) +#define MT_RXINFO_MULTICAST BIT(5) +#define MT_RXINFO_BROADCAST BIT(6) +#define MT_RXINFO_MYBSS BIT(7) +#define MT_RXINFO_CRCERR BIT(8) +#define MT_RXINFO_ICVERR BIT(9) +#define MT_RXINFO_MICERR BIT(10) +#define MT_RXINFO_AMSDU BIT(11) +#define MT_RXINFO_HTC BIT(12) +#define MT_RXINFO_RSSI BIT(13) +#define MT_RXINFO_L2PAD BIT(14) +#define MT_RXINFO_AMPDU BIT(15) +#define MT_RXINFO_DECRYPT BIT(16) +#define MT_RXINFO_BSSIDX3 BIT(17) +#define MT_RXINFO_WAPI_KEY BIT(18) +#define MT_RXINFO_PN_LEN GENMASK(21, 19) +#define MT_RXINFO_SW_FTYPE0 BIT(22) +#define MT_RXINFO_SW_FTYPE1 BIT(23) +#define MT_RXINFO_PROBE_RESP BIT(24) +#define MT_RXINFO_BEACON BIT(25) +#define MT_RXINFO_DISASSOC BIT(26) +#define MT_RXINFO_DEAUTH BIT(27) +#define MT_RXINFO_ACTION BIT(28) +#define MT_RXINFO_TCP_SUM_ERR BIT(30) +#define MT_RXINFO_IP_SUM_ERR BIT(31) + +#define MT_RXWI_CTL_WCID GENMASK(7, 0) +#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8) +#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10) +#define MT_RXWI_CTL_UDF GENMASK(15, 13) +#define MT_RXWI_CTL_MPDU_LEN GENMASK(29, 16) +#define MT_RXWI_CTL_EOF BIT(31) + +#define MT_RXWI_TID GENMASK(3, 0) +#define MT_RXWI_SN GENMASK(15, 4) + +#define MT_RXWI_RATE_INDEX GENMASK(5, 0) +#define MT_RXWI_RATE_LDPC BIT(6) +#define MT_RXWI_RATE_BW GENMASK(8, 7) +#define MT_RXWI_RATE_SGI BIT(9) +#define MT_RXWI_RATE_STBC BIT(10) +#define MT_RXWI_RATE_LDPC_EXSYM BIT(11) +#define MT_RXWI_RATE_PHY GENMASK(15, 13) + +#define MT_RATE_INDEX_VHT_IDX GENMASK(3, 0) +#define MT_RATE_INDEX_VHT_NSS GENMASK(5, 4) + +#define MT_TX_PWR_ADJ GENMASK(3, 0) + +enum mt76x2_phy_bandwidth { + MT_PHY_BW_20, + MT_PHY_BW_40, + MT_PHY_BW_80, +}; + +#define MT_TXWI_FLAGS_FRAG BIT(0) +#define MT_TXWI_FLAGS_MMPS BIT(1) +#define MT_TXWI_FLAGS_CFACK BIT(2) +#define MT_TXWI_FLAGS_TS BIT(3) +#define MT_TXWI_FLAGS_AMPDU BIT(4) +#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5) +#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8) +#define MT_TXWI_FLAGS_NDPS BIT(10) +#define MT_TXWI_FLAGS_RTSBWSIG BIT(11) +#define MT_TXWI_FLAGS_NDP_BW GENMASK(13, 12) +#define MT_TXWI_FLAGS_SOUND BIT(14) +#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15) + +#define MT_TXWI_ACK_CTL_REQ BIT(0) +#define MT_TXWI_ACK_CTL_NSEQ BIT(1) +#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2) + +#define MT_TXWI_PKTID_PROBE BIT(7) + +struct mt76x2_txwi { + __le16 flags; + __le16 rate; + u8 ack_ctl; + u8 wcid; + __le16 len_ctl; + __le32 iv; + __le32 eiv; + u8 aid; + u8 txstream; + u8 ctl2; + u8 pktid; +} __packed __aligned(4); + +static inline 
struct mt76x2_tx_info * +mt76x2_skb_tx_info(struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + return (void *) info->status.status_driver_data; +} + +int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard); +int mt76x2_mac_start(struct mt76x2_dev *dev); +void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force); +void mt76x2_mac_resume(struct mt76x2_dev *dev); +void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr); + +int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, + void *rxi); +void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta); +void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac); +int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx, + struct ieee80211_key_conf *key); +void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid, + const struct ieee80211_tx_rate *rate); +void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop); + +int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx, + struct ieee80211_key_conf *key); + +int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx, + struct sk_buff *skb); +void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val); + +void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq); +void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev); + +void mt76x2_mac_work(struct work_struct *work); + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c new file mode 100644 index 000000000000..bf26284b9989 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c @@ -0,0 +1,577 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "mt76x2.h" + +static int +mt76x2_start(struct ieee80211_hw *hw) +{ + struct mt76x2_dev *dev = hw->priv; + int ret; + + mutex_lock(&dev->mutex); + + ret = mt76x2_mac_start(dev); + if (ret) + goto out; + + ret = mt76x2_phy_start(dev); + if (ret) + goto out; + + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work, + MT_CALIBRATE_INTERVAL); + + set_bit(MT76_STATE_RUNNING, &dev->mt76.state); + +out: + mutex_unlock(&dev->mutex); + return ret; +} + +static void +mt76x2_stop(struct ieee80211_hw *hw) +{ + struct mt76x2_dev *dev = hw->priv; + + mutex_lock(&dev->mutex); + clear_bit(MT76_STATE_RUNNING, &dev->mt76.state); + mt76x2_stop_hardware(dev); + mutex_unlock(&dev->mutex); +} + +static void +mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq) +{ + struct mt76_txq *mtxq; + + if (!txq) + return; + + mtxq = (struct mt76_txq *) txq->drv_priv; + if (txq->sta) { + struct mt76x2_sta *sta; + + sta = (struct mt76x2_sta *) txq->sta->drv_priv; + mtxq->wcid = &sta->wcid; + } else { + struct mt76x2_vif *mvif; + + mvif = (struct mt76x2_vif *) txq->vif->drv_priv; + mtxq->wcid = &mvif->group_wcid; + } + + mt76_txq_init(&dev->mt76, txq); +} + +static int +mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt76x2_dev *dev = hw->priv; + struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; + unsigned int idx = 0; + + if (vif->addr[0] & BIT(1)) + idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7); + + /* + * Client mode typically only has one configurable BSSID register, + * which is used for bssidx=0. This is linked to the MAC address. + * Since mac80211 allows changing interface types, and we cannot + * force the use of the primary MAC address for a station mode + * interface, we need some other way of configuring a per-interface + * remote BSSID. + * The hardware provides an AP-Client feature, where bssidx 0-7 are + * used for AP mode and bssidx 8-15 for client mode. + * We shift the station interface bss index by 8 to force the + * hardware to recognize the BSSID. + * The resulting bssidx mismatch for unicast frames is ignored by hw. 
+ */ + if (vif->type == NL80211_IFTYPE_STATION) + idx += 8; + + mvif->idx = idx; + mvif->group_wcid.idx = 254 - idx; + mvif->group_wcid.hw_key_idx = -1; + mt76x2_txq_init(dev, vif->txq); + + return 0; +} + +static void +mt76x2_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt76x2_dev *dev = hw->priv; + + mt76_txq_remove(&dev->mt76, vif->txq); +} + +static int +mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef) +{ + int ret; + + mt76_set_channel(&dev->mt76); + + tasklet_disable(&dev->pre_tbtt_tasklet); + tasklet_disable(&dev->dfs_pd.dfs_tasklet); + cancel_delayed_work_sync(&dev->cal_work); + + mt76x2_mac_stop(dev, true); + ret = mt76x2_phy_set_channel(dev, chandef); + + /* channel cycle counters read-and-clear */ + mt76_rr(dev, MT_CH_IDLE); + mt76_rr(dev, MT_CH_BUSY); + + mt76x2_dfs_init_params(dev); + + mt76x2_mac_resume(dev); + tasklet_enable(&dev->dfs_pd.dfs_tasklet); + tasklet_enable(&dev->pre_tbtt_tasklet); + + return ret; +} + +static int +mt76x2_config(struct ieee80211_hw *hw, u32 changed) +{ + struct mt76x2_dev *dev = hw->priv; + int ret = 0; + + mutex_lock(&dev->mutex); + + if (changed & IEEE80211_CONF_CHANGE_MONITOR) { + if (!(hw->conf.flags & IEEE80211_CONF_MONITOR)) + dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC; + else + dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC; + + mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + } + + if (changed & IEEE80211_CONF_CHANGE_POWER) { + dev->txpower_conf = hw->conf.power_level * 2; + + /* convert to per-chain power for 2x2 devices */ + dev->txpower_conf -= 6; + + if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) { + mt76x2_phy_set_txpower(dev); + mt76x2_tx_set_txpwr_auto(dev, dev->txpower_conf); + } + } + + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + ieee80211_stop_queues(hw); + ret = mt76x2_set_channel(dev, &hw->conf.chandef); + ieee80211_wake_queues(hw); + } + + mutex_unlock(&dev->mutex); + + return ret; +} + +static void +mt76x2_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, + unsigned int *total_flags, u64 multicast) +{ + struct mt76x2_dev *dev = hw->priv; + u32 flags = 0; + +#define MT76_FILTER(_flag, _hw) do { \ + flags |= *total_flags & FIF_##_flag; \ + dev->rxfilter &= ~(_hw); \ + dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ + } while (0) + + mutex_lock(&dev->mutex); + + dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS; + + MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR); + MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR); + MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK | + MT_RX_FILTR_CFG_CTS | + MT_RX_FILTR_CFG_CFEND | + MT_RX_FILTR_CFG_CFACK | + MT_RX_FILTR_CFG_BA | + MT_RX_FILTR_CFG_CTRL_RSV); + MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL); + + *total_flags = flags; + mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + + mutex_unlock(&dev->mutex); +} + +static void +mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed) +{ + struct mt76x2_dev *dev = hw->priv; + struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; + + mutex_lock(&dev->mutex); + + if (changed & BSS_CHANGED_BSSID) + mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid); + + if (changed & BSS_CHANGED_BEACON_INT) + mt76_rmw_field(dev, MT_BEACON_TIME_CFG, + MT_BEACON_TIME_CFG_INTVAL, + info->beacon_int << 4); + + if (changed & BSS_CHANGED_BEACON_ENABLED) { + tasklet_disable(&dev->pre_tbtt_tasklet); + mt76x2_mac_set_beacon_enable(dev, mvif->idx, + info->enable_beacon); + tasklet_enable(&dev->pre_tbtt_tasklet); + } + + if (changed 
& BSS_CHANGED_ERP_SLOT) { + int slottime = info->use_short_slot ? 9 : 20; + + dev->slottime = slottime; + mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, + MT_BKOFF_SLOT_CFG_SLOTTIME, slottime); + } + + mutex_unlock(&dev->mutex); +} + +static int +mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt76x2_dev *dev = hw->priv; + struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; + struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; + int ret = 0; + int idx = 0; + int i; + + mutex_lock(&dev->mutex); + + idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid)); + if (idx < 0) { + ret = -ENOSPC; + goto out; + } + + msta->vif = mvif; + msta->wcid.sta = 1; + msta->wcid.idx = idx; + msta->wcid.hw_key_idx = -1; + mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr); + mt76x2_mac_wcid_set_drop(dev, idx, false); + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + mt76x2_txq_init(dev, sta->txq[i]); + + rcu_assign_pointer(dev->wcid[idx], &msta->wcid); + +out: + mutex_unlock(&dev->mutex); + + return ret; +} + +static int +mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt76x2_dev *dev = hw->priv; + struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; + int idx = msta->wcid.idx; + int i; + + mutex_lock(&dev->mutex); + rcu_assign_pointer(dev->wcid[idx], NULL); + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + mt76_txq_remove(&dev->mt76, sta->txq[i]); + mt76x2_mac_wcid_set_drop(dev, idx, true); + mt76_wcid_free(dev->wcid_mask, idx); + mt76x2_mac_wcid_setup(dev, idx, 0, NULL); + mutex_unlock(&dev->mutex); + + return 0; +} + +static void +mt76x2_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, struct ieee80211_sta *sta) +{ + struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; + struct mt76x2_dev *dev = hw->priv; + int idx = msta->wcid.idx; + + switch (cmd) { + case STA_NOTIFY_SLEEP: + mt76x2_mac_wcid_set_drop(dev, idx, true); + mt76_stop_tx_queues(&dev->mt76, sta, true); + break; + case STA_NOTIFY_AWAKE: + mt76x2_mac_wcid_set_drop(dev, idx, false); + break; + } +} + +static int +mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt76x2_dev *dev = hw->priv; + struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; + struct mt76x2_sta *msta; + struct mt76_wcid *wcid; + int idx = key->keyidx; + int ret; + + /* + * The hardware does not support per-STA RX GTK, fall back + * to software mode for these. + */ + if ((vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MESH_POINT) && + (key->cipher == WLAN_CIPHER_SUITE_TKIP || + key->cipher == WLAN_CIPHER_SUITE_CCMP) && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + + msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL; + wcid = msta ? 
&msta->wcid : &mvif->group_wcid; + + if (cmd == SET_KEY) { + key->hw_key_idx = wcid->idx; + wcid->hw_key_idx = idx; + if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) { + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + wcid->sw_iv = true; + } + } else { + if (idx == wcid->hw_key_idx) { + wcid->hw_key_idx = -1; + wcid->sw_iv = true; + } + + key = NULL; + } + mt76_wcid_key_setup(&dev->mt76, wcid, key); + + if (!msta) { + if (key || wcid->hw_key_idx == idx) { + ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key); + if (ret) + return ret; + } + + return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key); + } + + return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key); +} + +static int +mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct mt76x2_dev *dev = hw->priv; + u8 cw_min = 5, cw_max = 10, qid; + u32 val; + + qid = dev->mt76.q_tx[queue].hw_idx; + + if (params->cw_min) + cw_min = fls(params->cw_min); + if (params->cw_max) + cw_max = fls(params->cw_max); + + val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) | + FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) | + FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) | + FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max); + mt76_wr(dev, MT_EDCA_CFG_AC(qid), val); + + val = mt76_rr(dev, MT_WMM_TXOP(qid)); + val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid)); + val |= params->txop << MT_WMM_TXOP_SHIFT(qid); + mt76_wr(dev, MT_WMM_TXOP(qid), val); + + val = mt76_rr(dev, MT_WMM_AIFSN); + val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid)); + val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid); + mt76_wr(dev, MT_WMM_AIFSN, val); + + val = mt76_rr(dev, MT_WMM_CWMIN); + val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid)); + val |= cw_min << MT_WMM_CWMIN_SHIFT(qid); + mt76_wr(dev, MT_WMM_CWMIN, val); + + val = mt76_rr(dev, MT_WMM_CWMAX); + val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid)); + val |= cw_max << MT_WMM_CWMAX_SHIFT(qid); + mt76_wr(dev, MT_WMM_CWMAX, val); + + return 0; +} + +static void +mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const u8 *mac) +{ + struct mt76x2_dev *dev = hw->priv; + + tasklet_disable(&dev->pre_tbtt_tasklet); + set_bit(MT76_SCANNING, &dev->mt76.state); +} + +static void +mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt76x2_dev *dev = hw->priv; + + clear_bit(MT76_SCANNING, &dev->mt76.state); + tasklet_enable(&dev->pre_tbtt_tasklet); + mt76_txq_schedule_all(&dev->mt76); +} + +static void +mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ +} + +static int +mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm) +{ + struct mt76x2_dev *dev = hw->priv; + + *dbm = dev->txpower_cur / 2; + + /* convert from per-chain power to combined output on 2x2 devices */ + *dbm += 3; + + return 0; +} + +static int +mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + enum ieee80211_ampdu_mlme_action action = params->action; + struct ieee80211_sta *sta = params->sta; + struct mt76x2_dev *dev = hw->priv; + struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; + struct ieee80211_txq *txq = sta->txq[params->tid]; + u16 tid = params->tid; + u16 *ssn = ¶ms->ssn; + struct mt76_txq *mtxq; + + if (!txq) + return -EINVAL; + + mtxq = (struct mt76_txq *)txq->drv_priv; + + switch (action) { + case IEEE80211_AMPDU_RX_START: + mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, 
params->buf_size); + mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); + break; + case IEEE80211_AMPDU_RX_STOP: + mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); + mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, + BIT(16 + tid)); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + mtxq->aggr = true; + mtxq->send_bar = false; + ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + mtxq->aggr = false; + ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); + break; + case IEEE80211_AMPDU_TX_START: + mtxq->agg_ssn = *ssn << 4; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + mtxq->aggr = false; + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + } + + return 0; +} + +static void +mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt76x2_dev *dev = hw->priv; + struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; + struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates); + struct ieee80211_tx_rate rate = {}; + + if (!rates) + return; + + rate.idx = rates->rate[0].idx; + rate.flags = rates->rate[0].flags; + mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate); + msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate); +} + +static void mt76x2_set_coverage_class(struct ieee80211_hw *hw, + s16 coverage_class) +{ + struct mt76x2_dev *dev = hw->priv; + + mutex_lock(&dev->mutex); + dev->coverage_class = coverage_class; + mt76x2_set_tx_ackto(dev); + mutex_unlock(&dev->mutex); +} + +const struct ieee80211_ops mt76x2_ops = { + .tx = mt76x2_tx, + .start = mt76x2_start, + .stop = mt76x2_stop, + .add_interface = mt76x2_add_interface, + .remove_interface = mt76x2_remove_interface, + .config = mt76x2_config, + .configure_filter = mt76x2_configure_filter, + .bss_info_changed = mt76x2_bss_info_changed, + .sta_add = mt76x2_sta_add, + .sta_remove = mt76x2_sta_remove, + .sta_notify = mt76x2_sta_notify, + .set_key = mt76x2_set_key, + .conf_tx = mt76x2_conf_tx, + .sw_scan_start = mt76x2_sw_scan, + .sw_scan_complete = mt76x2_sw_scan_complete, + .flush = mt76x2_flush, + .ampdu_action = mt76x2_ampdu_action, + .get_txpower = mt76x2_get_txpower, + .wake_tx_queue = mt76_wake_tx_queue, + .sta_rate_tbl_update = mt76x2_sta_rate_tbl_update, + .release_buffered_frames = mt76_release_buffered_frames, + .set_coverage_class = mt76x2_set_coverage_class, + .get_survey = mt76_get_survey, +}; + diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c new file mode 100644 index 000000000000..15820b11f9db --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c @@ -0,0 +1,453 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/firmware.h> +#include <linux/delay.h> + +#include "mt76x2.h" +#include "mt76x2_mcu.h" +#include "mt76x2_dma.h" +#include "mt76x2_eeprom.h" + +struct mt76x2_fw_header { + __le32 ilm_len; + __le32 dlm_len; + __le16 build_ver; + __le16 fw_ver; + u8 pad[4]; + char build_time[16]; +}; + +struct mt76x2_patch_header { + char build_time[16]; + char platform[4]; + char hw_version[4]; + char patch_version[4]; + u8 pad[2]; +}; + +static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len) +{ + struct sk_buff *skb; + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) + return NULL; + memcpy(skb_put(skb, len), data, len); + + return skb; +} + +static struct sk_buff * +mt76x2_mcu_get_response(struct mt76x2_dev *dev, unsigned long expires) +{ + unsigned long timeout; + + if (!time_is_after_jiffies(expires)) + return NULL; + + timeout = expires - jiffies; + wait_event_timeout(dev->mcu.wait, !skb_queue_empty(&dev->mcu.res_q), + timeout); + return skb_dequeue(&dev->mcu.res_q); +} + +static int +mt76x2_mcu_msg_send(struct mt76x2_dev *dev, struct sk_buff *skb, + enum mcu_cmd cmd) +{ + unsigned long expires = jiffies + HZ; + int ret; + u8 seq; + + if (!skb) + return -EINVAL; + + mutex_lock(&dev->mcu.mutex); + + seq = ++dev->mcu.msg_seq & 0xf; + if (!seq) + seq = ++dev->mcu.msg_seq & 0xf; + + ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq); + if (ret) + goto out; + + while (1) { + u32 *rxfce; + bool check_seq = false; + + skb = mt76x2_mcu_get_response(dev, expires); + if (!skb) { + dev_err(dev->mt76.dev, + "MCU message %d (seq %d) timed out\n", cmd, + seq); + ret = -ETIMEDOUT; + break; + } + + rxfce = (u32 *) skb->cb; + + if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce)) + check_seq = true; + + dev_kfree_skb(skb); + if (check_seq) + break; + } + +out: + mutex_unlock(&dev->mcu.mutex); + + return ret; +} + +static int +mt76pci_load_rom_patch(struct mt76x2_dev *dev) +{ + const struct firmware *fw = NULL; + struct mt76x2_patch_header *hdr; + bool rom_protect = !is_mt7612(dev); + int len, ret = 0; + __le32 *cur; + u32 patch_mask, patch_reg; + + if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) { + dev_err(dev->mt76.dev, + "Could not get hardware semaphore for ROM PATCH\n"); + return -ETIMEDOUT; + } + + if (mt76xx_rev(dev) >= MT76XX_REV_E3) { + patch_mask = BIT(0); + patch_reg = MT_MCU_CLOCK_CTL; + } else { + patch_mask = BIT(1); + patch_reg = MT_MCU_COM_REG0; + } + + if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) { + dev_info(dev->mt76.dev, "ROM patch already applied\n"); + goto out; + } + + ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev); + if (ret) + goto out; + + if (!fw || !fw->data || fw->size <= sizeof(*hdr)) { + ret = -EIO; + dev_err(dev->mt76.dev, "Failed to load firmware\n"); + goto out; + } + + hdr = (struct mt76x2_patch_header *) fw->data; + dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time); + + mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET); + + cur = (__le32 *) (fw->data + sizeof(*hdr)); + len = fw->size - sizeof(*hdr); + mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len); + + mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0); + + /* 
Trigger ROM */ + mt76_wr(dev, MT_MCU_INT_LEVEL, 4); + + if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) { + dev_err(dev->mt76.dev, "Failed to load ROM patch\n"); + ret = -ETIMEDOUT; + } + +out: + /* release semaphore */ + if (rom_protect) + mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1); + release_firmware(fw); + return ret; +} + +static int +mt76pci_load_firmware(struct mt76x2_dev *dev) +{ + const struct firmware *fw; + const struct mt76x2_fw_header *hdr; + int i, len, ret; + __le32 *cur; + u32 offset, val; + + ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) + goto error; + + hdr = (const struct mt76x2_fw_header *) fw->data; + + len = sizeof(*hdr); + len += le32_to_cpu(hdr->ilm_len); + len += le32_to_cpu(hdr->dlm_len); + + if (fw->size != len) + goto error; + + val = le16_to_cpu(hdr->fw_ver); + dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n", + (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf); + + val = le16_to_cpu(hdr->build_ver); + dev_info(dev->mt76.dev, "Build: %x\n", val); + dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time); + + cur = (__le32 *) (fw->data + sizeof(*hdr)); + len = le32_to_cpu(hdr->ilm_len); + + mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET); + mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len); + + cur += len / sizeof(*cur); + len = le32_to_cpu(hdr->dlm_len); + + if (mt76xx_rev(dev) >= MT76XX_REV_E3) + offset = MT_MCU_DLM_ADDR_E3; + else + offset = MT_MCU_DLM_ADDR; + + mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET); + mt76_wr_copy(dev, offset, cur, len); + + mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0); + + val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2); + if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1) + mt76_set(dev, MT_MCU_COM_REG0, BIT(30)); + + /* trigger firmware */ + mt76_wr(dev, MT_MCU_INT_LEVEL, 2); + for (i = 200; i > 0; i--) { + val = mt76_rr(dev, MT_MCU_COM_REG0); + + if (val & 1) + break; + + msleep(10); + } + + if (!i) { + dev_err(dev->mt76.dev, "Firmware failed to start\n"); + release_firmware(fw); + return -ETIMEDOUT; + } + + dev_info(dev->mt76.dev, "Firmware running!\n"); + + release_firmware(fw); + + return ret; + +error: + dev_err(dev->mt76.dev, "Invalid firmware\n"); + release_firmware(fw); + return -ENOENT; +} + +static int +mt76x2_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func, + u32 val) +{ + struct sk_buff *skb; + struct { + __le32 id; + __le32 value; + } __packed __aligned(4) msg = { + .id = cpu_to_le32(func), + .value = cpu_to_le32(val), + }; + + skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); + return mt76x2_mcu_msg_send(dev, skb, CMD_FUN_SET_OP); +} + +int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level, + u8 channel) +{ + struct sk_buff *skb; + struct { + u8 cr_mode; + u8 temp; + u8 ch; + u8 _pad0; + + __le32 cfg; + } __packed __aligned(4) msg = { + .cr_mode = type, + .temp = temp_level, + .ch = channel, + }; + u32 val; + + val = BIT(31); + val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff; + val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00; + msg.cfg = cpu_to_le32(val); + + /* first set the channel without the extension channel info */ + skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); + return mt76x2_mcu_msg_send(dev, skb, CMD_LOAD_CR); +} + +int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw, + u8 bw_index, bool scan) +{ + struct sk_buff *skb; + struct { + u8 idx; + u8 scan; + u8 bw; + u8 _pad0; + + __le16 chainmask; + 
u8 ext_chan; + u8 _pad1; + + } __packed __aligned(4) msg = { + .idx = channel, + .scan = scan, + .bw = bw, + .chainmask = cpu_to_le16(dev->chainmask), + }; + + /* first set the channel without the extension channel info */ + skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); + mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP); + + usleep_range(5000, 10000); + + msg.ext_chan = 0xe0 + bw_index; + skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); + return mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP); +} + +int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on) +{ + struct sk_buff *skb; + struct { + __le32 mode; + __le32 level; + } __packed __aligned(4) msg = { + .mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF), + .level = cpu_to_le32(0), + }; + + skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); + return mt76x2_mcu_msg_send(dev, skb, CMD_POWER_SAVING_OP); +} + +int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type, + u32 param) +{ + struct sk_buff *skb; + struct { + __le32 id; + __le32 value; + } __packed __aligned(4) msg = { + .id = cpu_to_le32(type), + .value = cpu_to_le32(param), + }; + int ret; + + mt76_clear(dev, MT_MCU_COM_REG0, BIT(31)); + + skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); + ret = mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP); + if (ret) + return ret; + + if (WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0, + BIT(31), BIT(31), 100))) + return -ETIMEDOUT; + + return 0; +} + +int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, + struct mt76x2_tssi_comp *tssi_data) +{ + struct sk_buff *skb; + struct { + __le32 id; + struct mt76x2_tssi_comp data; + } __packed __aligned(4) msg = { + .id = cpu_to_le32(MCU_CAL_TSSI_COMP), + .data = *tssi_data, + }; + + skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); + return mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP); +} + +int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain, + bool force) +{ + struct sk_buff *skb; + struct { + __le32 channel; + __le32 gain_val; + } __packed __aligned(4) msg = { + .channel = cpu_to_le32(channel), + .gain_val = cpu_to_le32(gain), + }; + + if (force) + msg.channel |= cpu_to_le32(BIT(31)); + + skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg)); + return mt76x2_mcu_msg_send(dev, skb, CMD_INIT_GAIN_OP); +} + +int mt76x2_mcu_init(struct mt76x2_dev *dev) +{ + int ret; + + mutex_init(&dev->mcu.mutex); + + ret = mt76pci_load_rom_patch(dev); + if (ret) + return ret; + + ret = mt76pci_load_firmware(dev); + if (ret) + return ret; + + mt76x2_mcu_function_select(dev, Q_SELECT, 1); + return 0; +} + +int mt76x2_mcu_cleanup(struct mt76x2_dev *dev) +{ + struct sk_buff *skb; + + mt76_wr(dev, MT_MCU_INT_LEVEL, 1); + usleep_range(20000, 30000); + + while ((skb = skb_dequeue(&dev->mcu.res_q)) != NULL) + dev_kfree_skb(skb); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h new file mode 100644 index 000000000000..d7a7e83262ce --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h @@ -0,0 +1,155 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __MT76x2_MCU_H +#define __MT76x2_MCU_H + +/* Register definitions */ +#define MT_MCU_CPU_CTL 0x0704 +#define MT_MCU_CLOCK_CTL 0x0708 +#define MT_MCU_RESET_CTL 0x070C +#define MT_MCU_INT_LEVEL 0x0718 +#define MT_MCU_COM_REG0 0x0730 +#define MT_MCU_COM_REG1 0x0734 +#define MT_MCU_COM_REG2 0x0738 +#define MT_MCU_COM_REG3 0x073C +#define MT_MCU_PCIE_REMAP_BASE1 0x0740 +#define MT_MCU_PCIE_REMAP_BASE2 0x0744 +#define MT_MCU_PCIE_REMAP_BASE3 0x0748 +#define MT_MCU_PCIE_REMAP_BASE4 0x074C + +#define MT_LED_CTRL 0x0770 +#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n))) +#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n))) +#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n))) +#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n))) + +#define MT_LED_TX_BLINK_0 0x0774 +#define MT_LED_TX_BLINK_1 0x0778 + +#define MT_LED_S0_BASE 0x077C +#define MT_LED_S0(_n) (MT_LED_S0_BASE + 8 * (_n)) +#define MT_LED_S1_BASE 0x0780 +#define MT_LED_S1(_n) (MT_LED_S1_BASE + 8 * (_n)) +#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24) +#define MT_LED_STATUS_OFF(_v) (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \ + MT_LED_STATUS_OFF_MASK) +#define MT_LED_STATUS_ON_MASK GENMASK(23, 16) +#define MT_LED_STATUS_ON(_v) (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \ + MT_LED_STATUS_ON_MASK) +#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 8) +#define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \ + MT_LED_STATUS_DURATION_MASK) + +#define MT_MCU_SEMAPHORE_00 0x07B0 +#define MT_MCU_SEMAPHORE_01 0x07B4 +#define MT_MCU_SEMAPHORE_02 0x07B8 +#define MT_MCU_SEMAPHORE_03 0x07BC + +#define MT_MCU_ROM_PATCH_OFFSET 0x80000 +#define MT_MCU_ROM_PATCH_ADDR 0x90000 + +#define MT_MCU_ILM_OFFSET 0x80000 +#define MT_MCU_ILM_ADDR 0x80000 + +#define MT_MCU_DLM_OFFSET 0x100000 +#define MT_MCU_DLM_ADDR 0x90000 +#define MT_MCU_DLM_ADDR_E3 0x90800 + +enum mcu_cmd { + CMD_FUN_SET_OP = 1, + CMD_LOAD_CR = 2, + CMD_INIT_GAIN_OP = 3, + CMD_DYNC_VGA_OP = 6, + CMD_TDLS_CH_SW = 7, + CMD_BURST_WRITE = 8, + CMD_READ_MODIFY_WRITE = 9, + CMD_RANDOM_READ = 10, + CMD_BURST_READ = 11, + CMD_RANDOM_WRITE = 12, + CMD_LED_MODE_OP = 16, + CMD_POWER_SAVING_OP = 20, + CMD_WOW_CONFIG = 21, + CMD_WOW_QUERY = 22, + CMD_WOW_FEATURE = 24, + CMD_CARRIER_DETECT_OP = 28, + CMD_RADOR_DETECT_OP = 29, + CMD_SWITCH_CHANNEL_OP = 30, + CMD_CALIBRATION_OP = 31, + CMD_BEACON_OP = 32, + CMD_ANTENNA_OP = 33, +}; + +enum mcu_function { + Q_SELECT = 1, + BW_SETTING = 2, + USB2_SW_DISCONNECT = 2, + USB3_SW_DISCONNECT = 3, + LOG_FW_DEBUG_MSG = 4, + GET_FW_VERSION = 5, +}; + +enum mcu_power_mode { + RADIO_OFF = 0x30, + RADIO_ON = 0x31, + RADIO_OFF_AUTO_WAKEUP = 0x32, + RADIO_OFF_ADVANCE = 0x33, + RADIO_ON_ADVANCE = 0x34, +}; + +enum mcu_calibration { + MCU_CAL_R = 1, + MCU_CAL_TEMP_SENSOR, + MCU_CAL_RXDCOC, + MCU_CAL_RC, + MCU_CAL_SX_LOGEN, + MCU_CAL_LC, + MCU_CAL_TX_LOFT, + MCU_CAL_TXIQ, + MCU_CAL_TSSI, + MCU_CAL_TSSI_COMP, + MCU_CAL_DPD, + MCU_CAL_RXIQC_FI, + MCU_CAL_RXIQC_FD, + MCU_CAL_PWRON, + MCU_CAL_TX_SHAPING, +}; + +enum mt76x2_mcu_cr_mode { + MT_RF_CR, + MT_BBP_CR, + MT_RF_BBP_CR, + MT_HL_TEMP_CR_UPDATE, +}; + +struct mt76x2_tssi_comp { + u8 pa_mode; + u8 cal_mode; + u16 pad; + + u8 slope0; + u8 slope1; + u8 offset0; + 
u8 offset1; +} __packed __aligned(4); + +int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type, + u32 param); +int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data); +int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain, + bool force); + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c new file mode 100644 index 000000000000..e66f047ea448 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "mt76x2.h" +#include "mt76x2_trace.h" + +static const struct pci_device_id mt76pci_device_table[] = { + { PCI_DEVICE(0x14c3, 0x7662) }, + { PCI_DEVICE(0x14c3, 0x7612) }, + { PCI_DEVICE(0x14c3, 0x7602) }, + { }, +}; + +static int +mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct mt76x2_dev *dev; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + if (ret) + return ret; + + pci_set_master(pdev); + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + dev = mt76x2_alloc_device(&pdev->dev); + if (!dev) + return -ENOMEM; + + mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); + + dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION); + dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev); + + ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x2_irq_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (ret) + goto error; + + ret = mt76x2_register_device(dev); + if (ret) + goto error; + + /* Fix up ASPM configuration */ + + /* RG_SSUSB_G1_CDR_BIR_LTR = 0x9 */ + mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9); + + /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */ + mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf); + + /* RG_SSUSB_CDR_BR_PE1D = 0x3 */ + mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3); + + return 0; + +error: + ieee80211_free_hw(mt76_hw(dev)); + return ret; +} + +static void +mt76pci_remove(struct pci_dev *pdev) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + + mt76_unregister_device(mdev); + mt76x2_cleanup(dev); + ieee80211_free_hw(mdev->hw); +} + +MODULE_DEVICE_TABLE(pci, mt76pci_device_table); +MODULE_FIRMWARE(MT7662_FIRMWARE); +MODULE_FIRMWARE(MT7662_ROM_PATCH); +MODULE_LICENSE("Dual BSD/GPL"); + +static struct pci_driver mt76pci_driver = { + .name = KBUILD_MODNAME, + .id_table = mt76pci_device_table, + .probe = mt76pci_probe, + .remove = mt76pci_remove, +}; + +module_pci_driver(mt76pci_driver); diff --git 
a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c new file mode 100644 index 000000000000..5b742749d5de --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -0,0 +1,740 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/delay.h> +#include "mt76x2.h" +#include "mt76x2_mcu.h" +#include "mt76x2_eeprom.h" + +static void +mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset) +{ + s8 gain; + + gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg))); + gain -= offset / 2; + mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain); +} + +static void +mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset) +{ + s8 gain; + + gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg))); + gain += offset; + mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain); +} + +static void +mt76x2_apply_gain_adj(struct mt76x2_dev *dev) +{ + s8 *gain_adj = dev->cal.rx.high_gain; + + mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]); + mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]); + + mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]); + mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]); +} + +static u32 +mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4) +{ + u32 val = 0; + + val |= (v1 & (BIT(6) - 1)) << 0; + val |= (v2 & (BIT(6) - 1)) << 8; + val |= (v3 & (BIT(6) - 1)) << 16; + val |= (v4 & (BIT(6) - 1)) << 24; + return val; +} + +int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain) +{ + struct mt76x2_rx_freq_cal *cal = &dev->cal.rx; + + rssi += cal->rssi_offset[chain]; + rssi -= cal->lna_gain; + + return rssi; +} + +static u8 +mt76x2_txpower_check(int value) +{ + if (value < 0) + return 0; + if (value > 0x2f) + return 0x2f; + return value; +} + +static void +mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset) +{ + int i; + + for (i = 0; i < sizeof(r->all); i++) + r->all[i] += offset; +} + +static void +mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit) +{ + int i; + + for (i = 0; i < sizeof(r->all); i++) + if (r->all[i] > limit) + r->all[i] = limit; +} + +void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) +{ + enum nl80211_chan_width width = dev->mt76.chandef.width; + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + struct mt76x2_tx_power_info txp; + int txp_0, txp_1, delta = 0; + struct mt76_rate_power t = {}; + + mt76x2_get_power_info(dev, &txp, chan); + + if (width == NL80211_CHAN_WIDTH_40) + delta = txp.delta_bw40; + else if (width == NL80211_CHAN_WIDTH_80) + delta = txp.delta_bw80; + + if (txp.target_power > dev->txpower_conf) + delta -= txp.target_power - dev->txpower_conf; + + mt76x2_get_rate_power(dev, &t, chan); + mt76x2_add_rate_power_offset(&t, 
txp.chain[0].target_power + + txp.chain[0].delta); + mt76x2_limit_rate_power(&t, dev->txpower_conf); + dev->txpower_cur = mt76x2_get_max_rate_power(&t); + mt76x2_add_rate_power_offset(&t, -(txp.chain[0].target_power + + txp.chain[0].delta + delta)); + dev->target_power = txp.chain[0].target_power; + dev->target_power_delta[0] = txp.chain[0].delta + delta; + dev->target_power_delta[1] = txp.chain[1].delta + delta; + dev->rate_power = t; + + txp_0 = mt76x2_txpower_check(txp.chain[0].target_power + + txp.chain[0].delta + delta); + + txp_1 = mt76x2_txpower_check(txp.chain[1].target_power + + txp.chain[1].delta + delta); + + mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0); + mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1); + + mt76_wr(dev, MT_TX_PWR_CFG_0, + mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2])); + mt76_wr(dev, MT_TX_PWR_CFG_1, + mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2])); + mt76_wr(dev, MT_TX_PWR_CFG_2, + mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10])); + mt76_wr(dev, MT_TX_PWR_CFG_3, + mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2])); + mt76_wr(dev, MT_TX_PWR_CFG_4, + mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0)); + mt76_wr(dev, MT_TX_PWR_CFG_7, + mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8])); + mt76_wr(dev, MT_TX_PWR_CFG_8, + mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0)); + mt76_wr(dev, MT_TX_PWR_CFG_9, + mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0)); +} + +static bool +mt76x2_channel_silent(struct mt76x2_dev *dev) +{ + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + + return ((chan->flags & IEEE80211_CHAN_RADAR) && + chan->dfs_state != NL80211_DFS_AVAILABLE); +} + +static bool +mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev) +{ + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + u32 flag = 0; + + if (!mt76x2_tssi_enabled(dev)) + return false; + + if (mt76x2_channel_silent(dev)) + return false; + + if (chan->band == NL80211_BAND_2GHZ) + flag |= BIT(0); + + if (mt76x2_ext_pa_enabled(dev, chan->band)) + flag |= BIT(8); + + mt76x2_mcu_calibrate(dev, MCU_CAL_TSSI, flag); + dev->cal.tssi_cal_done = true; + return true; +} + +static void +mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped) +{ + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + bool is_5ghz = chan->band == NL80211_BAND_5GHZ; + + if (dev->cal.channel_cal_done) + return; + + if (mt76x2_channel_silent(dev)) + return; + + if (!dev->cal.tssi_cal_done) + mt76x2_phy_tssi_init_cal(dev); + + if (!mac_stopped) + mt76x2_mac_stop(dev, false); + + if (is_5ghz) + mt76x2_mcu_calibrate(dev, MCU_CAL_LC, 0); + + mt76x2_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz); + mt76x2_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz); + mt76x2_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz); + mt76x2_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0); + mt76x2_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0); + + if (!mac_stopped) + mt76x2_mac_resume(dev); + + mt76x2_apply_gain_adj(dev); + + dev->cal.channel_cal_done = true; +} + +static void +mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band) +{ + u32 pa_mode[2]; + u32 pa_mode_adj; + + if (band == NL80211_BAND_2GHZ) { + pa_mode[0] = 0x010055ff; + pa_mode[1] = 0x00550055; + + mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00); + mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06); + + if (mt76x2_ext_pa_enabled(dev, band)) { + mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00); + mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00); + } else { + 
mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200); + mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200); + } + } else { + pa_mode[0] = 0x0000ffff; + pa_mode[1] = 0x00ff00ff; + + if (mt76x2_ext_pa_enabled(dev, band)) { + mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400); + mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476); + } else { + mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400); + mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476); + } + mt76_wr(dev, MT_TX_ALC_CFG_4, 0); + + if (mt76x2_ext_pa_enabled(dev, band)) + pa_mode_adj = 0x04000000; + else + pa_mode_adj = 0; + + mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj); + mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj); + } + + mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]); + mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]); + mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]); + mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]); + + if (mt76x2_ext_pa_enabled(dev, band)) { + u32 val; + + if (band == NL80211_BAND_2GHZ) + val = 0x3c3c023c; + else + val = 0x363c023c; + + mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val); + mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val); + mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818); + } else { + if (band == NL80211_BAND_2GHZ) { + u32 val = 0x0f3c3c3c; + + mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val); + mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val); + mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606); + } else { + mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c); + mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28); + mt76_wr(dev, MT_TX_ALC_CFG_4, 0); + } + } +} + +static void +mt76x2_configure_tx_delay(struct mt76x2_dev *dev, enum nl80211_band band, u8 bw) +{ + u32 cfg0, cfg1; + + if (mt76x2_ext_pa_enabled(dev, band)) { + cfg0 = bw ? 0x000b0c01 : 0x00101101; + cfg1 = 0x00011414; + } else { + cfg0 = bw ? 0x000b0b01 : 0x00101001; + cfg1 = 0x00021414; + } + mt76_wr(dev, MT_TX_SW_CFG0, cfg0); + mt76_wr(dev, MT_TX_SW_CFG1, cfg1); + + mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15); +} + +static void +mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl) +{ + int core_val, agc_val; + + switch (width) { + case NL80211_CHAN_WIDTH_80: + core_val = 3; + agc_val = 7; + break; + case NL80211_CHAN_WIDTH_40: + core_val = 2; + agc_val = 3; + break; + default: + core_val = 0; + agc_val = 1; + break; + } + + mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val); + mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val); + mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl); + mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl); +} + +static void +mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper) +{ + switch (band) { + case NL80211_BAND_2GHZ: + mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); + mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); + break; + case NL80211_BAND_5GHZ: + mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); + mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); + break; + } + + mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M, + primary_upper); +} + +static void +mt76x2_set_rx_chains(struct mt76x2_dev *dev) +{ + u32 val; + + val = mt76_rr(dev, MT_BBP(AGC, 0)); + val &= ~(BIT(3) | BIT(4)); + + if (dev->chainmask & BIT(1)) + val |= BIT(3); + + mt76_wr(dev, MT_BBP(AGC, 0), val); +} + +static void +mt76x2_set_tx_dac(struct mt76x2_dev *dev) +{ + if (dev->chainmask & BIT(1)) + mt76_set(dev, MT_BBP(TXBE, 5), 3); + else + mt76_clear(dev, MT_BBP(TXBE, 5), 3); +} + +static void +mt76x2_get_agc_gain(struct mt76x2_dev *dev, u8 *dest) +{ + dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN); + 
dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN); +} + +static int +mt76x2_get_rssi_gain_thresh(struct mt76x2_dev *dev) +{ + switch (dev->mt76.chandef.width) { + case NL80211_CHAN_WIDTH_80: + return -62; + case NL80211_CHAN_WIDTH_40: + return -65; + default: + return -68; + } +} + +static int +mt76x2_get_low_rssi_gain_thresh(struct mt76x2_dev *dev) +{ + switch (dev->mt76.chandef.width) { + case NL80211_CHAN_WIDTH_80: + return -76; + case NL80211_CHAN_WIDTH_40: + return -79; + default: + return -82; + } +} + +static void +mt76x2_phy_set_gain_val(struct mt76x2_dev *dev) +{ + u32 val; + u8 gain_val[2]; + + gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust; + gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust; + + if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40) + val = 0x1e42 << 16; + else + val = 0x1836 << 16; + + val |= 0xf8; + + mt76_wr(dev, MT_BBP(AGC, 8), + val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0])); + mt76_wr(dev, MT_BBP(AGC, 9), + val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1])); + + if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR) + mt76x2_dfs_adjust_agc(dev); +} + +static void +mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev) +{ + u32 false_cca; + u8 limit = dev->cal.low_gain > 1 ? 4 : 16; + + false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1)); + if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) + dev->cal.agc_gain_adjust += 2; + else if (false_cca < 10 && dev->cal.agc_gain_adjust > 0) + dev->cal.agc_gain_adjust -= 2; + else + return; + + mt76x2_phy_set_gain_val(dev); +} + +static void +mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) +{ + u32 val = mt76_rr(dev, MT_BBP(AGC, 20)); + int rssi0 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI0, val); + int rssi1 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI1, val); + u8 *gain = dev->cal.agc_gain_init; + u8 gain_delta; + int low_gain; + + dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 + (rssi0 << 8); + dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 + (rssi1 << 8); + dev->cal.avg_rssi_all = (dev->cal.avg_rssi[0] + + dev->cal.avg_rssi[1]) / 512; + + low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) + + (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev)); + + if (dev->cal.low_gain == low_gain) { + mt76x2_phy_adjust_vga_gain(dev); + return; + } + + dev->cal.low_gain = low_gain; + + if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) + mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211); + else + mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423); + + if (low_gain) { + mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); + mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808); + mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808); + if (mt76x2_has_ext_lna(dev)) + gain_delta = 10; + else + gain_delta = 14; + } else { + mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); + if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) + mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014); + else + mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116); + mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C); + gain_delta = 0; + } + + dev->cal.agc_gain_cur[0] = gain[0] - gain_delta; + dev->cal.agc_gain_cur[1] = gain[1] - gain_delta; + dev->cal.agc_gain_adjust = 0; + mt76x2_phy_set_gain_val(dev); +} + +int mt76x2_phy_set_channel(struct mt76x2_dev *dev, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_channel *chan = chandef->chan; + bool scan = test_bit(MT76_SCANNING, &dev->mt76.state); + enum nl80211_band band = chan->band; + u8 channel; + + u32 ext_cca_chan[4] = { + [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) | + 
FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)), + [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)), + [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)), + [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) | + FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)), + }; + int ch_group_index; + u8 bw, bw_index; + int freq, freq1; + int ret; + + dev->cal.channel_cal_done = false; + freq = chandef->chan->center_freq; + freq1 = chandef->center_freq1; + channel = chan->hw_value; + + switch (chandef->width) { + case NL80211_CHAN_WIDTH_40: + bw = 1; + if (freq1 > freq) { + bw_index = 1; + ch_group_index = 0; + } else { + bw_index = 3; + ch_group_index = 1; + } + channel += 2 - ch_group_index * 4; + break; + case NL80211_CHAN_WIDTH_80: + ch_group_index = (freq - freq1 + 30) / 20; + if (WARN_ON(ch_group_index < 0 || ch_group_index > 3)) + ch_group_index = 0; + bw = 2; + bw_index = ch_group_index; + channel += 6 - ch_group_index * 4; + break; + default: + bw = 0; + bw_index = 0; + ch_group_index = 0; + break; + } + + mt76x2_read_rx_gain(dev); + mt76x2_phy_set_txpower_regs(dev, band); + mt76x2_configure_tx_delay(dev, band, bw); + mt76x2_phy_set_txpower(dev); + + mt76x2_set_rx_chains(dev); + mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1); + mt76x2_phy_set_bw(dev, chandef->width, ch_group_index); + mt76x2_set_tx_dac(dev); + + mt76_rmw(dev, MT_EXT_CCA_CFG, + (MT_EXT_CCA_CFG_CCA0 | + MT_EXT_CCA_CFG_CCA1 | + MT_EXT_CCA_CFG_CCA2 | + MT_EXT_CCA_CFG_CCA3 | + MT_EXT_CCA_CFG_CCA_MASK), + ext_cca_chan[ch_group_index]); + + ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan); + if (ret) + return ret; + + mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true); + + /* Enable LDPC Rx */ + if (mt76xx_rev(dev) >= MT76XX_REV_E3) + mt76_set(dev, MT_BBP(RXO, 13), BIT(10)); + + if (!dev->cal.init_cal_done) { + u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT); + + if (val != 0xff) + mt76x2_mcu_calibrate(dev, MCU_CAL_R, 0); + } + + mt76x2_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel); + + /* Rx LPF calibration */ + if (!dev->cal.init_cal_done) + mt76x2_mcu_calibrate(dev, MCU_CAL_RC, 0); + + dev->cal.init_cal_done = true; + + mt76_wr(dev, MT_BBP(AGC, 61), 0xFF64A4E2); + mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010); + mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404); + mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070); + mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101B3F); + + if (scan) + return 0; + + dev->cal.low_gain = -1; + mt76x2_phy_channel_calibrate(dev, true); + mt76x2_get_agc_gain(dev, dev->cal.agc_gain_init); + memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init, + sizeof(dev->cal.agc_gain_cur)); + + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work, + MT_CALIBRATE_INTERVAL); + + return 0; +} + +static void +mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev) +{ + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + struct mt76x2_tx_power_info txp; + struct mt76x2_tssi_comp t = {}; + + if (!dev->cal.tssi_cal_done) + return; + + if (!dev->cal.tssi_comp_pending) { + /* TSSI trigger */ + 
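/* Triggering only starts the measurement; the MCU completes it asynchronously, so the result is collected on a later pass once BIT(4) of MT_BBP(CORE, 34) has cleared */ +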
t.cal_mode = BIT(0); + mt76x2_mcu_tssi_comp(dev, &t); + dev->cal.tssi_comp_pending = true; + } else { + if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4)) + return; + + dev->cal.tssi_comp_pending = false; + mt76x2_get_power_info(dev, &txp, chan); + + if (mt76x2_ext_pa_enabled(dev, chan->band)) + t.pa_mode = 1; + + t.cal_mode = BIT(1); + t.slope0 = txp.chain[0].tssi_slope; + t.offset0 = txp.chain[0].tssi_offset; + t.slope1 = txp.chain[1].tssi_slope; + t.offset1 = txp.chain[1].tssi_offset; + mt76x2_mcu_tssi_comp(dev, &t); + + if (t.pa_mode || dev->cal.dpd_cal_done) + return; + + usleep_range(10000, 20000); + mt76x2_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value); + dev->cal.dpd_cal_done = true; + } +} + +static void +mt76x2_phy_temp_compensate(struct mt76x2_dev *dev) +{ + struct mt76x2_temp_comp t; + int temp, db_diff; + + if (mt76x2_get_temp_comp(dev, &t)) + return; + + temp = mt76_get_field(dev, MT_TEMP_SENSOR, MT_TEMP_SENSOR_VAL); + temp -= t.temp_25_ref; + temp = (temp * 1789) / 1000 + 25; + dev->cal.temp = temp; + + if (temp > 25) + db_diff = (temp - 25) / t.high_slope; + else + db_diff = (25 - temp) / t.low_slope; + + db_diff = min(db_diff, t.upper_bound); + db_diff = max(db_diff, t.lower_bound); + + mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, + db_diff * 2); + mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP, + db_diff * 2); +} + +void mt76x2_phy_calibrate(struct work_struct *work) +{ + struct mt76x2_dev *dev; + + dev = container_of(work, struct mt76x2_dev, cal_work.work); + mt76x2_phy_channel_calibrate(dev, false); + mt76x2_phy_tssi_compensate(dev); + mt76x2_phy_temp_compensate(dev); + mt76x2_phy_update_channel_gain(dev); + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work, + MT_CALIBRATE_INTERVAL); +} + +int mt76x2_phy_start(struct mt76x2_dev *dev) +{ + int ret; + + ret = mt76x2_mcu_set_radio_state(dev, true); + if (ret) + return ret; + + mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0); + + return ret; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h new file mode 100644 index 000000000000..ce3ab85c8b0f --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h @@ -0,0 +1,587 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __MT76x2_REGS_H +#define __MT76x2_REGS_H + +#define MT_ASIC_VERSION 0x0000 + +#define MT76XX_REV_E3 0x22 +#define MT76XX_REV_E4 0x33 + +#define MT_CMB_CTRL 0x0020 +#define MT_CMB_CTRL_XTAL_RDY BIT(22) +#define MT_CMB_CTRL_PLL_LD BIT(23) + +#define MT_EFUSE_CTRL 0x0024 +#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0) +#define MT_EFUSE_CTRL_MODE GENMASK(7, 6) +#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8) +#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14) +#define MT_EFUSE_CTRL_AIN GENMASK(25, 16) +#define MT_EFUSE_CTRL_KICK BIT(30) +#define MT_EFUSE_CTRL_SEL BIT(31) + +#define MT_EFUSE_DATA_BASE 0x0028 +#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2)) + +#define MT_COEXCFG0 0x0040 +#define MT_COEXCFG0_COEX_EN BIT(0) + +#define MT_WLAN_FUN_CTRL 0x0080 +#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0) +#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1) +#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2) + +#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */ +#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */ + +#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4) +#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5) +#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6) +#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7) + +#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */ +#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */ + +#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */ +#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */ +#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */ + +#define MT_XO_CTRL0 0x0100 +#define MT_XO_CTRL1 0x0104 +#define MT_XO_CTRL2 0x0108 +#define MT_XO_CTRL3 0x010c +#define MT_XO_CTRL4 0x0110 + +#define MT_XO_CTRL5 0x0114 +#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8) + +#define MT_XO_CTRL6 0x0118 +#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8) + +#define MT_XO_CTRL7 0x011c + +#define MT_WLAN_MTC_CTRL 0x10148 +#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0) +#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12) +#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13) +#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16) +#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20) +#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21) +#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22) +#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24) +#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25) +#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26) +#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27) +#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28) + +#define MT_INT_SOURCE_CSR 0x0200 +#define MT_INT_MASK_CSR 0x0204 + +#define MT_INT_RX_DONE(_n) BIT(_n) +#define MT_INT_RX_DONE_ALL GENMASK(1, 0) +#define MT_INT_TX_DONE_ALL GENMASK(13, 4) +#define MT_INT_TX_DONE(_n) BIT(_n + 4) +#define MT_INT_RX_COHERENT BIT(16) +#define MT_INT_TX_COHERENT BIT(17) +#define MT_INT_ANY_COHERENT BIT(18) +#define MT_INT_MCU_CMD BIT(19) +#define MT_INT_TBTT BIT(20) +#define MT_INT_PRE_TBTT BIT(21) +#define MT_INT_TX_STAT BIT(22) +#define MT_INT_AUTO_WAKEUP BIT(23) +#define MT_INT_GPTIMER BIT(24) +#define MT_INT_RXDELAYINT BIT(26) +#define MT_INT_TXDELAYINT BIT(27) + +#define MT_WPDMA_GLO_CFG 0x0208 +#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0) +#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1) +#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2) +#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3) +#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4) +#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6) +#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7) +#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8) +#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30) +#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31) + +#define 
MT_WPDMA_RST_IDX 0x020c + +#define MT_WPDMA_DELAY_INT_CFG 0x0210 + +#define MT_WMM_AIFSN 0x0214 +#define MT_WMM_AIFSN_MASK GENMASK(3, 0) +#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4) + +#define MT_WMM_CWMIN 0x0218 +#define MT_WMM_CWMIN_MASK GENMASK(3, 0) +#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4) + +#define MT_WMM_CWMAX 0x021c +#define MT_WMM_CWMAX_MASK GENMASK(3, 0) +#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4) + +#define MT_WMM_TXOP_BASE 0x0220 +#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2)) +#define MT_WMM_TXOP_SHIFT(_n) ((_n & 1) * 16) +#define MT_WMM_TXOP_MASK GENMASK(15, 0) + +#define MT_TSO_CTRL 0x0250 +#define MT_HEADER_TRANS_CTRL_REG 0x0260 + +#define MT_TX_RING_BASE 0x0300 +#define MT_RX_RING_BASE 0x03c0 + +#define MT_TX_HW_QUEUE_MCU 8 +#define MT_TX_HW_QUEUE_MGMT 9 + +#define MT_PBF_SYS_CTRL 0x0400 +#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0) +#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1) +#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2) +#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3) +#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4) + +#define MT_PBF_CFG 0x0404 +#define MT_PBF_CFG_TX0Q_EN BIT(0) +#define MT_PBF_CFG_TX1Q_EN BIT(1) +#define MT_PBF_CFG_TX2Q_EN BIT(2) +#define MT_PBF_CFG_TX3Q_EN BIT(3) +#define MT_PBF_CFG_RX0Q_EN BIT(4) +#define MT_PBF_CFG_RX_DROP_EN BIT(8) + +#define MT_PBF_TX_MAX_PCNT 0x0408 +#define MT_PBF_RX_MAX_PCNT 0x040c + +#define MT_BCN_OFFSET_BASE 0x041c +#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2)) + +#define MT_RF_BYPASS_0 0x0504 +#define MT_RF_BYPASS_1 0x0508 +#define MT_RF_SETTING_0 0x050c + +#define MT_RF_DATA_WRITE 0x0524 + +#define MT_RF_CTRL 0x0528 +#define MT_RF_CTRL_ADDR GENMASK(11, 0) +#define MT_RF_CTRL_WRITE BIT(12) +#define MT_RF_CTRL_BUSY BIT(13) +#define MT_RF_CTRL_IDX BIT(16) + +#define MT_RF_DATA_READ 0x052c + +#define MT_FCE_PSE_CTRL 0x0800 +#define MT_FCE_PARAMETERS 0x0804 +#define MT_FCE_CSO 0x0808 + +#define MT_FCE_L2_STUFF 0x080c +#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0) +#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1) +#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2) +#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3) +#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4) +#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5) +#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8) +#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16) +#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24) + +#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824 + +#define MT_PAUSE_ENABLE_CONTROL1 0x0a38 + +#define MT_MAC_CSR0 0x1000 + +#define MT_MAC_SYS_CTRL 0x1004 +#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0) +#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1) +#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2) +#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3) + +#define MT_MAC_ADDR_DW0 0x1008 +#define MT_MAC_ADDR_DW1 0x100c + +#define MT_MAC_BSSID_DW0 0x1010 +#define MT_MAC_BSSID_DW1 0x1014 +#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0) +#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16) +#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18) +#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21) +#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22) +#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23) +#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24) + +#define MT_MAX_LEN_CFG 0x1018 + +#define MT_AMPDU_MAX_LEN_20M1S 0x1030 +#define MT_AMPDU_MAX_LEN_20M2S 0x1034 +#define MT_AMPDU_MAX_LEN_40M1S 0x1038 +#define MT_AMPDU_MAX_LEN_40M2S 0x103c +#define MT_AMPDU_MAX_LEN 0x1040 + +#define MT_WCID_DROP_BASE 0x106c +#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4) +#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32) + +#define MT_BCN_BYPASS_MASK 0x108c + 
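+/* AP client BSSID match table: one 8-byte slot per index, a low word plus a high word carrying the address tail and the enable bit */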
+#define MT_MAC_APC_BSSID_BASE 0x1090 +#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8)) +#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4)) +#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0) +#define MT_MAC_APC_BSSID0_H_EN BIT(16) + +#define MT_XIFS_TIME_CFG 0x1100 +#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0) +#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8) +#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16) +#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20) +#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29) + +#define MT_BKOFF_SLOT_CFG 0x1104 +#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0) +#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8) + +#define MT_CH_TIME_CFG 0x110c +#define MT_CH_TIME_CFG_TIMER_EN BIT(0) +#define MT_CH_TIME_CFG_TX_AS_BUSY BIT(1) +#define MT_CH_TIME_CFG_RX_AS_BUSY BIT(2) +#define MT_CH_TIME_CFG_NAV_AS_BUSY BIT(3) +#define MT_CH_TIME_CFG_EIFS_AS_BUSY BIT(4) +#define MT_CH_TIME_CFG_MDRDY_CNT_EN BIT(5) +#define MT_CH_TIME_CFG_CH_TIMER_CLR GENMASK(9, 8) +#define MT_CH_TIME_CFG_MDRDY_CLR GENMASK(11, 10) + +#define MT_PBF_LIFE_TIMER 0x1110 + +#define MT_BEACON_TIME_CFG 0x1114 +#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0) +#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16) +#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17) +#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19) +#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20) +#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24) + +#define MT_TBTT_SYNC_CFG 0x1118 +#define MT_TBTT_TIMER_CFG 0x1124 + +#define MT_INT_TIMER_CFG 0x1128 +#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0) +#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16) + +#define MT_INT_TIMER_EN 0x112c +#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0) +#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1) + +#define MT_CH_IDLE 0x1130 +#define MT_CH_BUSY 0x1134 +#define MT_EXT_CH_BUSY 0x1138 +#define MT_ED_CCA_TIMER 0x1140 + +#define MT_MAC_STATUS 0x1200 +#define MT_MAC_STATUS_TX BIT(0) +#define MT_MAC_STATUS_RX BIT(1) + +#define MT_PWR_PIN_CFG 0x1204 +#define MT_AUX_CLK_CFG 0x120c + +#define MT_BB_PA_MODE_CFG0 0x1214 +#define MT_BB_PA_MODE_CFG1 0x1218 +#define MT_RF_PA_MODE_CFG0 0x121c +#define MT_RF_PA_MODE_CFG1 0x1220 + +#define MT_RF_PA_MODE_ADJ0 0x1228 +#define MT_RF_PA_MODE_ADJ1 0x122c + +#define MT_DACCLK_EN_DLY_CFG 0x1264 + +#define MT_EDCA_CFG_BASE 0x1300 +#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2)) +#define MT_EDCA_CFG_TXOP GENMASK(7, 0) +#define MT_EDCA_CFG_AIFSN GENMASK(11, 8) +#define MT_EDCA_CFG_CWMIN GENMASK(15, 12) +#define MT_EDCA_CFG_CWMAX GENMASK(19, 16) + +#define MT_TX_PWR_CFG_0 0x1314 +#define MT_TX_PWR_CFG_1 0x1318 +#define MT_TX_PWR_CFG_2 0x131c +#define MT_TX_PWR_CFG_3 0x1320 +#define MT_TX_PWR_CFG_4 0x1324 + +#define MT_TX_BAND_CFG 0x132c +#define MT_TX_BAND_CFG_UPPER_40M BIT(0) +#define MT_TX_BAND_CFG_5G BIT(1) +#define MT_TX_BAND_CFG_2G BIT(2) + +#define MT_HT_FBK_TO_LEGACY 0x1384 +#define MT_TX_MPDU_ADJ_INT 0x1388 + +#define MT_TX_PWR_CFG_7 0x13d4 +#define MT_TX_PWR_CFG_8 0x13d8 +#define MT_TX_PWR_CFG_9 0x13dc + +#define MT_TX_SW_CFG0 0x1330 +#define MT_TX_SW_CFG1 0x1334 +#define MT_TX_SW_CFG2 0x1338 + +#define MT_TXOP_CTRL_CFG 0x1340 + +#define MT_TX_RTS_CFG 0x1344 +#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0) +#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8) +#define MT_TX_RTS_FALLBACK BIT(24) + +#define MT_TX_TIMEOUT_CFG 0x1348 +#define MT_TX_TIMEOUT_CFG_ACKTO GENMASK(15, 8) + +#define MT_TX_RETRY_CFG 0x134c +#define MT_VHT_HT_FBK_CFG1 0x1358 + +#define MT_PROT_CFG_RATE GENMASK(15, 0) +#define MT_PROT_CFG_CTRL 
GENMASK(17, 16) +#define MT_PROT_CFG_NAV GENMASK(19, 18) +#define MT_PROT_CFG_TXOP_ALLOW GENMASK(25, 20) +#define MT_PROT_CFG_RTS_THRESH BIT(26) + +#define MT_CCK_PROT_CFG 0x1364 +#define MT_OFDM_PROT_CFG 0x1368 +#define MT_MM20_PROT_CFG 0x136c +#define MT_MM40_PROT_CFG 0x1370 +#define MT_GF20_PROT_CFG 0x1374 +#define MT_GF40_PROT_CFG 0x1378 + +#define MT_EXP_ACK_TIME 0x1380 + +#define MT_TX_PWR_CFG_0_EXT 0x1390 +#define MT_TX_PWR_CFG_1_EXT 0x1394 + +#define MT_TX_FBK_LIMIT 0x1398 +#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0) +#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8) +#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16) +#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17) +#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18) + +#define MT_TX0_RF_GAIN_CORR 0x13a0 +#define MT_TX1_RF_GAIN_CORR 0x13a4 + +#define MT_TX_ALC_CFG_0 0x13b0 +#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0) +#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8) +#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16) +#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24) + +#define MT_TX_ALC_CFG_1 0x13b4 +#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0) + +#define MT_TX_ALC_CFG_2 0x13a8 +#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0) + +#define MT_TX_ALC_CFG_3 0x13ac +#define MT_TX_ALC_CFG_4 0x13c0 +#define MT_TX_ALC_CFG_4_LOWGAIN_CH_EN BIT(31) + +#define MT_TX_ALC_VGA3 0x13c8 + +#define MT_TX_PROT_CFG6 0x13e0 +#define MT_TX_PROT_CFG7 0x13e4 +#define MT_TX_PROT_CFG8 0x13e8 + +#define MT_PIFS_TX_CFG 0x13ec + +#define MT_RX_FILTR_CFG 0x1400 + +#define MT_RX_FILTR_CFG_CRC_ERR BIT(0) +#define MT_RX_FILTR_CFG_PHY_ERR BIT(1) +#define MT_RX_FILTR_CFG_PROMISC BIT(2) +#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3) +#define MT_RX_FILTR_CFG_VER_ERR BIT(4) +#define MT_RX_FILTR_CFG_MCAST BIT(5) +#define MT_RX_FILTR_CFG_BCAST BIT(6) +#define MT_RX_FILTR_CFG_DUP BIT(7) +#define MT_RX_FILTR_CFG_CFACK BIT(8) +#define MT_RX_FILTR_CFG_CFEND BIT(9) +#define MT_RX_FILTR_CFG_ACK BIT(10) +#define MT_RX_FILTR_CFG_CTS BIT(11) +#define MT_RX_FILTR_CFG_RTS BIT(12) +#define MT_RX_FILTR_CFG_PSPOLL BIT(13) +#define MT_RX_FILTR_CFG_BA BIT(14) +#define MT_RX_FILTR_CFG_BAR BIT(15) +#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16) + +#define MT_LEGACY_BASIC_RATE 0x1408 +#define MT_HT_BASIC_RATE 0x140c + +#define MT_HT_CTRL_CFG 0x1410 + +#define MT_EXT_CCA_CFG 0x141c +#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0) +#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2) +#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4) +#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6) +#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8) +#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12) + +#define MT_TX_SW_CFG3 0x1478 + +#define MT_PN_PAD_MODE 0x150c + +#define MT_TXOP_HLDR_ET 0x1608 + +#define MT_PROT_AUTO_TX_CFG 0x1648 +#define MT_PROT_AUTO_TX_CFG_PROT_PADJ GENMASK(11, 8) +#define MT_PROT_AUTO_TX_CFG_AUTO_PADJ GENMASK(27, 24) + +#define MT_RX_STAT_0 0x1700 +#define MT_RX_STAT_0_CRC_ERRORS GENMASK(15, 0) +#define MT_RX_STAT_0_PHY_ERRORS GENMASK(31, 16) + +#define MT_RX_STAT_1 0x1704 +#define MT_RX_STAT_1_CCA_ERRORS GENMASK(15, 0) +#define MT_RX_STAT_1_PLCP_ERRORS GENMASK(31, 16) + +#define MT_RX_STAT_2 0x1708 +#define MT_RX_STAT_2_DUP_ERRORS GENMASK(15, 0) +#define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16) + +#define MT_TX_STAT_FIFO 0x1718 +#define MT_TX_STAT_FIFO_VALID BIT(0) +#define MT_TX_STAT_FIFO_SUCCESS BIT(5) +#define MT_TX_STAT_FIFO_AGGR BIT(6) +#define MT_TX_STAT_FIFO_ACKREQ BIT(7) +#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8) +#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16) + +#define MT_TX_AGG_CNT_BASE0 0x1720 +#define MT_TX_AGG_CNT_BASE1 
0x174c + +#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \ + MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \ + MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2)) + +#define MT_TX_STAT_FIFO_EXT 0x1798 +#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0) +#define MT_TX_STAT_FIFO_EXT_PKTID GENMASK(15, 8) + +#define MT_WCID_TX_RATE_BASE 0x1c00 +#define MT_WCID_TX_RATE(_i) (MT_WCID_TX_RATE_BASE + ((_i) << 3)) + +#define MT_BBP_CORE_BASE 0x2000 +#define MT_BBP_IBI_BASE 0x2100 +#define MT_BBP_AGC_BASE 0x2300 +#define MT_BBP_TXC_BASE 0x2400 +#define MT_BBP_RXC_BASE 0x2500 +#define MT_BBP_TXO_BASE 0x2600 +#define MT_BBP_TXBE_BASE 0x2700 +#define MT_BBP_RXFE_BASE 0x2800 +#define MT_BBP_RXO_BASE 0x2900 +#define MT_BBP_DFS_BASE 0x2a00 +#define MT_BBP_TR_BASE 0x2b00 +#define MT_BBP_CAL_BASE 0x2c00 +#define MT_BBP_DSC_BASE 0x2e00 +#define MT_BBP_PFMU_BASE 0x2f00 + +#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2)) + +#define MT_BBP_CORE_R1_BW GENMASK(4, 3) + +#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8) +#define MT_BBP_AGC_R0_BW GENMASK(14, 12) + +/* AGC, R4/R5 */ +#define MT_BBP_AGC_LNA_HIGH_GAIN GENMASK(21, 16) +#define MT_BBP_AGC_LNA_MID_GAIN GENMASK(13, 8) +#define MT_BBP_AGC_LNA_LOW_GAIN GENMASK(5, 0) + +/* AGC, R6/R7 */ +#define MT_BBP_AGC_LNA_ULOW_GAIN GENMASK(5, 0) + +/* AGC, R8/R9 */ +#define MT_BBP_AGC_LNA_GAIN_MODE GENMASK(7, 6) +#define MT_BBP_AGC_GAIN GENMASK(14, 8) + +#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0) +#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8) + +#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0) + +#define MT_WCID_ADDR_BASE 0x1800 +#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8) + +#define MT_SRAM_BASE 0x4000 + +#define MT_WCID_KEY_BASE 0x8000 +#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32) + +#define MT_WCID_IV_BASE 0xa000 +#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8) + +#define MT_WCID_ATTR_BASE 0xa800 +#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4) + +#define MT_WCID_ATTR_PAIRWISE BIT(0) +#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1) +#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4) +#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7) +#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10) +#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11) +#define MT_WCID_ATTR_WAPI_MCBC BIT(15) +#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24) + +#define MT_SKEY_BASE_0 0xac00 +#define MT_SKEY_BASE_1 0xb400 +#define MT_SKEY_0(_bss, _idx) (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32) +#define MT_SKEY_1(_bss, _idx) (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32) +#define MT_SKEY(_bss, _idx) ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx)) + +#define MT_SKEY_MODE_BASE_0 0xb000 +#define MT_SKEY_MODE_BASE_1 0xb3f0 +#define MT_SKEY_MODE_0(_bss) (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2)) +#define MT_SKEY_MODE_1(_bss) (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2)) +#define MT_SKEY_MODE(_bss) ((_bss & 8) ? 
MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss)) +#define MT_SKEY_MODE_MASK GENMASK(3, 0) +#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1))) + +#define MT_BEACON_BASE 0xc000 + +#define MT_TEMP_SENSOR 0x1d000 +#define MT_TEMP_SENSOR_VAL GENMASK(6, 0) + +struct mt76_wcid_addr { + u8 macaddr[6]; + __le16 ba_mask; +} __packed __aligned(4); + +struct mt76_wcid_key { + u8 key[16]; + u8 tx_mic[8]; + u8 rx_mic[8]; +} __packed __aligned(4); + +enum mt76x2_cipher_type { + MT_CIPHER_NONE, + MT_CIPHER_WEP40, + MT_CIPHER_WEP104, + MT_CIPHER_TKIP, + MT_CIPHER_AES_CCMP, + MT_CIPHER_CKIP40, + MT_CIPHER_CKIP104, + MT_CIPHER_CKIP128, + MT_CIPHER_WAPI, +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c new file mode 100644 index 000000000000..a09f117848d6 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/module.h> + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "mt76x2_trace.h" + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h new file mode 100644 index 000000000000..4cd424148d4b --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h @@ -0,0 +1,144 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#if !defined(__MT76x2_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __MT76x2_TRACE_H + +#include <linux/tracepoint.h> +#include "mt76x2.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mt76x2 + +#define MAXNAME 32 +#define DEV_ENTRY __array(char, wiphy_name, 32) +#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(mt76_hw(dev)->wiphy), MAXNAME) +#define DEV_PR_FMT "%s" +#define DEV_PR_ARG __entry->wiphy_name + +#define TXID_ENTRY __field(u8, wcid) __field(u8, pktid) +#define TXID_ASSIGN __entry->wcid = wcid; __entry->pktid = pktid +#define TXID_PR_FMT " [%d:%d]" +#define TXID_PR_ARG __entry->wcid, __entry->pktid + +DECLARE_EVENT_CLASS(dev_evt, + TP_PROTO(struct mt76x2_dev *dev), + TP_ARGS(dev), + TP_STRUCT__entry( + DEV_ENTRY + ), + TP_fast_assign( + DEV_ASSIGN; + ), + TP_printk(DEV_PR_FMT, DEV_PR_ARG) +); + +DECLARE_EVENT_CLASS(dev_txid_evt, + TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid), + TP_ARGS(dev, wcid, pktid), + TP_STRUCT__entry( + DEV_ENTRY + TXID_ENTRY + ), + TP_fast_assign( + DEV_ASSIGN; + TXID_ASSIGN; + ), + TP_printk( + DEV_PR_FMT TXID_PR_FMT, + DEV_PR_ARG, TXID_PR_ARG + ) +); + +DEFINE_EVENT(dev_evt, mac_txstat_poll, + TP_PROTO(struct mt76x2_dev *dev), + TP_ARGS(dev) +); + +DEFINE_EVENT(dev_txid_evt, mac_txdone_add, + TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid), + TP_ARGS(dev, wcid, pktid) +); + +TRACE_EVENT(mac_txstat_fetch, + TP_PROTO(struct mt76x2_dev *dev, + struct mt76x2_tx_status *stat), + + TP_ARGS(dev, stat), + + TP_STRUCT__entry( + DEV_ENTRY + TXID_ENTRY + __field(bool, success) + __field(bool, aggr) + __field(bool, ack_req) + __field(u16, rate) + __field(u8, retry) + ), + + TP_fast_assign( + DEV_ASSIGN; + __entry->success = stat->success; + __entry->aggr = stat->aggr; + __entry->ack_req = stat->ack_req; + __entry->wcid = stat->wcid; + __entry->pktid = stat->pktid; + __entry->rate = stat->rate; + __entry->retry = stat->retry; + ), + + TP_printk( + DEV_PR_FMT TXID_PR_FMT + " success:%d aggr:%d ack_req:%d" + " rate:%04x retry:%d", + DEV_PR_ARG, TXID_PR_ARG, + __entry->success, __entry->aggr, __entry->ack_req, + __entry->rate, __entry->retry + ) +); + + +TRACE_EVENT(dev_irq, + TP_PROTO(struct mt76x2_dev *dev, u32 val, u32 mask), + + TP_ARGS(dev, val, mask), + + TP_STRUCT__entry( + DEV_ENTRY + __field(u32, val) + __field(u32, mask) + ), + + TP_fast_assign( + DEV_ASSIGN; + __entry->val = val; + __entry->mask = mask; + ), + + TP_printk( + DEV_PR_FMT " %08x & %08x", + DEV_PR_ARG, __entry->val, __entry->mask + ) +); + +#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE mt76x2_trace + +#include <trace/define_trace.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c new file mode 100644 index 000000000000..534e4bf9a34c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c @@ -0,0 +1,260 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "mt76x2.h" +#include "mt76x2_dma.h" + +struct beacon_bc_data { + struct mt76x2_dev *dev; + struct sk_buff_head q; + struct sk_buff *tail[8]; +}; + +void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct mt76x2_dev *dev = hw->priv; + struct ieee80211_vif *vif = info->control.vif; + struct mt76_wcid *wcid = &dev->global_wcid; + + if (control->sta) { + struct mt76x2_sta *msta; + + msta = (struct mt76x2_sta *) control->sta->drv_priv; + wcid = &msta->wcid; + } + + if (vif || (!info->control.hw_key && wcid->hw_key_idx != -1)) { + struct mt76x2_vif *mvif; + + mvif = (struct mt76x2_vif *) vif->drv_priv; + wcid = &mvif->group_wcid; + } + + mt76_tx(&dev->mt76, control->sta, wcid, skb); +} + +void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (info->flags & IEEE80211_TX_CTL_AMPDU) { + ieee80211_free_txskb(mt76_hw(dev), skb); + } else { + ieee80211_tx_info_clear_status(info); + info->status.rates[0].idx = -1; + info->flags |= IEEE80211_TX_STAT_ACK; + ieee80211_tx_status(mt76_hw(dev), skb); + } +} + +s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev, + const struct ieee80211_tx_rate *rate) +{ + s8 max_txpwr; + + if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { + u8 mcs = ieee80211_rate_get_vht_mcs(rate); + + if (mcs == 8 || mcs == 9) { + max_txpwr = dev->rate_power.vht[8]; + } else { + u8 nss, idx; + + nss = ieee80211_rate_get_vht_nss(rate); + idx = ((nss - 1) << 3) + mcs; + max_txpwr = dev->rate_power.ht[idx & 0xf]; + } + } else if (rate->flags & IEEE80211_TX_RC_MCS) { + max_txpwr = dev->rate_power.ht[rate->idx & 0xf]; + } else { + enum nl80211_band band = dev->mt76.chandef.chan->band; + + if (band == NL80211_BAND_2GHZ) { + const struct ieee80211_rate *r; + struct wiphy *wiphy = mt76_hw(dev)->wiphy; + struct mt76_rate_power *rp = &dev->rate_power; + + r = &wiphy->bands[band]->bitrates[rate->idx]; + if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE) + max_txpwr = rp->cck[r->hw_value & 0x3]; + else + max_txpwr = rp->ofdm[r->hw_value & 0x7]; + } else { + max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7]; + } + } + + return max_txpwr; +} + +s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj) +{ + txpwr = min_t(s8, txpwr, dev->txpower_conf); + txpwr -= (dev->target_power + dev->target_power_delta[0]); + txpwr = min_t(s8, txpwr, max_txpwr_adj); + + if (!dev->enable_tpc) + return 0; + else if (txpwr >= 0) + return min_t(s8, txpwr, 7); + else + return (txpwr < -16) ? 
8 : (txpwr + 32) / 2; +} + +void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr) +{ + s8 txpwr_adj; + + txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr, + dev->rate_power.ofdm[4]); + mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, + MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj); + mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, + MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj); +} + +static int mt76x2_insert_hdr_pad(struct sk_buff *skb) +{ + int len = ieee80211_get_hdrlen_from_skb(skb); + + if (len % 4 == 0) + return 0; + + skb_push(skb, 2); + memmove(skb->data, skb->data + 2, len); + + skb->data[len] = 0; + skb->data[len + 1] = 0; + return 2; +} + +int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, struct ieee80211_sta *sta, + u32 *tx_info) +{ + struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int qsel = MT_QSEL_EDCA; + int ret; + + if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128) + mt76x2_mac_wcid_set_drop(dev, wcid->idx, false); + + mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta); + + ret = mt76x2_insert_hdr_pad(skb); + if (ret < 0) + return ret; + + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + qsel = MT_QSEL_MGMT; + + *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) | + MT_TXD_INFO_80211; + + if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv) + *tx_info |= MT_TXD_INFO_WIV; + + return 0; +} + +static void +mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct mt76x2_dev *dev = (struct mt76x2_dev *) priv; + struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; + struct sk_buff *skb = NULL; + + if (!(dev->beacon_mask & BIT(mvif->idx))) + return; + + skb = ieee80211_beacon_get(mt76_hw(dev), vif); + if (!skb) + return; + + mt76x2_mac_set_beacon(dev, mvif->idx, skb); +} + +static void +mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct beacon_bc_data *data = priv; + struct mt76x2_dev *dev = data->dev; + struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; + struct ieee80211_tx_info *info; + struct sk_buff *skb; + + if (!(dev->beacon_mask & BIT(mvif->idx))) + return; + + skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif); + if (!skb) + return; + + info = IEEE80211_SKB_CB(skb); + info->control.vif = vif; + info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; + mt76_skb_set_moredata(skb, true); + __skb_queue_tail(&data->q, skb); + data->tail[mvif->idx] = skb; +} + +void mt76x2_pre_tbtt_tasklet(unsigned long arg) +{ + struct mt76x2_dev *dev = (struct mt76x2_dev *) arg; + struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD]; + struct beacon_bc_data data = {}; + struct sk_buff *skb; + int i, nframes; + + data.dev = dev; + __skb_queue_head_init(&data.q); + + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), + IEEE80211_IFACE_ITER_RESUME_ALL, + mt76x2_update_beacon_iter, dev); + + do { + nframes = skb_queue_len(&data.q); + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), + IEEE80211_IFACE_ITER_RESUME_ALL, + mt76x2_add_buffered_bc, &data); + } while (nframes != skb_queue_len(&data.q)); + + if (!nframes) + return; + + for (i = 0; i < ARRAY_SIZE(data.tail); i++) { + if (!data.tail[i]) + continue; + + mt76_skb_set_moredata(data.tail[i], false); + } + + spin_lock_bh(&q->lock); + while ((skb = __skb_dequeue(&data.q)) != NULL) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = info->control.vif; + struct 
mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; + + mt76_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL); + } + spin_unlock_bh(&q->lock); +} + diff --git a/drivers/net/wireless/mediatek/mt76/trace.c b/drivers/net/wireless/mediatek/mt76/trace.c new file mode 100644 index 000000000000..ea4ab8729ae4 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/trace.c @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/module.h> + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "trace.h" + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/trace.h b/drivers/net/wireless/mediatek/mt76/trace.h new file mode 100644 index 000000000000..ea30895933c5 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/trace.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#if !defined(__MT76_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __MT76_TRACE_H + +#include <linux/tracepoint.h> +#include "mt76.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mt76 + +#define MAXNAME 32 +#define DEV_ENTRY __array(char, wiphy_name, 32) +#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME) +#define DEV_PR_FMT "%s" +#define DEV_PR_ARG __entry->wiphy_name + +#define REG_ENTRY __field(u32, reg) __field(u32, val) +#define REG_ASSIGN __entry->reg = reg; __entry->val = val +#define REG_PR_FMT " %04x=%08x" +#define REG_PR_ARG __entry->reg, __entry->val + +DECLARE_EVENT_CLASS(dev_reg_evt, + TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val), + TP_ARGS(dev, reg, val), + TP_STRUCT__entry( + DEV_ENTRY + REG_ENTRY + ), + TP_fast_assign( + DEV_ASSIGN; + REG_ASSIGN; + ), + TP_printk( + DEV_PR_FMT REG_PR_FMT, + DEV_PR_ARG, REG_PR_ARG + ) +); + +DEFINE_EVENT(dev_reg_evt, reg_rr, + TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val), + TP_ARGS(dev, reg, val) +); + +DEFINE_EVENT(dev_reg_evt, reg_wr, + TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val), + TP_ARGS(dev, reg, val) +); + +#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
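+/* Paired with CREATE_TRACE_POINTS in trace.c, these direct define_trace.h to re-include this header from the local directory and emit the event bodies */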
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +#include <trace/define_trace.h> diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c new file mode 100644 index 000000000000..4eef69bd8a9e --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -0,0 +1,511 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "mt76.h" + +static struct mt76_txwi_cache * +mt76_alloc_txwi(struct mt76_dev *dev) +{ + struct mt76_txwi_cache *t; + dma_addr_t addr; + int size; + + size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1); + t = devm_kzalloc(dev->dev, size, GFP_ATOMIC); + if (!t) + return NULL; + + addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi), + DMA_TO_DEVICE); + t->dma_addr = addr; + + return t; +} + +static struct mt76_txwi_cache * +__mt76_get_txwi(struct mt76_dev *dev) +{ + struct mt76_txwi_cache *t = NULL; + + spin_lock_bh(&dev->lock); + if (!list_empty(&dev->txwi_cache)) { + t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache, + list); + list_del(&t->list); + } + spin_unlock_bh(&dev->lock); + + return t; +} + +static struct mt76_txwi_cache * +mt76_get_txwi(struct mt76_dev *dev) +{ + struct mt76_txwi_cache *t = __mt76_get_txwi(dev); + + if (t) + return t; + + return mt76_alloc_txwi(dev); +} + +void +mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) +{ + if (!t) + return; + + spin_lock_bh(&dev->lock); + list_add(&t->list, &dev->txwi_cache); + spin_unlock_bh(&dev->lock); +} + +void mt76_tx_free(struct mt76_dev *dev) +{ + struct mt76_txwi_cache *t; + + while ((t = __mt76_get_txwi(dev)) != NULL) + dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi), + DMA_TO_DEVICE); +} + +static int +mt76_txq_get_qid(struct ieee80211_txq *txq) +{ + if (!txq->sta) + return MT_TXQ_BE; + + return txq->ac; +} + +int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta) +{ + struct mt76_queue_entry e; + struct mt76_txwi_cache *t; + struct mt76_queue_buf buf[32]; + struct sk_buff *iter; + dma_addr_t addr; + int len; + u32 tx_info = 0; + int n, ret; + + t = mt76_get_txwi(dev); + if (!t) { + ieee80211_free_txskb(dev->hw, skb); + return -ENOMEM; + } + + dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi), + DMA_TO_DEVICE); + ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta, + &tx_info); + dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi), + DMA_TO_DEVICE); + if (ret < 0) + goto free; + + len = skb->len - skb->data_len; + addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(dev->dev, addr)) { + ret = -ENOMEM; + goto free; + } + + n = 0; + buf[n].addr = t->dma_addr; + buf[n++].len = dev->drv->txwi_size; + 
buf[n].addr = addr; + buf[n++].len = len; + + skb_walk_frags(skb, iter) { + if (n == ARRAY_SIZE(buf)) + goto unmap; + + addr = dma_map_single(dev->dev, iter->data, iter->len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev->dev, addr)) + goto unmap; + + buf[n].addr = addr; + buf[n++].len = iter->len; + } + + if (q->queued + (n + 1) / 2 >= q->ndesc - 1) + goto unmap; + + return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t); + +unmap: + ret = -ENOMEM; + for (n--; n > 0; n--) + dma_unmap_single(dev->dev, buf[n].addr, buf[n].len, + DMA_TO_DEVICE); + +free: + e.skb = skb; + e.txwi = t; + dev->drv->tx_complete_skb(dev, q, &e, true); + mt76_put_txwi(dev, t); + return ret; +} +EXPORT_SYMBOL_GPL(mt76_tx_queue_skb); + +void +mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, + struct mt76_wcid *wcid, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct mt76_queue *q; + int qid = skb_get_queue_mapping(skb); + + if (WARN_ON(qid >= MT_TXQ_PSD)) { + qid = MT_TXQ_BE; + skb_set_queue_mapping(skb, qid); + } + + if (!wcid->tx_rate_set) + ieee80211_get_tx_rates(info->control.vif, sta, skb, + info->control.rates, 1); + + q = &dev->q_tx[qid]; + + spin_lock_bh(&q->lock); + mt76_tx_queue_skb(dev, q, skb, wcid, sta); + dev->queue_ops->kick(dev, q); + + if (q->queued > q->ndesc - 8) + ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); + spin_unlock_bh(&q->lock); +} +EXPORT_SYMBOL_GPL(mt76_tx); + +static struct sk_buff * +mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps) +{ + struct ieee80211_txq *txq = mtxq_to_txq(mtxq); + struct sk_buff *skb; + + skb = skb_dequeue(&mtxq->retry_q); + if (skb) { + u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + + if (ps && skb_queue_empty(&mtxq->retry_q)) + ieee80211_sta_set_buffered(txq->sta, tid, false); + + return skb; + } + + skb = ieee80211_tx_dequeue(dev->hw, txq); + if (!skb) + return NULL; + + return skb; +} + +static void +mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return; + + mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10; +} + +static void +mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta, + struct sk_buff *skb, bool last) +{ + struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD]; + + info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; + if (last) + info->flags |= IEEE80211_TX_STATUS_EOSP; + + mt76_skb_set_moredata(skb, !last); + mt76_tx_queue_skb(dev, hwq, skb, wcid, sta); +} + +void +mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + u16 tids, int nframes, + enum ieee80211_frame_release_type reason, + bool more_data) +{ + struct mt76_dev *dev = hw->priv; + struct sk_buff *last_skb = NULL; + struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD]; + int i; + + spin_lock_bh(&hwq->lock); + for (i = 0; tids && nframes; i++, tids >>= 1) { + struct ieee80211_txq *txq = sta->txq[i]; + struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; + struct sk_buff *skb; + + if (!(tids & 1)) + continue; + + do { + skb = mt76_txq_dequeue(dev, mtxq, true); + if (!skb) + break; + + if (mtxq->aggr) + mt76_check_agg_ssn(mtxq, skb); + + nframes--; + if (last_skb) + mt76_queue_ps_skb(dev, sta, last_skb, false); + + last_skb = skb; + } while (nframes); + } + + if (last_skb) { + mt76_queue_ps_skb(dev, 
sta, last_skb, true); + dev->queue_ops->kick(dev, hwq); + } + spin_unlock_bh(&hwq->lock); +} +EXPORT_SYMBOL_GPL(mt76_release_buffered_frames); + +static int +mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq, + struct mt76_txq *mtxq, bool *empty) +{ + struct ieee80211_txq *txq = mtxq_to_txq(mtxq); + struct ieee80211_tx_info *info; + struct mt76_wcid *wcid = mtxq->wcid; + struct sk_buff *skb; + int n_frames = 1, limit; + struct ieee80211_tx_rate tx_rate; + bool ampdu; + bool probe; + int idx; + + skb = mt76_txq_dequeue(dev, mtxq, false); + if (!skb) { + *empty = true; + return 0; + } + + info = IEEE80211_SKB_CB(skb); + if (!wcid->tx_rate_set) + ieee80211_get_tx_rates(txq->vif, txq->sta, skb, + info->control.rates, 1); + tx_rate = info->control.rates[0]; + + probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); + ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU; + limit = ampdu ? 16 : 3; + + if (ampdu) + mt76_check_agg_ssn(mtxq, skb); + + idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta); + + if (idx < 0) + return idx; + + do { + bool cur_ampdu; + + if (probe) + break; + + if (test_bit(MT76_SCANNING, &dev->state) || + test_bit(MT76_RESET, &dev->state)) + return -EBUSY; + + skb = mt76_txq_dequeue(dev, mtxq, false); + if (!skb) { + *empty = true; + break; + } + + info = IEEE80211_SKB_CB(skb); + cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; + + if (ampdu != cur_ampdu || + (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) { + skb_queue_tail(&mtxq->retry_q, skb); + break; + } + + info->control.rates[0] = tx_rate; + + if (cur_ampdu) + mt76_check_agg_ssn(mtxq, skb); + + idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta); + if (idx < 0) + return idx; + + n_frames++; + } while (n_frames < limit); + + if (!probe) { + hwq->swq_queued++; + hwq->entry[idx].schedule = true; + } + + dev->queue_ops->kick(dev, hwq); + + return n_frames; +} + +static int +mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq) +{ + struct mt76_txq *mtxq, *mtxq_last; + int len = 0; + +restart: + mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list); + while (!list_empty(&hwq->swq)) { + bool empty = false; + int cur; + + mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list); + if (mtxq->send_bar && mtxq->aggr) { + struct ieee80211_txq *txq = mtxq_to_txq(mtxq); + struct ieee80211_sta *sta = txq->sta; + struct ieee80211_vif *vif = txq->vif; + u16 agg_ssn = mtxq->agg_ssn; + u8 tid = txq->tid; + + mtxq->send_bar = false; + spin_unlock_bh(&hwq->lock); + ieee80211_send_bar(vif, sta->addr, tid, agg_ssn); + spin_lock_bh(&hwq->lock); + goto restart; + } + + list_del_init(&mtxq->list); + + cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty); + if (!empty) + list_add_tail(&mtxq->list, &hwq->swq); + + if (cur < 0) + return cur; + + len += cur; + + if (mtxq == mtxq_last) + break; + } + + return len; +} + +void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq) +{ + int len; + + do { + if (hwq->swq_queued >= 4 || list_empty(&hwq->swq)) + break; + + len = mt76_txq_schedule_list(dev, hwq); + } while (len > 0); +} +EXPORT_SYMBOL_GPL(mt76_txq_schedule); + +void mt76_txq_schedule_all(struct mt76_dev *dev) +{ + int i; + + for (i = 0; i <= MT_TXQ_BK; i++) { + struct mt76_queue *q = &dev->q_tx[i]; + + spin_lock_bh(&q->lock); + mt76_txq_schedule(dev, q); + spin_unlock_bh(&q->lock); + } +} +EXPORT_SYMBOL_GPL(mt76_txq_schedule_all); + +void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta, + bool send_bar) +{ + int i; + + for (i = 0; i < 
ARRAY_SIZE(sta->txq); i++) { + struct ieee80211_txq *txq = sta->txq[i]; + struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; + + spin_lock_bh(&mtxq->hwq->lock); + mtxq->send_bar = mtxq->aggr && send_bar; + if (!list_empty(&mtxq->list)) + list_del_init(&mtxq->list); + spin_unlock_bh(&mtxq->hwq->lock); + } +} +EXPORT_SYMBOL_GPL(mt76_stop_tx_queues); + +void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) +{ + struct mt76_dev *dev = hw->priv; + struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; + struct mt76_queue *hwq = mtxq->hwq; + + spin_lock_bh(&hwq->lock); + if (list_empty(&mtxq->list)) + list_add_tail(&mtxq->list, &hwq->swq); + mt76_txq_schedule(dev, hwq); + spin_unlock_bh(&hwq->lock); +} +EXPORT_SYMBOL_GPL(mt76_wake_tx_queue); + +void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq) +{ + struct mt76_txq *mtxq; + struct mt76_queue *hwq; + struct sk_buff *skb; + + if (!txq) + return; + + mtxq = (struct mt76_txq *) txq->drv_priv; + hwq = mtxq->hwq; + + spin_lock_bh(&hwq->lock); + if (!list_empty(&mtxq->list)) + list_del(&mtxq->list); + spin_unlock_bh(&hwq->lock); + + while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL) + ieee80211_free_txskb(dev->hw, skb); +} +EXPORT_SYMBOL_GPL(mt76_txq_remove); + +void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq) +{ + struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; + + INIT_LIST_HEAD(&mtxq->list); + skb_queue_head_init(&mtxq->retry_q); + + mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)]; +} +EXPORT_SYMBOL_GPL(mt76_txq_init); diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c new file mode 100644 index 000000000000..0c35b8db58cd --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/util.c @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/module.h> +#include "mt76.h" + +bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val, + int timeout) +{ + u32 cur; + + timeout /= 10; + do { + cur = dev->bus->rr(dev, offset) & mask; + if (cur == val) + return true; + + udelay(10); + } while (timeout-- > 0); + + return false; +} +EXPORT_SYMBOL_GPL(__mt76_poll); + +bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val, + int timeout) +{ + u32 cur; + + timeout /= 10; + do { + cur = dev->bus->rr(dev, offset) & mask; + if (cur == val) + return true; + + usleep_range(10000, 20000); + } while (timeout-- > 0); + + return false; +} +EXPORT_SYMBOL_GPL(__mt76_poll_msec); + +int mt76_wcid_alloc(unsigned long *mask, int size) +{ + int i, idx = 0, cur; + + for (i = 0; i < size / BITS_PER_LONG; i++) { + idx = ffs(~mask[i]); + if (!idx) + continue; + + idx--; + cur = i * BITS_PER_LONG + idx; + if (cur >= size) + break; + + mask[i] |= BIT(idx); + return cur; + } + + return -1; +} +EXPORT_SYMBOL_GPL(mt76_wcid_alloc); + +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h new file mode 100644 index 000000000000..018d475504a2 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/util.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MT76_UTIL_H +#define __MT76_UTIL_H + +#include <linux/skbuff.h> +#include <linux/bitops.h> +#include <linux/bitfield.h> + +#define MT76_INCR(_var, _size) \ + _var = (((_var) + 1) % _size) + +int mt76_wcid_alloc(unsigned long *mask, int size); + +static inline void +mt76_wcid_free(unsigned long *mask, int idx) +{ + mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG); +} + +static inline void +mt76_skb_set_moredata(struct sk_buff *skb, bool enable) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + + if (enable) + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); + else + hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA); +} + +#endif diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 6711e7fb6926..0398bece5782 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -81,6 +81,41 @@ qtnf_mgmt_stypes[NUM_NL80211_IFTYPES] = { }; static int +qtnf_validate_iface_combinations(struct wiphy *wiphy, + struct qtnf_vif *change_vif, + enum nl80211_iftype new_type) +{ + struct qtnf_wmac *mac; + struct qtnf_vif *vif; + int i; + int ret = 0; + struct iface_combination_params params = { + .num_different_channels = 1, + }; + + mac = wiphy_priv(wiphy); + if (!mac) + return -EFAULT; + + for (i = 0; i < QTNF_MAX_INTF; i++) { + vif = &mac->iflist[i]; + if (vif->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED) + params.iftype_num[vif->wdev.iftype]++; + } + + if (change_vif) { + params.iftype_num[new_type]++; + params.iftype_num[change_vif->wdev.iftype]--; + } else { + params.iftype_num[new_type]++; + } + + ret = cfg80211_check_combinations(wiphy, ¶ms); + + return ret; +} + +static int qtnf_change_virtual_intf(struct wiphy *wiphy, struct net_device *dev, enum nl80211_iftype type, @@ -90,6 +125,13 @@ qtnf_change_virtual_intf(struct wiphy *wiphy, u8 *mac_addr; int ret; + ret = qtnf_validate_iface_combinations(wiphy, vif, type); + if (ret) { + pr_err("VIF%u.%u combination check: failed to set type %d\n", + vif->mac->macid, vif->vifid, type); + return ret; + } + if (params) mac_addr = params->macaddr; else @@ -120,10 +162,6 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) qtnf_scan_done(vif->mac, true); - if (qtnf_cmd_send_del_intf(vif)) - pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid, - vif->vifid); - /* Stop data */ netif_tx_stop_all_queues(netdev); if (netif_carrier_ok(netdev)) @@ -132,6 +170,10 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdevice(netdev); + if (qtnf_cmd_send_del_intf(vif)) + pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid, + vif->vifid); + vif->netdev->ieee80211_ptr = NULL; vif->netdev = NULL; vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; @@ -150,12 +192,20 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy, struct qtnf_wmac *mac; struct qtnf_vif *vif; u8 *mac_addr = NULL; + int ret; mac = wiphy_priv(wiphy); if (!mac) return ERR_PTR(-EFAULT); + ret = qtnf_validate_iface_combinations(wiphy, NULL, type); + if (ret) { + pr_err("MAC%u invalid combination: failed to add type %d\n", + mac->macid, type); + return ERR_PTR(ret); + } + switch (type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_AP: @@ -190,7 +240,7 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy, goto err_mac; } - if (qtnf_core_net_attach(mac, vif, name, 
name_assign_t, type)) { + if (qtnf_core_net_attach(mac, vif, name, name_assign_t)) { pr_err("VIF%u.%u: failed to attach netdev\n", mac->macid, vif->vifid); goto err_net; @@ -381,6 +431,7 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, const struct ieee80211_mgmt *mgmt_frame = (void *)params->buf; u32 short_cookie = prandom_u32(); u16 flags = 0; + u16 freq; *cookie = short_cookie; @@ -393,13 +444,21 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, if (params->dont_wait_for_ack) flags |= QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT; + /* If channel is not specified, pass "freq = 0" to tell device + * firmware to use current channel. + */ + if (params->chan) + freq = params->chan->center_freq; + else + freq = 0; + pr_debug("%s freq:%u; FC:%.4X; DA:%pM; len:%zu; C:%.8X; FL:%.4X\n", - wdev->netdev->name, params->chan->center_freq, + wdev->netdev->name, freq, le16_to_cpu(mgmt_frame->frame_control), mgmt_frame->da, params->len, short_cookie, flags); return qtnf_cmd_send_mgmt_frame(vif, short_cookie, flags, - params->chan->center_freq, + freq, params->buf, params->len); } @@ -409,6 +468,7 @@ qtnf_get_station(struct wiphy *wiphy, struct net_device *dev, { struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); + sinfo->generation = vif->generation; return qtnf_cmd_get_sta_info(vif, mac, sinfo); } @@ -430,11 +490,13 @@ qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev, ret = qtnf_cmd_get_sta_info(vif, sta_node->mac_addr, sinfo); if (unlikely(ret == -ENOENT)) { - qtnf_sta_list_del(&vif->sta_list, mac); + qtnf_sta_list_del(vif, mac); cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL); sinfo->filled = 0; } + sinfo->generation = vif->generation; + return ret; } @@ -533,19 +595,13 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev, return ret; } -static void qtnf_scan_timeout(struct timer_list *t) -{ - struct qtnf_wmac *mac = from_timer(mac, t, scan_timeout); - - pr_warn("mac%d scan timed out\n", mac->macid); - qtnf_scan_done(mac, true); -} - static int qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct qtnf_wmac *mac = wiphy_priv(wiphy); + cancel_delayed_work_sync(&mac->scan_timeout); + mac->scan_req = request; if (qtnf_cmd_send_scan(mac)) { @@ -554,9 +610,8 @@ qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) return -EFAULT; } - mac->scan_timeout.function = qtnf_scan_timeout; - mod_timer(&mac->scan_timeout, - jiffies + QTNF_SCAN_TIMEOUT_SEC * HZ); + queue_delayed_work(mac->bus->workqueue, &mac->scan_timeout, + QTNF_SCAN_TIMEOUT_SEC * HZ); return 0; } @@ -617,7 +672,6 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, return ret; } - vif->sta_state = QTNF_STA_DISCONNECTED; return 0; } @@ -717,7 +771,8 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, } if (!cfg80211_chandef_valid(chandef)) { - pr_err("%s: bad chan freq1=%u freq2=%u bw=%u\n", ndev->name, + pr_err("%s: bad channel freq=%u cf1=%u cf2=%u bw=%u\n", + ndev->name, chandef->chan->center_freq, chandef->center_freq1, chandef->center_freq2, chandef->width); ret = -ENODATA; @@ -750,6 +805,35 @@ static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev, return ret; } +static int qtnf_start_radar_detection(struct wiphy *wiphy, + struct net_device *ndev, + struct cfg80211_chan_def *chandef, + u32 cac_time_ms) +{ + struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); + int ret; + + ret = qtnf_cmd_start_cac(vif, chandef, cac_time_ms); + if (ret) + pr_err("%s: failed to start CAC ret=%d\n", ndev->name, ret); + + return ret; 
+} + +static int qtnf_set_mac_acl(struct wiphy *wiphy, + struct net_device *dev, + const struct cfg80211_acl_data *params) +{ + struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); + int ret; + + ret = qtnf_cmd_set_mac_acl(vif, params); + if (ret) + pr_err("%s: failed to set mac ACL ret=%d\n", dev->name, ret); + + return ret; +} + static struct cfg80211_ops qtn_cfg80211_ops = { .add_virtual_intf = qtnf_add_virtual_intf, .change_virtual_intf = qtnf_change_virtual_intf, @@ -773,7 +857,9 @@ static struct cfg80211_ops qtn_cfg80211_ops = { .disconnect = qtnf_disconnect, .dump_survey = qtnf_dump_survey, .get_channel = qtnf_get_channel, - .channel_switch = qtnf_channel_switch + .channel_switch = qtnf_channel_switch, + .start_radar_detection = qtnf_start_radar_detection, + .set_mac_acl = qtnf_set_mac_acl, }; static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in, @@ -802,6 +888,9 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in, continue; mac = bus->mac[mac_idx]; + if (!mac) + continue; + wiphy = priv_to_wiphy(mac); for (band = 0; band < NUM_NL80211_BANDS; ++band) { @@ -829,29 +918,29 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus) return wiphy; } -static int qtnf_wiphy_setup_if_comb(struct wiphy *wiphy, - struct ieee80211_iface_combination *if_comb, - const struct qtnf_mac_info *mac_info) +static int +qtnf_wiphy_setup_if_comb(struct wiphy *wiphy, struct qtnf_mac_info *mac_info) { - size_t max_interfaces = 0; + struct ieee80211_iface_combination *if_comb; + size_t n_if_comb; u16 interface_modes = 0; - size_t i; + size_t i, j; + + if_comb = mac_info->if_comb; + n_if_comb = mac_info->n_if_comb; - if (unlikely(!mac_info->limits || !mac_info->n_limits)) + if (!if_comb || !n_if_comb) return -ENOENT; - if_comb->limits = mac_info->limits; - if_comb->n_limits = mac_info->n_limits; + for (i = 0; i < n_if_comb; i++) { + if_comb[i].radar_detect_widths = mac_info->radar_detect_widths; - for (i = 0; i < mac_info->n_limits; i++) { - max_interfaces += mac_info->limits[i].max; - interface_modes |= mac_info->limits[i].types; + for (j = 0; j < if_comb[i].n_limits; j++) + interface_modes |= if_comb[i].limits[j].types; } - if_comb->num_different_channels = 1; - if_comb->beacon_int_infra_match = true; - if_comb->max_interfaces = max_interfaces; - if_comb->radar_detect_widths = mac_info->radar_detect_widths; + wiphy->iface_combinations = if_comb; + wiphy->n_iface_combinations = n_if_comb; wiphy->interface_modes = interface_modes; return 0; @@ -860,7 +949,6 @@ static int qtnf_wiphy_setup_if_comb(struct wiphy *wiphy, int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) { struct wiphy *wiphy = priv_to_wiphy(mac); - struct ieee80211_iface_combination *iface_comb = NULL; int ret; if (!wiphy) { @@ -868,14 +956,6 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) return -EFAULT; } - iface_comb = kzalloc(sizeof(*iface_comb), GFP_KERNEL); - if (!iface_comb) - return -ENOMEM; - - ret = qtnf_wiphy_setup_if_comb(wiphy, iface_comb, &mac->macinfo); - if (ret) - goto out; - wiphy->frag_threshold = mac->macinfo.frag_thr; wiphy->rts_threshold = mac->macinfo.rts_thr; wiphy->retry_short = mac->macinfo.sretry_limit; @@ -886,11 +966,13 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN; wiphy->mgmt_stypes = qtnf_mgmt_stypes; wiphy->max_remain_on_channel_duration = 5000; - - wiphy->iface_combinations = iface_comb; - wiphy->n_iface_combinations = 1; + wiphy->max_acl_mac_addrs = 
mac->macinfo.max_acl_mac_addrs; wiphy->max_num_csa_counters = 2; + ret = qtnf_wiphy_setup_if_comb(wiphy, &mac->macinfo); + if (ret) + goto out; + /* Initialize cipher suits */ wiphy->cipher_suites = qtnf_cipher_suites; wiphy->n_cipher_suites = ARRAY_SIZE(qtnf_cipher_suites); @@ -924,6 +1006,10 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; } + strlcpy(wiphy->fw_version, hw_info->fw_version, + sizeof(wiphy->fw_version)); + wiphy->hw_version = hw_info->hw_version; + ret = wiphy_register(wiphy); if (ret < 0) goto out; @@ -935,12 +1021,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) ret = regulatory_hint(wiphy, hw_info->rd->alpha2); out: - if (ret) { - kfree(iface_comb); - return ret; - } - - return 0; + return ret; } void qtnf_netdev_updown(struct net_device *ndev, bool up) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h index 66db26613b1f..b73425122a10 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h @@ -28,23 +28,4 @@ void qtnf_band_init_rates(struct ieee80211_supported_band *band); void qtnf_band_setup_htvht_caps(struct qtnf_mac_info *macinfo, struct ieee80211_supported_band *band); -static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted) -{ - struct cfg80211_scan_info info = { - .aborted = aborted, - }; - - if (timer_pending(&mac->scan_timeout)) - del_timer_sync(&mac->scan_timeout); - - mutex_lock(&mac->mac_lock); - - if (mac->scan_req) { - cfg80211_scan_done(mac->scan_req, &info); - mac->scan_req = NULL; - } - - mutex_unlock(&mac->mac_lock); -} - #endif /* _QTN_FMAC_CFG80211_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index 8bc8dd637315..deca0060eb27 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -162,6 +162,14 @@ static void qtnf_cmd_tlv_ie_set_add(struct sk_buff *cmd_skb, u8 frame_type, memcpy(tlv->ie_data, buf, len); } +static inline size_t qtnf_cmd_acl_data_size(const struct cfg80211_acl_data *acl) +{ + size_t size = sizeof(struct qlink_acl_data) + + acl->n_acl_entries * sizeof(struct qlink_mac_address); + + return size; +} + static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif, const struct cfg80211_ap_settings *s) { @@ -178,6 +186,10 @@ static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif, if (cfg80211_chandef_valid(&s->chandef)) len += sizeof(struct qlink_tlv_chandef); + if (s->acl) + len += sizeof(struct qlink_tlv_hdr) + + qtnf_cmd_acl_data_size(s->acl); + if (len > (sizeof(struct qlink_cmd) + QTNF_MAX_CMD_BUF_SIZE)) { pr_err("VIF%u.%u: can not fit AP settings: %u\n", vif->mac->macid, vif->vifid, len); @@ -203,7 +215,7 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif, cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_START_AP, sizeof(*cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; cmd = (struct qlink_cmd_start_ap *)cmd_skb->data; @@ -247,7 +259,7 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif, chtlv->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANDEF); chtlv->hdr.len = cpu_to_le16(sizeof(*chtlv) - sizeof(chtlv->hdr)); - qlink_chandef_cfg2q(&s->chandef, &chtlv->chan); + qlink_chandef_cfg2q(&s->chandef, &chtlv->chdef); } qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_BEACON_HEAD, @@ -283,6 +295,16 
@@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif, memcpy(tlv->val, s->vht_cap, sizeof(*s->vht_cap)); } + if (s->acl) { + size_t acl_size = qtnf_cmd_acl_data_size(s->acl); + struct qlink_tlv_hdr *tlv = + skb_put(cmd_skb, sizeof(*tlv) + acl_size); + + tlv->type = cpu_to_le16(QTN_TLV_ID_ACL_DATA); + tlv->len = cpu_to_le16(acl_size); + qlink_acl_data_cfg2q(s->acl, (struct qlink_acl_data *)tlv->val); + } + qtnf_bus_lock(vif->mac->bus); ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); @@ -313,7 +335,7 @@ int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif) cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_STOP_AP, sizeof(struct qlink_cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(vif->mac->bus); @@ -347,7 +369,7 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg) cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_REGISTER_MGMT, sizeof(*cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(vif->mac->bus); @@ -390,7 +412,7 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags, cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_SEND_MGMT_FRAME, sizeof(*cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(vif->mac->bus); @@ -436,7 +458,7 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type, cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_MGMT_SET_APPIE, sizeof(struct qlink_cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_cmd_tlv_ie_set_add(cmd_skb, frame_type, buf, len); @@ -461,30 +483,8 @@ out: } static void -qtnf_sta_info_parse_basic_counters(struct station_info *sinfo, - const struct qlink_sta_stat_basic_counters *counters) -{ - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES) | - BIT(NL80211_STA_INFO_TX_BYTES); - sinfo->rx_bytes = get_unaligned_le64(&counters->rx_bytes); - sinfo->tx_bytes = get_unaligned_le64(&counters->tx_bytes); - - sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS) | - BIT(NL80211_STA_INFO_TX_PACKETS) | - BIT(NL80211_STA_INFO_BEACON_RX); - sinfo->rx_packets = get_unaligned_le32(&counters->rx_packets); - sinfo->tx_packets = get_unaligned_le32(&counters->tx_packets); - sinfo->rx_beacon = get_unaligned_le64(&counters->rx_beacons); - - sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC) | - BIT(NL80211_STA_INFO_TX_FAILED); - sinfo->rx_dropped_misc = get_unaligned_le32(&counters->rx_dropped); - sinfo->tx_failed = get_unaligned_le32(&counters->tx_failed); -} - -static void qtnf_sta_info_parse_rate(struct rate_info *rate_dst, - const struct qlink_sta_info_rate *rate_src) + const struct qlink_sta_info_rate *rate_src) { rate_dst->legacy = get_unaligned_le16(&rate_src->rate) * 10; @@ -493,22 +493,23 @@ qtnf_sta_info_parse_rate(struct rate_info *rate_dst, rate_dst->flags = 0; switch (rate_src->bw) { - case QLINK_STA_INFO_RATE_BW_5: + case QLINK_CHAN_WIDTH_5: rate_dst->bw = RATE_INFO_BW_5; break; - case QLINK_STA_INFO_RATE_BW_10: + case QLINK_CHAN_WIDTH_10: rate_dst->bw = RATE_INFO_BW_10; break; - case QLINK_STA_INFO_RATE_BW_20: + case QLINK_CHAN_WIDTH_20: + case QLINK_CHAN_WIDTH_20_NOHT: rate_dst->bw = RATE_INFO_BW_20; break; - case QLINK_STA_INFO_RATE_BW_40: + case QLINK_CHAN_WIDTH_40: rate_dst->bw = RATE_INFO_BW_40; break; - case QLINK_STA_INFO_RATE_BW_80: + case QLINK_CHAN_WIDTH_80: rate_dst->bw = RATE_INFO_BW_80; break; - case QLINK_STA_INFO_RATE_BW_160: + case QLINK_CHAN_WIDTH_160: rate_dst->bw = 
RATE_INFO_BW_160; break; default: @@ -578,87 +579,125 @@ qtnf_sta_info_parse_flags(struct nl80211_sta_flag_update *dst, } static void -qtnf_sta_info_parse_generic_info(struct station_info *sinfo, - const struct qlink_sta_info_generic *info) +qtnf_cmd_sta_info_parse(struct station_info *sinfo, + const struct qlink_tlv_hdr *tlv, + size_t resp_size) { - sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME) | - BIT(NL80211_STA_INFO_INACTIVE_TIME); - sinfo->connected_time = get_unaligned_le32(&info->connected_time); - sinfo->inactive_time = get_unaligned_le32(&info->inactive_time); + const struct qlink_sta_stats *stats = NULL; + const u8 *map = NULL; + unsigned int map_len = 0; + unsigned int stats_len = 0; + u16 tlv_len; + +#define qtnf_sta_stat_avail(stat_name, bitn) \ + (qtnf_utils_is_bit_set(map, bitn, map_len) && \ + (offsetofend(struct qlink_sta_stats, stat_name) <= stats_len)) + + while (resp_size >= sizeof(*tlv)) { + tlv_len = le16_to_cpu(tlv->len); + + switch (le16_to_cpu(tlv->type)) { + case QTN_TLV_ID_STA_STATS_MAP: + map_len = tlv_len; + map = tlv->val; + break; + case QTN_TLV_ID_STA_STATS: + stats_len = tlv_len; + stats = (const struct qlink_sta_stats *)tlv->val; + break; + default: + break; + } - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL) | - BIT(NL80211_STA_INFO_SIGNAL_AVG); - sinfo->signal = info->rssi - 120; - sinfo->signal_avg = info->rssi_avg - QLINK_RSSI_OFFSET; + resp_size -= tlv_len + sizeof(*tlv); + tlv = (const struct qlink_tlv_hdr *)(tlv->val + tlv_len); + } - if (info->rx_rate.rate) { + if (!map || !stats) + return; + + if (qtnf_sta_stat_avail(inactive_time, QLINK_STA_INFO_INACTIVE_TIME)) { + sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME); + sinfo->inactive_time = le32_to_cpu(stats->inactive_time); + } + + if (qtnf_sta_stat_avail(connected_time, + QLINK_STA_INFO_CONNECTED_TIME)) { + sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME); + sinfo->connected_time = le32_to_cpu(stats->connected_time); + } + + if (qtnf_sta_stat_avail(signal, QLINK_STA_INFO_SIGNAL)) { + sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->signal = stats->signal - QLINK_RSSI_OFFSET; + } + + if (qtnf_sta_stat_avail(signal_avg, QLINK_STA_INFO_SIGNAL_AVG)) { + sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG); + sinfo->signal_avg = stats->signal_avg - QLINK_RSSI_OFFSET; + } + + if (qtnf_sta_stat_avail(rxrate, QLINK_STA_INFO_RX_BITRATE)) { sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE); - qtnf_sta_info_parse_rate(&sinfo->rxrate, &info->rx_rate); + qtnf_sta_info_parse_rate(&sinfo->rxrate, &stats->rxrate); } - if (info->tx_rate.rate) { + if (qtnf_sta_stat_avail(txrate, QLINK_STA_INFO_TX_BITRATE)) { sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); - qtnf_sta_info_parse_rate(&sinfo->txrate, &info->tx_rate); + qtnf_sta_info_parse_rate(&sinfo->txrate, &stats->txrate); } - sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS); - qtnf_sta_info_parse_flags(&sinfo->sta_flags, &info->state); -} + if (qtnf_sta_stat_avail(sta_flags, QLINK_STA_INFO_STA_FLAGS)) { + sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS); + qtnf_sta_info_parse_flags(&sinfo->sta_flags, &stats->sta_flags); + } -static int qtnf_cmd_sta_info_parse(struct station_info *sinfo, - const u8 *payload, size_t payload_size) -{ - const struct qlink_sta_stat_basic_counters *counters; - const struct qlink_sta_info_generic *sta_info; - u16 tlv_type; - u16 tlv_value_len; - size_t tlv_full_len; - const struct qlink_tlv_hdr *tlv; + if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES)) { + sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES); 
+ sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes); + } - sinfo->filled = 0; + if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES)) { + sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES); + sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes); + } - tlv = (const struct qlink_tlv_hdr *)payload; - while (payload_size >= sizeof(struct qlink_tlv_hdr)) { - tlv_type = le16_to_cpu(tlv->type); - tlv_value_len = le16_to_cpu(tlv->len); - tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr); - if (tlv_full_len > payload_size) { - pr_warn("malformed TLV 0x%.2X; LEN: %u\n", - tlv_type, tlv_value_len); - return -EINVAL; - } - switch (tlv_type) { - case QTN_TLV_ID_STA_BASIC_COUNTERS: - if (unlikely(tlv_value_len < sizeof(*counters))) { - pr_err("invalid TLV size %.4X: %u\n", - tlv_type, tlv_value_len); - break; - } + if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES64)) { + sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64); + sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes); + } - counters = (void *)tlv->val; - qtnf_sta_info_parse_basic_counters(sinfo, counters); - break; - case QTN_TLV_ID_STA_GENERIC_INFO: - if (unlikely(tlv_value_len < sizeof(*sta_info))) - break; + if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES64)) { + sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64); + sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes); + } - sta_info = (void *)tlv->val; - qtnf_sta_info_parse_generic_info(sinfo, sta_info); - break; - default: - pr_warn("unexpected TLV type: %.4X\n", tlv_type); - break; - } - payload_size -= tlv_full_len; - tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len); + if (qtnf_sta_stat_avail(rx_packets, QLINK_STA_INFO_RX_PACKETS)) { + sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS); + sinfo->rx_packets = le32_to_cpu(stats->rx_packets); } - if (payload_size) { - pr_warn("malformed TLV buf; bytes left: %zu\n", payload_size); - return -EINVAL; + if (qtnf_sta_stat_avail(tx_packets, QLINK_STA_INFO_TX_PACKETS)) { + sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS); + sinfo->tx_packets = le32_to_cpu(stats->tx_packets); } - return 0; + if (qtnf_sta_stat_avail(rx_beacon, QLINK_STA_INFO_BEACON_RX)) { + sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX); + sinfo->rx_beacon = le64_to_cpu(stats->rx_beacon); + } + + if (qtnf_sta_stat_avail(rx_dropped_misc, QLINK_STA_INFO_RX_DROP_MISC)) { + sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC); + sinfo->rx_dropped_misc = le32_to_cpu(stats->rx_dropped_misc); + } + + if (qtnf_sta_stat_avail(tx_failed, QLINK_STA_INFO_TX_FAILED)) { + sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED); + sinfo->tx_failed = le32_to_cpu(stats->tx_failed); + } + +#undef qtnf_sta_stat_avail } int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac, @@ -674,8 +713,7 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac, cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_GET_STA_INFO, sizeof(*cmd)); - - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(vif->mac->bus); @@ -715,7 +753,9 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac, goto out; } - ret = qtnf_cmd_sta_info_parse(sinfo, resp->info, var_resp_len); + qtnf_cmd_sta_info_parse(sinfo, + (const struct qlink_tlv_hdr *)resp->info, + var_resp_len); out: qtnf_bus_unlock(vif->mac->bus); @@ -738,7 +778,7 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif, cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, cmd_type, sizeof(*cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; 
qtnf_bus_lock(vif->mac->bus); @@ -811,7 +851,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif) cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_DEL_INTF, sizeof(*cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(vif->mac->bus); @@ -908,6 +948,16 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, struct qtnf_hw_info *hwinfo = &bus->hw_info; const struct qlink_tlv_hdr *tlv; const struct qlink_tlv_reg_rule *tlv_rule; + const char *bld_name = NULL; + const char *bld_rev = NULL; + const char *bld_type = NULL; + const char *bld_label = NULL; + u32 bld_tmstamp = 0; + u32 plat_id = 0; + const char *hw_id = NULL; + const char *calibration_ver = NULL; + const char *uboot_ver = NULL; + u32 hw_ver = 0; struct ieee80211_reg_rule *rule; u16 tlv_type; u16 tlv_value_len; @@ -934,6 +984,10 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, hwinfo->rd->alpha2[0] = resp->alpha2[0]; hwinfo->rd->alpha2[1] = resp->alpha2[1]; + bld_tmstamp = le32_to_cpu(resp->bld_tmstamp); + plat_id = le32_to_cpu(resp->plat_id); + hw_ver = le32_to_cpu(resp->hw_ver); + switch (resp->dfs_region) { case QLINK_DFS_FCC: hwinfo->rd->dfs_region = NL80211_DFS_FCC; @@ -994,6 +1048,27 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, rule->flags = qtnf_cmd_resp_reg_rule_flags_parse( le32_to_cpu(tlv_rule->flags)); break; + case QTN_TLV_ID_BUILD_NAME: + bld_name = (const void *)tlv->val; + break; + case QTN_TLV_ID_BUILD_REV: + bld_rev = (const void *)tlv->val; + break; + case QTN_TLV_ID_BUILD_TYPE: + bld_type = (const void *)tlv->val; + break; + case QTN_TLV_ID_BUILD_LABEL: + bld_label = (const void *)tlv->val; + break; + case QTN_TLV_ID_HW_ID: + hw_id = (const void *)tlv->val; + break; + case QTN_TLV_ID_CALIBRATION_VER: + calibration_ver = (const void *)tlv->val; + break; + case QTN_TLV_ID_UBOOT_VER: + uboot_ver = (const void *)tlv->val; + break; default: break; } @@ -1016,21 +1091,46 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, hwinfo->total_tx_chain, hwinfo->total_rx_chain, hwinfo->hw_capab); + pr_info("\nBuild name: %s" \ + "\nBuild revision: %s" \ + "\nBuild type: %s" \ + "\nBuild label: %s" \ + "\nBuild timestamp: %lu" \ + "\nPlatform ID: %lu" \ + "\nHardware ID: %s" \ + "\nCalibration version: %s" \ + "\nU-Boot version: %s" \ + "\nHardware version: 0x%08x", + bld_name, bld_rev, bld_type, bld_label, + (unsigned long)bld_tmstamp, + (unsigned long)plat_id, + hw_id, calibration_ver, uboot_ver, hw_ver); + + strlcpy(hwinfo->fw_version, bld_label, sizeof(hwinfo->fw_version)); + hwinfo->hw_version = hw_ver; + return 0; } static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, const u8 *tlv_buf, size_t tlv_buf_size) { - struct ieee80211_iface_limit *limits = NULL; - const struct qlink_iface_limit *limit_record; - size_t record_count = 0, rec = 0; - u16 tlv_type, tlv_value_len; - struct qlink_iface_comb_num *comb; + struct ieee80211_iface_combination *comb = NULL; + size_t n_comb = 0; + struct ieee80211_iface_limit *limits; + const struct qlink_iface_comb_num *comb_num; + const struct qlink_iface_limit_record *rec; + const struct qlink_iface_limit *lim; + u16 rec_len; + u16 tlv_type; + u16 tlv_value_len; size_t tlv_full_len; const struct qlink_tlv_hdr *tlv; - - mac->macinfo.n_limits = 0; + u8 *ext_capa = NULL; + u8 *ext_capa_mask = NULL; + u8 ext_capa_len = 0; + u8 ext_capa_mask_len = 0; + int i = 0; tlv = (const struct qlink_tlv_hdr *)tlv_buf; while (tlv_buf_size >= sizeof(struct qlink_tlv_hdr)) { @@ -1045,52 +1145,89 @@ static int qtnf_parse_variable_mac_info(struct 
qtnf_wmac *mac, switch (tlv_type) { case QTN_TLV_ID_NUM_IFACE_COMB: - if (unlikely(tlv_value_len != sizeof(*comb))) + if (tlv_value_len != sizeof(*comb_num)) return -EINVAL; - comb = (void *)tlv->val; - record_count = le16_to_cpu(comb->iface_comb_num); + comb_num = (void *)tlv->val; + + /* free earlier iface comb memory */ + qtnf_mac_iface_comb_free(mac); - mac->macinfo.n_limits = record_count; - /* free earlier iface limits memory */ - kfree(mac->macinfo.limits); - mac->macinfo.limits = - kzalloc(sizeof(*mac->macinfo.limits) * - record_count, GFP_KERNEL); + mac->macinfo.n_if_comb = + le32_to_cpu(comb_num->iface_comb_num); - if (unlikely(!mac->macinfo.limits)) + mac->macinfo.if_comb = + kcalloc(mac->macinfo.n_if_comb, + sizeof(*mac->macinfo.if_comb), + GFP_KERNEL); + + if (!mac->macinfo.if_comb) return -ENOMEM; - limits = mac->macinfo.limits; + comb = mac->macinfo.if_comb; + + pr_debug("MAC%u: %zu iface combinations\n", + mac->macid, mac->macinfo.n_if_comb); + break; case QTN_TLV_ID_IFACE_LIMIT: - if (unlikely(!limits)) { - pr_warn("MAC%u: limits are not inited\n", + if (unlikely(!comb)) { + pr_warn("MAC%u: no combinations advertised\n", mac->macid); return -EINVAL; } - if (unlikely(tlv_value_len != sizeof(*limit_record))) { - pr_warn("MAC%u: record size mismatch\n", + if (n_comb >= mac->macinfo.n_if_comb) { + pr_warn("MAC%u: combinations count exceeded\n", mac->macid); - return -EINVAL; + n_comb++; + break; } - limit_record = (void *)tlv->val; - limits[rec].max = le16_to_cpu(limit_record->max_num); - limits[rec].types = qlink_iface_type_to_nl_mask( - le16_to_cpu(limit_record->type)); + rec = (void *)tlv->val; + rec_len = sizeof(*rec) + rec->n_limits * sizeof(*lim); + + if (unlikely(tlv_value_len != rec_len)) { + pr_warn("MAC%u: record %zu size mismatch\n", + mac->macid, n_comb); + return -EINVAL; + } - /* supported modes: STA, AP */ - limits[rec].types &= BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_AP_VLAN) | - BIT(NL80211_IFTYPE_STATION); + limits = kzalloc(sizeof(*limits) * rec->n_limits, + GFP_KERNEL); + if (!limits) + return -ENOMEM; - pr_debug("MAC%u: MAX: %u; TYPES: %.4X\n", mac->macid, - limits[rec].max, limits[rec].types); + comb[n_comb].num_different_channels = + rec->num_different_channels; + comb[n_comb].max_interfaces = + le16_to_cpu(rec->max_interfaces); + comb[n_comb].n_limits = rec->n_limits; + comb[n_comb].limits = limits; + + for (i = 0; i < rec->n_limits; i++) { + lim = &rec->limits[i]; + limits[i].max = le16_to_cpu(lim->max_num); + limits[i].types = + qlink_iface_type_to_nl_mask(le16_to_cpu(lim->type)); + pr_debug("MAC%u: comb[%zu]: MAX:%u TYPES:%.4X\n", + mac->macid, n_comb, + limits[i].max, limits[i].types); + } - if (limits[rec].types) - rec++; + n_comb++; + break; + case WLAN_EID_EXT_CAPABILITY: + if (unlikely(tlv_value_len > U8_MAX)) + return -EINVAL; + ext_capa = (u8 *)tlv->val; + ext_capa_len = tlv_value_len; + break; + case QTN_TLV_ID_EXT_CAPABILITY_MASK: + if (unlikely(tlv_value_len > U8_MAX)) + return -EINVAL; + ext_capa_mask = (u8 *)tlv->val; + ext_capa_mask_len = tlv_value_len; break; default: break; @@ -1106,12 +1243,40 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, return -EINVAL; } - if (mac->macinfo.n_limits != rec) { + if (mac->macinfo.n_if_comb != n_comb) { pr_err("MAC%u: combination mismatch: reported=%zu parsed=%zu\n", - mac->macid, mac->macinfo.n_limits, rec); + mac->macid, mac->macinfo.n_if_comb, n_comb); + return -EINVAL; + } + + if (ext_capa_len != ext_capa_mask_len) { + pr_err("MAC%u: ext_capa/_mask lengths mismatch: %u != 
%u\n", + mac->macid, ext_capa_len, ext_capa_mask_len); return -EINVAL; } + if (ext_capa_len > 0) { + ext_capa = kmemdup(ext_capa, ext_capa_len, GFP_KERNEL); + if (!ext_capa) + return -ENOMEM; + + ext_capa_mask = + kmemdup(ext_capa_mask, ext_capa_mask_len, GFP_KERNEL); + if (!ext_capa_mask) { + kfree(ext_capa); + return -ENOMEM; + } + } else { + ext_capa = NULL; + ext_capa_mask = NULL; + } + + kfree(mac->macinfo.extended_capabilities); + kfree(mac->macinfo.extended_capabilities_mask); + mac->macinfo.extended_capabilities = ext_capa; + mac->macinfo.extended_capabilities_mask = ext_capa_mask; + mac->macinfo.extended_capabilities_len = ext_capa_len; + return 0; } @@ -1143,6 +1308,7 @@ qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac, mac_info->radar_detect_widths = qlink_chan_width_mask_to_nl(le16_to_cpu( resp_info->radar_detect_widths)); + mac_info->max_acl_mac_addrs = le32_to_cpu(resp_info->max_acl_mac_addrs); memcpy(&mac_info->ht_cap_mod_mask, &resp_info->ht_cap_mod_mask, sizeof(mac_info->ht_cap_mod_mask)); @@ -1186,7 +1352,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band, size_t tlv_len; size_t tlv_dlen; const struct qlink_tlv_hdr *tlv; - const struct qlink_tlv_channel *qchan; + const struct qlink_channel *qchan; struct ieee80211_channel *chan; unsigned int chidx = 0; u32 qflags; @@ -1232,7 +1398,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band, switch (tlv_type) { case QTN_TLV_ID_CHANNEL: - if (unlikely(tlv_len != sizeof(*qchan))) { + if (unlikely(tlv_dlen != sizeof(*qchan))) { pr_err("invalid channel TLV len %zu\n", tlv_len); goto error_ret; @@ -1243,7 +1409,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band, goto error_ret; } - qchan = (const struct qlink_tlv_channel *)tlv; + qchan = (const struct qlink_channel *)tlv->val; chan = &band->channels[chidx++]; qflags = le32_to_cpu(qchan->flags); @@ -1490,7 +1656,7 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac) cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, QLINK_CMD_MAC_INFO, sizeof(struct qlink_cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(mac->bus); @@ -1528,7 +1694,7 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus) cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD, QLINK_CMD_GET_HW_INFO, sizeof(struct qlink_cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(bus); @@ -1708,7 +1874,7 @@ int qtnf_cmd_send_init_fw(struct qtnf_bus *bus) cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD, QLINK_CMD_FW_INIT, sizeof(struct qlink_cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(bus); @@ -1757,7 +1923,7 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise, cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_ADD_KEY, sizeof(*cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(vif->mac->bus); @@ -1810,7 +1976,7 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise, cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_DEL_KEY, sizeof(*cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(vif->mac->bus); @@ -1851,7 +2017,7 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index, cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_SET_DEFAULT_KEY, sizeof(*cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(vif->mac->bus); 
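
The qtnfmac command handlers in this file share two recurring shapes. Every handler allocates a qlink command skb, takes the bus lock, calls qtnf_cmd_send(), and maps the returned QLINK_CMD_RESULT_* code onto a -errno value; this patch also drops the unlikely() annotations from those allocation checks. Variable-length responses are then parsed as a stream of little-endian type/length/value records. Below is a minimal sketch of that TLV walk, assuming only the qlink_tlv_hdr layout visible in this patch (__le16 type, __le16 len, u8 val[]); qlink_tlv_walk() and its handle() callback are hypothetical names used for illustration, not part of the driver:

static int qlink_tlv_walk(const u8 *buf, size_t size,
			  void (*handle)(u16 type, const u8 *val, u16 len))
{
	const struct qlink_tlv_hdr *tlv = (const struct qlink_tlv_hdr *)buf;

	while (size >= sizeof(*tlv)) {
		u16 len = le16_to_cpu(tlv->len);
		size_t full = sizeof(*tlv) + len;

		/* a value that overruns the buffer means a malformed stream */
		if (full > size)
			return -EINVAL;

		handle(le16_to_cpu(tlv->type), tlv->val, len);

		/* advance past the header and its value to the next record */
		size -= full;
		tlv = (const struct qlink_tlv_hdr *)(tlv->val + len);
	}

	/* trailing bytes smaller than a header are also malformed */
	return size ? -EINVAL : 0;
}

This mirrors qtnf_parse_variable_mac_info() and the reworked qtnf_cmd_sta_info_parse() above: unknown TLV types fall through a switch default, and parsers that need several records (for example the STA stats bitmap plus the stats structure) act only after the whole stream has been walked.
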
@@ -1886,7 +2052,7 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index) cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_SET_DEFAULT_MGMT_KEY, sizeof(*cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(vif->mac->bus); @@ -1941,28 +2107,24 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac, cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_CHANGE_STA, sizeof(*cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(vif->mac->bus); cmd = (struct qlink_cmd_change_sta *)cmd_skb->data; ether_addr_copy(cmd->sta_addr, mac); + cmd->flag_update.mask = + cpu_to_le32(qtnf_encode_sta_flags(params->sta_flags_mask)); + cmd->flag_update.value = + cpu_to_le32(qtnf_encode_sta_flags(params->sta_flags_set)); switch (vif->wdev.iftype) { case NL80211_IFTYPE_AP: cmd->if_type = cpu_to_le16(QLINK_IFTYPE_AP); - cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags( - params->sta_flags_mask)); - cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags( - params->sta_flags_set)); break; case NL80211_IFTYPE_STATION: cmd->if_type = cpu_to_le16(QLINK_IFTYPE_STATION); - cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags( - params->sta_flags_mask)); - cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags( - params->sta_flags_set)); break; default: pr_err("unsupported iftype %d\n", vif->wdev.iftype); @@ -1997,7 +2159,7 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif, cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_DEL_STA, sizeof(*cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(vif->mac->bus); @@ -2037,8 +2199,8 @@ static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb, qchan = skb_put_zero(cmd_skb, sizeof(*qchan)); qchan->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL); qchan->hdr.len = cpu_to_le16(sizeof(*qchan) - sizeof(qchan->hdr)); - qchan->center_freq = cpu_to_le16(sc->center_freq); - qchan->hw_value = cpu_to_le16(sc->hw_value); + qchan->chan.center_freq = cpu_to_le16(sc->center_freq); + qchan->chan.hw_value = cpu_to_le16(sc->hw_value); if (sc->flags & IEEE80211_CHAN_NO_IR) flags |= QLINK_CHAN_NO_IR; @@ -2046,7 +2208,7 @@ static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb, if (sc->flags & IEEE80211_CHAN_RADAR) flags |= QLINK_CHAN_RADAR; - qchan->flags = cpu_to_le32(flags); + qchan->chan.flags = cpu_to_le32(flags); } int qtnf_cmd_send_scan(struct qtnf_wmac *mac) @@ -2067,7 +2229,7 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac) cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, QLINK_CMD_SCAN, sizeof(struct qlink_cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(mac->bus); @@ -2137,7 +2299,7 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif, cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_CONNECT, sizeof(*cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; cmd = (struct qlink_cmd_connect *)cmd_skb->data; @@ -2239,7 +2401,7 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code) cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_DISCONNECT, sizeof(*cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(vif->mac->bus); @@ -2273,7 +2435,7 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up) cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_UPDOWN_INTF, sizeof(*cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; 
cmd = (struct qlink_cmd_updown *)cmd_skb->data; @@ -2434,8 +2596,7 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif, cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, vif->vifid, QLINK_CMD_CHAN_SWITCH, sizeof(*cmd)); - - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(mac->bus); @@ -2487,7 +2648,7 @@ int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef) cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_CHAN_GET, sizeof(struct qlink_cmd)); - if (unlikely(!cmd_skb)) + if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(bus); @@ -2512,3 +2673,83 @@ out: consume_skb(resp_skb); return ret; } + +int qtnf_cmd_start_cac(const struct qtnf_vif *vif, + const struct cfg80211_chan_def *chdef, + u32 cac_time_ms) +{ + struct qtnf_bus *bus = vif->mac->bus; + struct sk_buff *cmd_skb; + struct qlink_cmd_start_cac *cmd; + int ret; + u16 res_code; + + cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, + QLINK_CMD_START_CAC, + sizeof(*cmd)); + if (!cmd_skb) + return -ENOMEM; + + cmd = (struct qlink_cmd_start_cac *)cmd_skb->data; + cmd->cac_time_ms = cpu_to_le32(cac_time_ms); + qlink_chandef_cfg2q(chdef, &cmd->chan); + + qtnf_bus_lock(bus); + ret = qtnf_cmd_send(bus, cmd_skb, &res_code); + qtnf_bus_unlock(bus); + + if (ret) + return ret; + + switch (res_code) { + case QLINK_CMD_RESULT_OK: + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif, + const struct cfg80211_acl_data *params) +{ + struct qtnf_bus *bus = vif->mac->bus; + struct sk_buff *cmd_skb; + struct qlink_tlv_hdr *tlv; + size_t acl_size = qtnf_cmd_acl_data_size(params); + u16 res_code; + int ret; + + cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, + QLINK_CMD_SET_MAC_ACL, + sizeof(struct qlink_cmd)); + if (!cmd_skb) + return -ENOMEM; + + tlv = skb_put(cmd_skb, sizeof(*tlv) + acl_size); + tlv->type = cpu_to_le16(QTN_TLV_ID_ACL_DATA); + tlv->len = cpu_to_le16(acl_size); + qlink_acl_data_cfg2q(params, (struct qlink_acl_data *)tlv->val); + + qtnf_bus_lock(bus); + ret = qtnf_cmd_send(bus, cmd_skb, &res_code); + qtnf_bus_unlock(bus); + + if (unlikely(ret)) + return ret; + + switch (res_code) { + case QLINK_CMD_RESULT_OK: + break; + case QLINK_CMD_RESULT_INVALID: + ret = -EINVAL; + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h index d981a76e5835..69a7d56f7e58 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.h +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h @@ -76,5 +76,10 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif, struct cfg80211_csa_settings *params); int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef); +int qtnf_cmd_start_cac(const struct qtnf_vif *vif, + const struct cfg80211_chan_def *chdef, + u32 cac_time_ms); +int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif, + const struct cfg80211_acl_data *params); #endif /* QLINK_COMMANDS_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index 3423dc51198b..cf26c15a84f8 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -119,9 +119,38 @@ qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* Netdev handler for getting stats. 
*/ -static struct net_device_stats *qtnf_netdev_get_stats(struct net_device *dev) +static void qtnf_netdev_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) { - return &dev->stats; + struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); + unsigned int start; + int cpu; + + netdev_stats_to_stats64(stats, &ndev->stats); + + if (!vif->stats64) + return; + + for_each_possible_cpu(cpu) { + struct pcpu_sw_netstats *stats64; + u64 rx_packets, rx_bytes; + u64 tx_packets, tx_bytes; + + stats64 = per_cpu_ptr(vif->stats64, cpu); + + do { + start = u64_stats_fetch_begin_irq(&stats64->syncp); + rx_packets = stats64->rx_packets; + rx_bytes = stats64->rx_bytes; + tx_packets = stats64->tx_packets; + tx_bytes = stats64->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats64->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + } } /* Netdev handler for transmission timeout. @@ -156,7 +185,7 @@ const struct net_device_ops qtnf_netdev_ops = { .ndo_stop = qtnf_netdev_close, .ndo_start_xmit = qtnf_netdev_hard_start_xmit, .ndo_tx_timeout = qtnf_netdev_tx_timeout, - .ndo_get_stats = qtnf_netdev_get_stats, + .ndo_get_stats64 = qtnf_netdev_get_stats64, }; static int qtnf_mac_init_single_band(struct wiphy *wiphy, @@ -233,6 +262,23 @@ struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac) return vif; } +void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac) +{ + struct ieee80211_iface_combination *comb; + int i; + + if (mac->macinfo.if_comb) { + for (i = 0; i < mac->macinfo.n_if_comb; i++) { + comb = &mac->macinfo.if_comb[i]; + kfree(comb->limits); + comb->limits = NULL; + } + + kfree(mac->macinfo.if_comb); + mac->macinfo.if_comb = NULL; + } +} + static void qtnf_vif_reset_handler(struct work_struct *work) { struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work); @@ -258,13 +304,44 @@ static void qtnf_mac_init_primary_intf(struct qtnf_wmac *mac) { struct qtnf_vif *vif = &mac->iflist[QTNF_PRIMARY_VIF_IDX]; - vif->wdev.iftype = NL80211_IFTYPE_AP; + vif->wdev.iftype = NL80211_IFTYPE_STATION; vif->bss_priority = QTNF_DEF_BSS_PRIORITY; vif->wdev.wiphy = priv_to_wiphy(mac); INIT_WORK(&vif->reset_work, qtnf_vif_reset_handler); vif->cons_tx_timeout_cnt = 0; } +static void qtnf_mac_scan_finish(struct qtnf_wmac *mac, bool aborted) +{ + struct cfg80211_scan_info info = { + .aborted = aborted, + }; + + mutex_lock(&mac->mac_lock); + + if (mac->scan_req) { + cfg80211_scan_done(mac->scan_req, &info); + mac->scan_req = NULL; + } + + mutex_unlock(&mac->mac_lock); +} + +void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted) +{ + cancel_delayed_work_sync(&mac->scan_timeout); + qtnf_mac_scan_finish(mac, aborted); +} + +static void qtnf_mac_scan_timeout(struct work_struct *work) +{ + struct qtnf_wmac *mac = + container_of(work, struct qtnf_wmac, scan_timeout.work); + + pr_warn("MAC%d: scan timed out\n", mac->macid); + qtnf_mac_scan_finish(mac, true); +} + static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus, unsigned int macid) { @@ -288,7 +365,12 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus, mac->iflist[i].vifid = i; qtnf_sta_list_init(&mac->iflist[i].sta_list); mutex_init(&mac->mac_lock); - timer_setup(&mac->scan_timeout, NULL, 0); + INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout); + mac->iflist[i].stats64 = + netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!mac->iflist[i].stats64) + pr_warn("VIF%u.%u: per cpu stats allocation failed\n", 
+ macid, i); } qtnf_mac_init_primary_intf(mac); @@ -297,9 +379,12 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus, return mac; } +static const struct ethtool_ops qtnf_ethtool_ops = { + .get_drvinfo = cfg80211_get_drvinfo, +}; + int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif, - const char *name, unsigned char name_assign_type, - enum nl80211_iftype iftype) + const char *name, unsigned char name_assign_type) { struct wiphy *wiphy = priv_to_wiphy(mac); struct net_device *dev; @@ -320,12 +405,12 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif, dev->needs_free_netdev = true; dev_net_set(dev, wiphy_net(wiphy)); dev->ieee80211_ptr = &vif->wdev; - dev->ieee80211_ptr->iftype = iftype; ether_addr_copy(dev->dev_addr, vif->mac_addr); SET_NETDEV_DEV(dev, wiphy_dev(wiphy)); dev->flags |= IFF_BROADCAST | IFF_MULTICAST; dev->watchdog_timeo = QTNF_DEF_WDOG_TIMEOUT; dev->tx_queue_len = 100; + dev->ethtool_ops = &qtnf_ethtool_ops; qdev_vif = netdev_priv(dev); *((void **)qdev_vif) = vif; @@ -366,6 +451,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid) } rtnl_unlock(); qtnf_sta_list_free(&vif->sta_list); + free_percpu(vif->stats64); } if (mac->wiphy_registered) @@ -382,8 +468,9 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid) wiphy->bands[band] = NULL; } - kfree(mac->macinfo.limits); - kfree(wiphy->iface_combinations); + qtnf_mac_iface_comb_free(mac); + kfree(mac->macinfo.extended_capabilities); + kfree(mac->macinfo.extended_capabilities_mask); wiphy_free(wiphy); bus->mac[macid] = NULL; } @@ -418,7 +505,7 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid) goto error; } - ret = qtnf_cmd_send_add_intf(vif, NL80211_IFTYPE_AP, vif->mac_addr); + ret = qtnf_cmd_send_add_intf(vif, vif->wdev.iftype, vif->mac_addr); if (ret) { pr_err("MAC%u: failed to add VIF\n", macid); goto error; @@ -446,8 +533,7 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid) rtnl_lock(); - ret = qtnf_core_net_attach(mac, vif, "wlan%d", NET_NAME_ENUM, - NL80211_IFTYPE_AP); + ret = qtnf_core_net_attach(mac, vif, "wlan%d", NET_NAME_ENUM); rtnl_unlock(); if (ret) { @@ -644,6 +730,46 @@ void qtnf_wake_all_queues(struct net_device *ndev) } EXPORT_SYMBOL_GPL(qtnf_wake_all_queues); +void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb) +{ + struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); + struct pcpu_sw_netstats *stats64; + + if (unlikely(!vif || !vif->stats64)) { + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += skb->len; + return; + } + + stats64 = this_cpu_ptr(vif->stats64); + + u64_stats_update_begin(&stats64->syncp); + stats64->rx_packets++; + stats64->rx_bytes += skb->len; + u64_stats_update_end(&stats64->syncp); +} +EXPORT_SYMBOL_GPL(qtnf_update_rx_stats); + +void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb) +{ + struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); + struct pcpu_sw_netstats *stats64; + + if (unlikely(!vif || !vif->stats64)) { + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + return; + } + + stats64 = this_cpu_ptr(vif->stats64); + + u64_stats_update_begin(&stats64->syncp); + stats64->tx_packets++; + stats64->tx_bytes += skb->len; + u64_stats_update_end(&stats64->syncp); +} +EXPORT_SYMBOL_GPL(qtnf_update_tx_stats); + MODULE_AUTHOR("Quantenna Communications"); MODULE_DESCRIPTION("Quantenna 802.11 wireless LAN FullMAC driver."); MODULE_LICENSE("GPL"); diff --git 
a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index 1b7bc0318f3e..3b884c80b6ab 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -88,6 +88,9 @@ struct qtnf_vif { struct work_struct reset_work; struct qtnf_sta_list sta_list; unsigned long cons_tx_timeout_cnt; + int generation; + + struct pcpu_sw_netstats __percpu *stats64; }; struct qtnf_mac_info { @@ -102,10 +105,14 @@ struct qtnf_mac_info { u8 sretry_limit; u8 coverage_class; u8 radar_detect_widths; + u32 max_acl_mac_addrs; struct ieee80211_ht_cap ht_cap_mod_mask; struct ieee80211_vht_cap vht_cap_mod_mask; - struct ieee80211_iface_limit *limits; - size_t n_limits; + struct ieee80211_iface_combination *if_comb; + size_t n_if_comb; + u8 *extended_capabilities; + u8 *extended_capabilities_mask; + u8 extended_capabilities_len; }; struct qtnf_chan_stats { @@ -126,7 +133,7 @@ struct qtnf_wmac { struct qtnf_vif iflist[QTNF_MAX_INTF]; struct cfg80211_scan_request *scan_req; struct mutex mac_lock; /* lock during wmac specific ops */ - struct timer_list scan_timeout; + struct delayed_work scan_timeout; }; struct qtnf_hw_info { @@ -138,14 +145,16 @@ struct qtnf_hw_info { struct ieee80211_regdomain *rd; u8 total_tx_chain; u8 total_rx_chain; + char fw_version[ETHTOOL_FWVERS_LEN]; + u32 hw_version; }; struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac); struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac); +void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac); struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus); int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv, - const char *name, unsigned char name_assign_type, - enum nl80211_iftype iftype); + const char *name, unsigned char name_assign_type); void qtnf_main_work_queue(struct work_struct *work); int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed); int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac); @@ -153,9 +162,13 @@ int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac); struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid); struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb); void qtnf_wake_all_queues(struct net_device *ndev); +void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb); +void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb); + void qtnf_virtual_intf_cleanup(struct net_device *ndev); void qtnf_netdev_updown(struct net_device *ndev, bool up); +void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted); static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev) { diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 4abc6d9ed560..bcd415f96412 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -59,10 +59,11 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif, pr_debug("VIF%u.%u: MAC:%pM FC:%x\n", mac->macid, vif->vifid, sta_addr, frame_control); - qtnf_sta_list_add(&vif->sta_list, sta_addr); + qtnf_sta_list_add(vif, sta_addr); sinfo.assoc_req_ies = NULL; sinfo.assoc_req_ies_len = 0; + sinfo.generation = vif->generation; payload_len = len - sizeof(*sta_assoc); tlv = (const struct qlink_tlv_hdr *)sta_assoc->ies; @@ -132,7 +133,7 @@ qtnf_event_handle_sta_deauth(struct qtnf_wmac *mac, struct qtnf_vif *vif, pr_debug("VIF%u.%u: MAC:%pM reason:%x\n",
mac->macid, vif->vifid, sta_addr, reason); - if (qtnf_sta_list_del(&vif->sta_list, sta_addr)) + if (qtnf_sta_list_del(vif, sta_addr)) cfg80211_del_sta(vif->netdev, sta_deauth->sta_addr, GFP_KERNEL); @@ -237,9 +238,8 @@ qtnf_event_handle_mgmt_received(struct qtnf_vif *vif, pr_debug("%s LEN:%u FC:%.4X SA:%pM\n", vif->netdev->name, frame_len, le16_to_cpu(frame->frame_control), frame->addr2); - cfg80211_rx_mgmt(&vif->wdev, le32_to_cpu(rxmgmt->freq), - le32_to_cpu(rxmgmt->sig_dbm), rxmgmt->frame_data, - frame_len, flags); + cfg80211_rx_mgmt(&vif->wdev, le32_to_cpu(rxmgmt->freq), rxmgmt->sig_dbm, + rxmgmt->frame_data, frame_len, flags); return 0; } @@ -324,7 +324,7 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif, sr->bssid, get_unaligned_le64(&sr->tsf), le16_to_cpu(sr->capab), le16_to_cpu(sr->bintval), ies, ies_len, - sr->signal, GFP_KERNEL); + DBM_TO_MBM(sr->sig_dbm), GFP_KERNEL); if (!bss) return -ENOMEM; @@ -369,7 +369,8 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac, qlink_chandef_q2cfg(wiphy, &data->chan, &chandef); if (!cfg80211_chandef_valid(&chandef)) { - pr_err("MAC%u: bad channel f1=%u f2=%u bw=%u\n", mac->macid, + pr_err("MAC%u: bad channel freq=%u cf1=%u cf2=%u bw=%u\n", + mac->macid, chandef.chan->center_freq, chandef.center_freq1, chandef.center_freq2, chandef.width); return -EINVAL; @@ -394,6 +395,63 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac, return 0; } +static int qtnf_event_handle_radar(struct qtnf_vif *vif, + const struct qlink_event_radar *ev, + u16 len) +{ + struct wiphy *wiphy = priv_to_wiphy(vif->mac); + struct cfg80211_chan_def chandef; + + if (len < sizeof(*ev)) { + pr_err("MAC%u: payload is too short\n", vif->mac->macid); + return -EINVAL; + } + + if (!wiphy->registered || !vif->netdev) + return 0; + + qlink_chandef_q2cfg(wiphy, &ev->chan, &chandef); + + if (!cfg80211_chandef_valid(&chandef)) { + pr_err("MAC%u: bad channel f1=%u f2=%u bw=%u\n", + vif->mac->macid, + chandef.center_freq1, chandef.center_freq2, + chandef.width); + return -EINVAL; + } + + pr_info("%s: radar event=%u f1=%u f2=%u bw=%u\n", + vif->netdev->name, ev->event, + chandef.center_freq1, chandef.center_freq2, + chandef.width); + + switch (ev->event) { + case QLINK_RADAR_DETECTED: + cfg80211_radar_event(wiphy, &chandef, GFP_KERNEL); + break; + case QLINK_RADAR_CAC_FINISHED: + if (!vif->wdev.cac_started) + break; + + cfg80211_cac_event(vif->netdev, &chandef, + NL80211_RADAR_CAC_FINISHED, GFP_KERNEL); + break; + case QLINK_RADAR_CAC_ABORTED: + if (!vif->wdev.cac_started) + break; + + cfg80211_cac_event(vif->netdev, &chandef, + NL80211_RADAR_CAC_ABORTED, GFP_KERNEL); + break; + default: + pr_warn("%s: unhandled radar event %u\n", + vif->netdev->name, ev->event); + break; + } + + return 0; +} + static int qtnf_event_parse(struct qtnf_wmac *mac, const struct sk_buff *event_skb) { @@ -448,6 +506,10 @@ static int qtnf_event_parse(struct qtnf_wmac *mac, ret = qtnf_event_handle_freq_change(mac, (const void *)event, event_len); break; + case QLINK_EVENT_RADAR: + ret = qtnf_event_handle_radar(vif, (const void *)event, + event_len); + break; default: pr_warn("unknown event type: %x\n", event_id); break; @@ -479,9 +541,9 @@ static int qtnf_event_process_skb(struct qtnf_bus *bus, if (unlikely(!mac)) return -ENXIO; - qtnf_bus_lock(bus); + rtnl_lock(); res = qtnf_event_parse(mac, skb); - qtnf_bus_unlock(bus); + rtnl_unlock(); return res; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index 7e487622d87d..6f6190964320 
100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -615,8 +615,7 @@ static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv) PCI_DMA_TODEVICE); if (skb->dev) { - skb->dev->stats.tx_packets++; - skb->dev->stats.tx_bytes += skb->len; + qtnf_update_tx_stats(skb->dev, skb); if (unlikely(priv->tx_stopped)) { qtnf_wake_all_queues(skb->dev); priv->tx_stopped = 0; @@ -855,9 +854,7 @@ static int qtnf_rx_poll(struct napi_struct *napi, int budget) skb_put(skb, psize); ndev = qtnf_classify_skb(bus, skb); if (likely(ndev)) { - ndev->stats.rx_packets++; - ndev->stats.rx_bytes += skb->len; - + qtnf_update_rx_stats(ndev, skb); skb->protocol = eth_type_trans(skb, ndev); napi_gro_receive(napi, skb); } else { diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index a432fb001c41..9bf3ae4d1b3b 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -19,7 +19,7 @@ #include <linux/ieee80211.h> -#define QLINK_PROTO_VER 6 +#define QLINK_PROTO_VER 11 #define QLINK_MACID_RSVD 0xFF #define QLINK_VIFID_RSVD 0xFF @@ -122,17 +122,49 @@ enum qlink_channel_width { }; /** + * struct qlink_channel - qlink control channel definition + * + * @hw_value: hardware-specific value for the channel + * @center_freq: center frequency in MHz + * @flags: channel flags from &enum qlink_channel_flags + * @band: band this channel belongs to + * @max_antenna_gain: maximum antenna gain in dBi + * @max_power: maximum transmission power (in dBm) + * @max_reg_power: maximum regulatory transmission power (in dBm) + * @dfs_cac_ms: DFS channel availability check (CAC) time in milliseconds + * @dfs_state: current state of this channel. + * Only relevant if radar is required on this channel. + * @beacon_found: helper to regulatory code to indicate when a beacon + * has been found on this channel. Use regulatory_hint_found_beacon() + * to enable this; this is useful only on the 5 GHz band. + */ +struct qlink_channel { + __le16 hw_value; + __le16 center_freq; + __le32 flags; + u8 band; + u8 max_antenna_gain; + u8 max_power; + u8 max_reg_power; + __le32 dfs_cac_ms; + u8 dfs_state; + u8 beacon_found; + u8 rsvd[2]; +} __packed; + +/** * struct qlink_chandef - qlink channel definition * + * @chan: primary channel definition * @center_freq1: center frequency of first segment * @center_freq2: center frequency of second segment (80+80 only) * @width: channel width, one of &enum qlink_channel_width */ struct qlink_chandef { + struct qlink_channel chan; __le16 center_freq1; __le16 center_freq2; u8 width; - u8 rsvd[3]; + u8 rsvd; } __packed; #define QLINK_MAX_NR_CIPHER_SUITES 5 @@ -153,6 +185,17 @@ struct qlink_auth_encr { u8 rsvd[2]; } __packed; +/** + * struct qlink_sta_info_state - station flags mask/value + * + * @mask: STA flags mask, bitmap of &enum qlink_sta_flags + * @value: STA flags values, bitmap of &enum qlink_sta_flags + */ +struct qlink_sta_info_state { + __le32 mask; + __le32 value; +} __packed; + /* QLINK Command messages related definitions */ @@ -173,6 +216,7 @@ struct qlink_auth_encr { * @QLINK_CMD_REG_NOTIFY: notify device about regulatory domain change. This * command is supported only if device reports QLINK_HW_SUPPORTS_REG_UPDATE * capability. + * @QLINK_CMD_START_CAC: start radar detection procedure on a specified channel.
*/ enum qlink_cmd_type { QLINK_CMD_FW_INIT = 0x0001, @@ -192,8 +236,10 @@ enum qlink_cmd_type { QLINK_CMD_BAND_INFO_GET = 0x001A, QLINK_CMD_CHAN_SWITCH = 0x001B, QLINK_CMD_CHAN_GET = 0x001C, + QLINK_CMD_START_CAC = 0x001D, QLINK_CMD_START_AP = 0x0021, QLINK_CMD_STOP_AP = 0x0022, + QLINK_CMD_SET_MAC_ACL = 0x0023, QLINK_CMD_GET_STA_INFO = 0x0030, QLINK_CMD_ADD_KEY = 0x0040, QLINK_CMD_DEL_KEY = 0x0041, @@ -368,16 +414,14 @@ struct qlink_cmd_set_def_mgmt_key { /** * struct qlink_cmd_change_sta - data for QLINK_CMD_CHANGE_STA command * - * @sta_flags_mask: STA flags mask, bitmap of &enum qlink_sta_flags - * @sta_flags_set: STA flags values, bitmap of &enum qlink_sta_flags + * @flag_update: STA flags to update * @if_type: Mode of interface operation, one of &enum qlink_iface_type * @vlanid: VLAN ID to assign to specific STA * @sta_addr: address of the STA for which parameters are set. */ struct qlink_cmd_change_sta { struct qlink_cmd chdr; - __le32 sta_flags_mask; - __le32 sta_flags_set; + struct qlink_sta_info_state flag_update; __le16 if_type; __le16 vlanid; u8 sta_addr[ETH_ALEN]; @@ -585,6 +629,40 @@ struct qlink_cmd_start_ap { u8 info[0]; } __packed; +/** + * struct qlink_cmd_start_cac - data for QLINK_CMD_START_CAC command + * + * @chan: a channel to start a radar detection procedure on. + * @cac_time_ms: CAC time in milliseconds. + */ +struct qlink_cmd_start_cac { + struct qlink_cmd chdr; + struct qlink_chandef chan; + __le32 cac_time_ms; +} __packed; + +enum qlink_acl_policy { + QLINK_ACL_POLICY_ACCEPT_UNLESS_LISTED, + QLINK_ACL_POLICY_DENY_UNLESS_LISTED, +}; + +struct qlink_mac_address { + u8 addr[ETH_ALEN]; +} __packed; + +/** + * struct qlink_acl_data - ACL data + * + * @policy: filter policy, one of &enum qlink_acl_policy. + * @num_entries: number of MAC addresses in array. + * @mac_addrs: MAC addresses array. + */ +struct qlink_acl_data { + __le32 policy; + __le32 num_entries; + struct qlink_mac_address mac_addrs[0]; +} __packed; + /* QLINK Command Responses messages related definitions */ @@ -646,6 +724,7 @@ struct qlink_resp_get_mac_info { struct ieee80211_ht_cap ht_cap_mod_mask; __le16 max_ap_assoc_sta; __le16 radar_detect_widths; + __le32 max_acl_mac_addrs; u8 bands_cap; u8 rsvd[1]; u8 var_info[0]; @@ -685,6 +764,9 @@ struct qlink_resp_get_hw_info { struct qlink_resp rhdr; __le32 fw_ver; __le32 hw_capab; + __le32 bld_tmstamp; + __le32 plat_id; + __le32 hw_ver; __le16 ql_proto_ver; u8 num_mac; u8 mac_bitmap; @@ -709,17 +791,27 @@ struct qlink_resp_manage_intf { struct qlink_intf_info intf_info; } __packed; +enum qlink_sta_info_rate_flags { + QLINK_STA_INFO_RATE_FLAG_HT_MCS = BIT(0), + QLINK_STA_INFO_RATE_FLAG_VHT_MCS = BIT(1), + QLINK_STA_INFO_RATE_FLAG_SHORT_GI = BIT(2), + QLINK_STA_INFO_RATE_FLAG_60G = BIT(3), +}; + /** * struct qlink_resp_get_sta_info - response for QLINK_CMD_GET_STA_INFO command * * Response data containing statistics for specified STA. * * @sta_addr: MAC address of STA the response carries statistic for. - * @info: statistics for specified STA. + * @info: variable statistics for specified STA; a bitmap of valid fields is + * carried separately in a QTN_TLV_ID_STA_STATS_MAP TLV.
*/ struct qlink_resp_get_sta_info { struct qlink_resp rhdr; u8 sta_addr[ETH_ALEN]; + u8 rsvd[2]; u8 info[0]; } __packed; @@ -782,6 +874,7 @@ enum qlink_event_type { QLINK_EVENT_BSS_JOIN = 0x0026, QLINK_EVENT_BSS_LEAVE = 0x0027, QLINK_EVENT_FREQ_CHANGE = 0x0028, + QLINK_EVENT_RADAR = 0x0029, }; /** @@ -869,15 +962,16 @@ enum qlink_rxmgmt_flags { * struct qlink_event_rxmgmt - data for QLINK_EVENT_MGMT_RECEIVED event * * @freq: Frequency on which the frame was received in MHz. - * @sig_dbm: signal strength in dBm. * @flags: bitmap of &enum qlink_rxmgmt_flags. + * @sig_dbm: signal strength in dBm. * @frame_data: data of Rx'd frame itself. */ struct qlink_event_rxmgmt { struct qlink_event ehdr; __le32 freq; - __le32 sig_dbm; __le32 flags; + s8 sig_dbm; + u8 rsvd[3]; u8 frame_data[0]; } __packed; @@ -889,7 +983,7 @@ struct qlink_event_rxmgmt { * event was generated was discovered. * @capab: capabilities field. * @bintval: beacon interval announced by discovered BSS. - * @signal: signal strength. + * @sig_dbm: signal strength in dBm. * @bssid: BSSID announced by discovered BSS. * @ssid_len: length of SSID announced by BSS. * @ssid: SSID announced by discovered BSS. @@ -901,7 +995,7 @@ struct qlink_event_scan_result { __le16 freq; __le16 capab; __le16 bintval; - s8 signal; + s8 sig_dbm; u8 ssid_len; u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 bssid[ETH_ALEN]; @@ -931,9 +1025,39 @@ struct qlink_event_scan_complete { __le32 flags; } __packed; +enum qlink_radar_event { + QLINK_RADAR_DETECTED, + QLINK_RADAR_CAC_FINISHED, + QLINK_RADAR_CAC_ABORTED, + QLINK_RADAR_NOP_FINISHED, + QLINK_RADAR_PRE_CAC_EXPIRED, +}; + +/** + * struct qlink_event_radar - data for QLINK_EVENT_RADAR event + * + * @chan: channel on which radar event happened. + * @event: radar event type, one of &enum qlink_radar_event. + */ +struct qlink_event_radar { + struct qlink_event ehdr; + struct qlink_chandef chan; + u8 event; + u8 rsvd[3]; +} __packed; + /* QLINK TLVs (Type-Length Values) definitions */ +/** + * enum qlink_tlv_id - list of TLVs that Qlink messages can carry + * + * @QTN_TLV_ID_STA_STATS_MAP: a bitmap of &enum qlink_sta_info, used to + * indicate which statistic carried in QTN_TLV_ID_STA_STATS is valid. + * @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by + * &struct qlink_sta_stats. Valid values are marked as such in a bitmap + * carried by QTN_TLV_ID_STA_STATS_MAP. 
+ */ enum qlink_tlv_id { QTN_TLV_ID_FRAG_THRESH = 0x0201, QTN_TLV_ID_RTS_THRESH = 0x0202, @@ -942,15 +1066,24 @@ enum qlink_tlv_id { QTN_TLV_ID_REG_RULE = 0x0207, QTN_TLV_ID_CHANNEL = 0x020F, QTN_TLV_ID_CHANDEF = 0x0210, + QTN_TLV_ID_STA_STATS_MAP = 0x0211, + QTN_TLV_ID_STA_STATS = 0x0212, QTN_TLV_ID_COVERAGE_CLASS = 0x0213, QTN_TLV_ID_IFACE_LIMIT = 0x0214, QTN_TLV_ID_NUM_IFACE_COMB = 0x0215, QTN_TLV_ID_CHANNEL_STATS = 0x0216, - QTN_TLV_ID_STA_BASIC_COUNTERS = 0x0300, - QTN_TLV_ID_STA_GENERIC_INFO = 0x0301, QTN_TLV_ID_KEY = 0x0302, QTN_TLV_ID_SEQ = 0x0303, QTN_TLV_ID_IE_SET = 0x0305, + QTN_TLV_ID_EXT_CAPABILITY_MASK = 0x0306, + QTN_TLV_ID_ACL_DATA = 0x0307, + QTN_TLV_ID_BUILD_NAME = 0x0401, + QTN_TLV_ID_BUILD_REV = 0x0402, + QTN_TLV_ID_BUILD_TYPE = 0x0403, + QTN_TLV_ID_BUILD_LABEL = 0x0404, + QTN_TLV_ID_HW_ID = 0x0405, + QTN_TLV_ID_CALIBRATION_VER = 0x0406, + QTN_TLV_ID_UBOOT_VER = 0x0407, }; struct qlink_tlv_hdr { @@ -959,76 +1092,24 @@ struct qlink_tlv_hdr { u8 val[0]; } __packed; -struct qlink_iface_limit { - __le16 max_num; - __le16 type; -} __packed; - struct qlink_iface_comb_num { - __le16 iface_comb_num; + __le32 iface_comb_num; } __packed; -struct qlink_sta_stat_basic_counters { - __le64 rx_bytes; - __le64 tx_bytes; - __le64 rx_beacons; - __le32 rx_packets; - __le32 tx_packets; - __le32 rx_dropped; - __le32 tx_failed; -} __packed; - -enum qlink_sta_info_rate_flags { - QLINK_STA_INFO_RATE_FLAG_INVALID = 0, - QLINK_STA_INFO_RATE_FLAG_HT_MCS = BIT(0), - QLINK_STA_INFO_RATE_FLAG_VHT_MCS = BIT(1), - QLINK_STA_INFO_RATE_FLAG_SHORT_GI = BIT(2), - QLINK_STA_INFO_RATE_FLAG_60G = BIT(3), -}; - -enum qlink_sta_info_rate_bw { - QLINK_STA_INFO_RATE_BW_5 = 0, - QLINK_STA_INFO_RATE_BW_10 = 1, - QLINK_STA_INFO_RATE_BW_20 = 2, - QLINK_STA_INFO_RATE_BW_40 = 3, - QLINK_STA_INFO_RATE_BW_80 = 4, - QLINK_STA_INFO_RATE_BW_160 = 5, -}; - -/** - * struct qlink_sta_info_rate - STA rate statistics - * - * @rate: data rate in Mbps. - * @flags: bitmap of &enum qlink_sta_flags. - * @mcs: 802.11-defined MCS index. - * nss: Number of Spatial Streams. - * @bw: bandwidth, one of &enum qlink_sta_info_rate_bw. - */ -struct qlink_sta_info_rate { - __le16 rate; - u8 flags; - u8 mcs; - u8 nss; - u8 bw; +struct qlink_iface_limit { + __le16 max_num; + __le16 type; } __packed; -struct qlink_sta_info_state { - __le32 mask; - __le32 value; +struct qlink_iface_limit_record { + __le16 max_interfaces; + u8 num_different_channels; + u8 n_limits; + struct qlink_iface_limit limits[0]; } __packed; #define QLINK_RSSI_OFFSET 120 -struct qlink_sta_info_generic { - struct qlink_sta_info_state state; - __le32 connected_time; - __le32 inactive_time; - struct qlink_sta_info_rate rx_rate; - struct qlink_sta_info_rate tx_rate; - u8 rssi; - u8 rssi_avg; -} __packed; - struct qlink_tlv_frag_rts_thr { struct qlink_tlv_hdr hdr; __le16 thr; @@ -1113,19 +1194,16 @@ enum qlink_dfs_state { QLINK_DFS_AVAILABLE, }; +/** + * struct qlink_tlv_channel - data for QTN_TLV_ID_CHANNEL TLV + * + * Channel settings. + * + * @chan: ieee80211 channel settings.
+ */ struct qlink_tlv_channel { struct qlink_tlv_hdr hdr; - __le16 hw_value; - __le16 center_freq; - __le32 flags; - u8 band; - u8 max_antenna_gain; - u8 max_power; - u8 max_reg_power; - __le32 dfs_cac_ms; - u8 dfs_state; - u8 beacon_found; - u8 rsvd[2]; + struct qlink_channel chan; } __packed; /** @@ -1137,7 +1215,7 @@ struct qlink_tlv_channel { */ struct qlink_tlv_chandef { struct qlink_tlv_hdr hdr; - struct qlink_chandef chan; + struct qlink_chandef chdef; } __packed; enum qlink_ie_set_type { @@ -1176,4 +1254,105 @@ struct qlink_chan_stats { s8 chan_noise; } __packed; +/** + * enum qlink_sta_info - station information bitmap + * + * Used to indicate which statistics values in &struct qlink_sta_stats + * are valid. Individual values are used to fill a bitmap carried in a + * payload of QTN_TLV_ID_STA_STATS_MAP. + * + * @QLINK_STA_INFO_CONNECTED_TIME: connected_time value is valid. + * @QLINK_STA_INFO_INACTIVE_TIME: inactive_time value is valid. + * @QLINK_STA_INFO_RX_BYTES: lower 32 bits of rx_bytes value are valid. + * @QLINK_STA_INFO_TX_BYTES: lower 32 bits of tx_bytes value are valid. + * @QLINK_STA_INFO_RX_BYTES64: rx_bytes value is valid. + * @QLINK_STA_INFO_TX_BYTES64: tx_bytes value is valid. + * @QLINK_STA_INFO_RX_DROP_MISC: rx_dropped_misc value is valid. + * @QLINK_STA_INFO_BEACON_RX: rx_beacon value is valid. + * @QLINK_STA_INFO_SIGNAL: signal value is valid. + * @QLINK_STA_INFO_SIGNAL_AVG: signal_avg value is valid. + * @QLINK_STA_INFO_RX_BITRATE: rxrate value is valid. + * @QLINK_STA_INFO_TX_BITRATE: txrate value is valid. + * @QLINK_STA_INFO_RX_PACKETS: rx_packets value is valid. + * @QLINK_STA_INFO_TX_PACKETS: tx_packets value is valid. + * @QLINK_STA_INFO_TX_RETRIES: tx_retries value is valid. + * @QLINK_STA_INFO_TX_FAILED: tx_failed value is valid. + * @QLINK_STA_INFO_STA_FLAGS: sta_flags value is valid. + */ +enum qlink_sta_info { + QLINK_STA_INFO_CONNECTED_TIME, + QLINK_STA_INFO_INACTIVE_TIME, + QLINK_STA_INFO_RX_BYTES, + QLINK_STA_INFO_TX_BYTES, + QLINK_STA_INFO_RX_BYTES64, + QLINK_STA_INFO_TX_BYTES64, + QLINK_STA_INFO_RX_DROP_MISC, + QLINK_STA_INFO_BEACON_RX, + QLINK_STA_INFO_SIGNAL, + QLINK_STA_INFO_SIGNAL_AVG, + QLINK_STA_INFO_RX_BITRATE, + QLINK_STA_INFO_TX_BITRATE, + QLINK_STA_INFO_RX_PACKETS, + QLINK_STA_INFO_TX_PACKETS, + QLINK_STA_INFO_TX_RETRIES, + QLINK_STA_INFO_TX_FAILED, + QLINK_STA_INFO_STA_FLAGS, + QLINK_STA_INFO_NUM, +}; + +/** + * struct qlink_sta_info_rate - STA rate statistics + * + * @rate: data rate in Mbps. + * @flags: bitmap of &enum qlink_sta_info_rate_flags. + * @mcs: 802.11-defined MCS index. + * @nss: Number of Spatial Streams. + * @bw: bandwidth, one of &enum qlink_channel_width. + */ +struct qlink_sta_info_rate { + __le16 rate; + u8 flags; + u8 mcs; + u8 nss; + u8 bw; +} __packed; + +/** + * struct qlink_sta_stats - data for QTN_TLV_ID_STA_STATS + * + * Carries statistics of a STA. Not all fields may be filled with + * valid values. Valid fields should be indicated as such using a bitmap of + * &enum qlink_sta_info. Bitmap is carried separately in a payload of + * QTN_TLV_ID_STA_STATS_MAP.
+ */ +struct qlink_sta_stats { + __le64 rx_bytes; + __le64 tx_bytes; + __le64 rx_beacon; + __le64 rx_duration; + __le64 t_offset; + __le32 connected_time; + __le32 inactive_time; + __le32 rx_packets; + __le32 tx_packets; + __le32 tx_retries; + __le32 tx_failed; + __le32 rx_dropped_misc; + __le32 beacon_loss_count; + __le32 expected_throughput; + struct qlink_sta_info_state sta_flags; + struct qlink_sta_info_rate txrate; + struct qlink_sta_info_rate rxrate; + __le16 llid; + __le16 plid; + u8 local_pm; + u8 peer_pm; + u8 nonpeer_pm; + u8 rx_beacon_signal_avg; + u8 plink_state; + u8 signal; + u8 signal_avg; + u8 rsvd[1]; +}; + #endif /* _QTN_QLINK_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c index 61d999affb09..aeeda81b09ea 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c @@ -100,34 +100,6 @@ static enum nl80211_chan_width qlink_chanwidth_to_nl(u8 qlw) } } -void qlink_chandef_q2cfg(struct wiphy *wiphy, - const struct qlink_chandef *qch, - struct cfg80211_chan_def *chdef) -{ - chdef->center_freq1 = le16_to_cpu(qch->center_freq1); - chdef->center_freq2 = le16_to_cpu(qch->center_freq2); - chdef->width = qlink_chanwidth_to_nl(qch->width); - - switch (chdef->width) { - case NL80211_CHAN_WIDTH_20_NOHT: - case NL80211_CHAN_WIDTH_20: - case NL80211_CHAN_WIDTH_5: - case NL80211_CHAN_WIDTH_10: - chdef->chan = ieee80211_get_channel(wiphy, chdef->center_freq1); - break; - case NL80211_CHAN_WIDTH_40: - case NL80211_CHAN_WIDTH_80: - case NL80211_CHAN_WIDTH_80P80: - case NL80211_CHAN_WIDTH_160: - chdef->chan = ieee80211_get_channel(wiphy, - chdef->center_freq1 - 10); - break; - default: - chdef->chan = NULL; - break; - } -} - static u8 qlink_chanwidth_nl_to_qlink(enum nl80211_chan_width nlwidth) { switch (nlwidth) { @@ -152,9 +124,29 @@ static u8 qlink_chanwidth_nl_to_qlink(enum nl80211_chan_width nlwidth) } } +void qlink_chandef_q2cfg(struct wiphy *wiphy, + const struct qlink_chandef *qch, + struct cfg80211_chan_def *chdef) +{ + struct ieee80211_channel *chan; + + chan = ieee80211_get_channel(wiphy, le16_to_cpu(qch->chan.center_freq)); + + chdef->chan = chan; + chdef->center_freq1 = le16_to_cpu(qch->center_freq1); + chdef->center_freq2 = le16_to_cpu(qch->center_freq2); + chdef->width = qlink_chanwidth_to_nl(qch->width); +} + void qlink_chandef_cfg2q(const struct cfg80211_chan_def *chdef, struct qlink_chandef *qch) { + struct ieee80211_channel *chan = chdef->chan; + + qch->chan.hw_value = cpu_to_le16(chan->hw_value); + qch->chan.center_freq = cpu_to_le16(chan->center_freq); + qch->chan.flags = cpu_to_le32(chan->flags); + qch->center_freq1 = cpu_to_le16(chdef->center_freq1); qch->center_freq2 = cpu_to_le16(chdef->center_freq2); qch->width = qlink_chanwidth_nl_to_qlink(chdef->width); @@ -172,3 +164,33 @@ enum qlink_hidden_ssid qlink_hidden_ssid_nl2q(enum nl80211_hidden_ssid nl_val) return QLINK_HIDDEN_SSID_NOT_IN_USE; } } + +bool qtnf_utils_is_bit_set(const u8 *arr, unsigned int bit, + unsigned int arr_max_len) +{ + unsigned int idx = bit / BITS_PER_BYTE; + u8 mask = 1 << (bit - (idx * BITS_PER_BYTE)); + + if (idx >= arr_max_len) + return false; + + return arr[idx] & mask; +} + +void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl, + struct qlink_acl_data *qacl) +{ + switch (acl->acl_policy) { + case NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED: + qacl->policy = + cpu_to_le32(QLINK_ACL_POLICY_ACCEPT_UNLESS_LISTED); + break; + case 
NL80211_ACL_POLICY_DENY_UNLESS_LISTED: + qacl->policy = cpu_to_le32(QLINK_ACL_POLICY_DENY_UNLESS_LISTED); + break; + } + + qacl->num_entries = cpu_to_le32(acl->n_acl_entries); + memcpy(qacl->mac_addrs, acl->mac_addrs, + acl->n_acl_entries * sizeof(*qacl->mac_addrs)); +} diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h index 260383d6d109..54caeb38917c 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h @@ -69,5 +69,9 @@ void qlink_chandef_q2cfg(struct wiphy *wiphy, void qlink_chandef_cfg2q(const struct cfg80211_chan_def *chdef, struct qlink_chandef *qch); enum qlink_hidden_ssid qlink_hidden_ssid_nl2q(enum nl80211_hidden_ssid nl_val); +bool qtnf_utils_is_bit_set(const u8 *arr, unsigned int bit, + unsigned int arr_max_len); +void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl, + struct qlink_acl_data *qacl); #endif /* _QTN_FMAC_QLINK_UTIL_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.c b/drivers/net/wireless/quantenna/qtnfmac/util.c index ed38e87471bf..e745733ba417 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/util.c +++ b/drivers/net/wireless/quantenna/qtnfmac/util.c @@ -57,9 +57,10 @@ struct qtnf_sta_node *qtnf_sta_list_lookup_index(struct qtnf_sta_list *list, return NULL; } -struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list, +struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_vif *vif, const u8 *mac) { + struct qtnf_sta_list *list = &vif->sta_list; struct qtnf_sta_node *node; if (unlikely(!mac)) @@ -77,13 +78,15 @@ struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list, ether_addr_copy(node->mac_addr, mac); list_add_tail(&node->list, &list->head); atomic_inc(&list->size); + ++vif->generation; done: return node; } -bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac) +bool qtnf_sta_list_del(struct qtnf_vif *vif, const u8 *mac) { + struct qtnf_sta_list *list = &vif->sta_list; struct qtnf_sta_node *node; bool ret = false; @@ -93,6 +96,7 @@ bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac) list_del(&node->list); atomic_dec(&list->size); kfree(node); + ++vif->generation; ret = true; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.h b/drivers/net/wireless/quantenna/qtnfmac/util.h index 0359eae8c24b..0d4d92b11540 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/util.h +++ b/drivers/net/wireless/quantenna/qtnfmac/util.h @@ -26,9 +26,9 @@ struct qtnf_sta_node *qtnf_sta_list_lookup(struct qtnf_sta_list *list, const u8 *mac); struct qtnf_sta_node *qtnf_sta_list_lookup_index(struct qtnf_sta_list *list, size_t index); -struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list, +struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_vif *vif, const u8 *mac); -bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac); +bool qtnf_sta_list_del(struct qtnf_vif *vif, const u8 *mac); void qtnf_sta_list_free(struct qtnf_sta_list *list); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index d2c289446c00..429d07b651dd 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -4903,7 +4903,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) min_sleep = 2000; break; default: - WARN_ONCE(1, "Not supported RF chipet %x for VCO recalibration", + WARN_ONCE(1, "Not supported RF chipset %x for VCO recalibration", rt2x00dev->chip.rf); 
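For context on the two helpers added to qlink_util above: qtnf_utils_is_bit_set() tests one bit position (for example an &enum qlink_sta_info value) inside a variable-length bitmap such as a QTN_TLV_ID_STA_STATS_MAP payload, and struct qlink_acl_data ends in a zero-length mac_addrs[0] member, so QLINK_CMD_SET_MAC_ACL buffers must be sized as header plus entries before qlink_acl_data_cfg2q() can fill them. A hedged sketch of both call sites; the sta_stats_field_valid()/qlink_acl_size() names are illustrative, not from this patch, and assume qlink.h/qlink_util.h are in scope:

/* Valid-bit check: map/map_len come straight from a STA_STATS_MAP TLV;
 * bits beyond the end of the payload safely read back as false.
 */
static bool sta_stats_field_valid(const u8 *map, size_t map_len,
				  unsigned int bit)
{
	return qtnf_utils_is_bit_set(map, bit, map_len);
}

/* Payload sizing for QLINK_CMD_SET_MAC_ACL: the flexible mac_addrs[]
 * tail contributes n_acl_entries * sizeof(struct qlink_mac_address)
 * bytes on the wire, on top of the fixed policy/num_entries header.
 */
static size_t qlink_acl_size(const struct cfg80211_acl_data *acl)
{
	return sizeof(struct qlink_acl_data) +
	       acl->n_acl_entries * sizeof(struct qlink_mac_address);
}

A command buffer sized by qlink_acl_size() can then be handed to qlink_acl_data_cfg2q(), which writes the policy, the entry count and the address array in place.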
return; } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index ecc96312a370..a971bc7a6b63 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -142,32 +142,28 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, if (!rt2x00dev->ops->hw->set_rts_threshold && (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS | IEEE80211_TX_RC_USE_CTS_PROTECT))) { - if (rt2x00queue_available(queue) <= 1) - goto exit_fail; + if (rt2x00queue_available(queue) <= 1) { + /* + * Recheck for full queue under lock to avoid race + * conditions with rt2x00lib_txdone(). + */ + spin_lock(&queue->tx_lock); + if (rt2x00queue_threshold(queue)) + rt2x00queue_pause_queue(queue); + spin_unlock(&queue->tx_lock); + + goto exit_free_skb; + } if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) - goto exit_fail; + goto exit_free_skb; } if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false))) - goto exit_fail; - - /* - * Pausing queue has to be serialized with rt2x00lib_txdone(). Note - * we should not use spin_lock_bh variant as bottom halve was already - * disabled before ieee80211_xmit() call. - */ - spin_lock(&queue->tx_lock); - if (rt2x00queue_threshold(queue)) - rt2x00queue_pause_queue(queue); - spin_unlock(&queue->tx_lock); + goto exit_free_skb; return; - exit_fail: - spin_lock(&queue->tx_lock); - rt2x00queue_pause_queue(queue); - spin_unlock(&queue->tx_lock); exit_free_skb: ieee80211_free_txskb(hw, skb); } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index a2c1ca5c76d1..a6884e73d2ab 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -715,6 +715,14 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, rt2x00queue_kick_tx_queue(queue, &txdesc); out: + /* + * Pausing queue has to be serialized with rt2x00lib_txdone(), so we + * do this under queue->tx_lock. Bottom halve was already disabled + * before ieee80211_xmit() call. + */ + if (rt2x00queue_threshold(queue)) + rt2x00queue_pause_queue(queue); + spin_unlock(&queue->tx_lock); return ret; } @@ -1244,10 +1252,8 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev) rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim; queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL); - if (!queue) { - rt2x00_err(rt2x00dev, "Queue allocation failed\n"); + if (!queue) return -ENOMEM; - } /* * Initialize pointers diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c index e6668ffb77e6..ff0971f1e2c8 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c @@ -688,10 +688,7 @@ static void rtl8225z2_b_rf_set_tx_power(struct ieee80211_hw *dev, int channel) cck_power = priv->channels[channel - 1].hw_value & 0xF; ofdm_power = priv->channels[channel - 1].hw_value >> 4; - if (cck_power > 15) - cck_power = (priv->hw_rev == RTL8187BvB) ? 15 : 22; - else - cck_power += (priv->hw_rev == RTL8187BvB) ? 0 : 7; + cck_power += (priv->hw_rev == RTL8187BvB) ? 
0 : 7; cck_power += priv->txpwr_base & 0xF; cck_power = min(cck_power, (u8)35); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 95e3993d8a33..8828baf26e7b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1172,7 +1172,7 @@ struct rtl8723bu_c2h { u8 basic_rate:1; u8 bt_has_reset:1; - u8 dummy4_1:1;; + u8 dummy4_1:1; u8 ignore_wlan:1; u8 auto_report:1; u8 dummy4_2:3; diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index cad2272ae21b..d6c03bd5cc65 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -244,7 +244,8 @@ static void _rtl_init_hw_vht_capab(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); - if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE || + rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) { u16 mcs_map; vht_cap->vht_supported = true; @@ -395,6 +396,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) ieee80211_hw_set(hw, CONNECTION_MONITOR); ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); + ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); /* swlps or hwlps has been set in diff chip in init_sw_vars */ if (rtlpriv->psc.swctrl_lps) { @@ -551,7 +553,8 @@ int rtl_init_core(struct ieee80211_hw *hw) /* <4> locks */ mutex_init(&rtlpriv->locks.conf_mutex); - spin_lock_init(&rtlpriv->locks.ips_lock); + mutex_init(&rtlpriv->locks.ips_mutex); + mutex_init(&rtlpriv->locks.lps_mutex); spin_lock_init(&rtlpriv->locks.irq_th_lock); spin_lock_init(&rtlpriv->locks.h2c_lock); spin_lock_init(&rtlpriv->locks.rf_ps_lock); @@ -561,9 +564,7 @@ int rtl_init_core(struct ieee80211_hw *hw) spin_lock_init(&rtlpriv->locks.c2hcmd_lock); spin_lock_init(&rtlpriv->locks.scan_list_lock); spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock); - spin_lock_init(&rtlpriv->locks.check_sendpkt_lock); spin_lock_init(&rtlpriv->locks.fw_ps_lock); - spin_lock_init(&rtlpriv->locks.lps_lock); spin_lock_init(&rtlpriv->locks.iqk_lock); /* <5> init list */ INIT_LIST_HEAD(&rtlpriv->entry_list); @@ -697,14 +698,94 @@ static void _rtl_query_protection_mode(struct ieee80211_hw *hw, } } +u8 rtl_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index, + enum wireless_mode wirelessmode) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 ret = 0; + + switch (rate_index) { + case RATR_INX_WIRELESS_NGB: + if (rtlphy->rf_type == RF_1T1R) + ret = RATEID_IDX_BGN_40M_1SS; + else + ret = RATEID_IDX_BGN_40M_2SS; + break; + case RATR_INX_WIRELESS_N: + case RATR_INX_WIRELESS_NG: + if (rtlphy->rf_type == RF_1T1R) + ret = RATEID_IDX_GN_N1SS; + else + ret = RATEID_IDX_GN_N2SS; + break; + case RATR_INX_WIRELESS_NB: + if (rtlphy->rf_type == RF_1T1R) + ret = RATEID_IDX_BGN_20M_1SS_BN; + else + ret = RATEID_IDX_BGN_20M_2SS_BN; + break; + case RATR_INX_WIRELESS_GB: + ret = RATEID_IDX_BG; + break; + case RATR_INX_WIRELESS_G: + ret = RATEID_IDX_G; + break; + case RATR_INX_WIRELESS_B: + ret = RATEID_IDX_B; + break; + case RATR_INX_WIRELESS_MC: + if (wirelessmode == WIRELESS_MODE_B || + wirelessmode == WIRELESS_MODE_G || + wirelessmode == WIRELESS_MODE_N_24G || + wirelessmode == WIRELESS_MODE_AC_24G) + ret = RATEID_IDX_BG; + else + ret = RATEID_IDX_G; + break; + case RATR_INX_WIRELESS_AC_5N: + if (rtlphy->rf_type ==
RF_1T1R) + ret = RATEID_IDX_VHT_1SS; + else + ret = RATEID_IDX_VHT_2SS; + break; + case RATR_INX_WIRELESS_AC_24N: + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) { + if (rtlphy->rf_type == RF_1T1R) + ret = RATEID_IDX_VHT_1SS; + else + ret = RATEID_IDX_VHT_2SS; + } else { + if (rtlphy->rf_type == RF_1T1R) + ret = RATEID_IDX_MIX1; + else + ret = RATEID_IDX_MIX2; + } + break; + default: + ret = RATEID_IDX_BGN_40M_2SS; + break; + } + return ret; +} +EXPORT_SYMBOL(rtl_mrate_idx_to_arfr_id); + static void _rtl_txrate_selectmode(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct rtl_tcb_desc *tcb_desc) { +#define SET_RATE_ID(rate_id) \ + ({typeof(rate_id) _id = rate_id; \ + ((rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID) ? \ + rtl_mrate_idx_to_arfr_id(hw, _id, \ + (sta_entry ? sta_entry->wireless_mode : \ + WIRELESS_MODE_G)) : \ + _id); }) + struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_sta_info *sta_entry = NULL; - u8 ratr_index = 7; + u8 ratr_index = SET_RATE_ID(RATR_INX_WIRELESS_MC); if (sta) { sta_entry = (struct rtl_sta_info *) sta->drv_priv; @@ -719,7 +800,8 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw, tcb_desc->hw_rate = rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M]; tcb_desc->use_driver_rate = 1; - tcb_desc->ratr_index = RATR_INX_WIRELESS_MC; + tcb_desc->ratr_index = + SET_RATE_ID(RATR_INX_WIRELESS_MC); } else { tcb_desc->ratr_index = ratr_index; } @@ -735,22 +817,30 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw, mac->opmode == NL80211_IFTYPE_MESH_POINT) { tcb_desc->mac_id = 0; - if (mac->mode == WIRELESS_MODE_AC_5G) + if (sta && + (rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID)) + ; /* use sta_entry->ratr_index */ + else if (mac->mode == WIRELESS_MODE_AC_5G) tcb_desc->ratr_index = - RATR_INX_WIRELESS_AC_5N; + SET_RATE_ID(RATR_INX_WIRELESS_AC_5N); else if (mac->mode == WIRELESS_MODE_AC_24G) tcb_desc->ratr_index = - RATR_INX_WIRELESS_AC_24N; + SET_RATE_ID(RATR_INX_WIRELESS_AC_24N); else if (mac->mode == WIRELESS_MODE_N_24G) - tcb_desc->ratr_index = RATR_INX_WIRELESS_NGB; + tcb_desc->ratr_index = + SET_RATE_ID(RATR_INX_WIRELESS_NGB); else if (mac->mode == WIRELESS_MODE_N_5G) - tcb_desc->ratr_index = RATR_INX_WIRELESS_NG; + tcb_desc->ratr_index = + SET_RATE_ID(RATR_INX_WIRELESS_NG); else if (mac->mode & WIRELESS_MODE_G) - tcb_desc->ratr_index = RATR_INX_WIRELESS_GB; + tcb_desc->ratr_index = + SET_RATE_ID(RATR_INX_WIRELESS_GB); else if (mac->mode & WIRELESS_MODE_B) - tcb_desc->ratr_index = RATR_INX_WIRELESS_B; + tcb_desc->ratr_index = + SET_RATE_ID(RATR_INX_WIRELESS_B); else if (mac->mode & WIRELESS_MODE_A) - tcb_desc->ratr_index = RATR_INX_WIRELESS_G; + tcb_desc->ratr_index = + SET_RATE_ID(RATR_INX_WIRELESS_G); } else if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { @@ -764,6 +854,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw, } } } +#undef SET_RATE_ID } static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw, @@ -859,8 +950,8 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw, struct rtl_phy *rtlphy = &rtlpriv->phy; u8 hw_rate; - if ((get_rf_type(rtlphy) == RF_2T2R) && - (sta->ht_cap.mcs.rx_mask[1] != 0)) + if (get_rf_type(rtlphy) == RF_2T2R && + sta->ht_cap.mcs.rx_mask[1] != 0) hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15]; else hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7]; @@ -1140,9 +1231,19 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc) { +#define 
SET_RATE_ID(rate_id) \ + ({typeof(rate_id) _id = rate_id; \ + ((rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID) ? \ + rtl_mrate_idx_to_arfr_id(hw, _id, \ + (sta_entry ? sta_entry->wireless_mode : \ + WIRELESS_MODE_G)) : \ + _id); }) + struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); struct ieee80211_hdr *hdr = rtl_get_hdr(skb); + struct rtl_sta_info *sta_entry = + (sta ? (struct rtl_sta_info *)sta->drv_priv : NULL); __le16 fc = rtl_get_fc(skb); @@ -1165,7 +1266,8 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw, if (info->control.rates[0].idx == 0 || ieee80211_is_nullfunc(fc)) { tcb_desc->use_driver_rate = true; - tcb_desc->ratr_index = RATR_INX_WIRELESS_MC; + tcb_desc->ratr_index = + SET_RATE_ID(RATR_INX_WIRELESS_MC); tcb_desc->disable_ratefallback = 1; } else { @@ -1180,7 +1282,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw, tcb_desc->hw_rate = _rtl_get_vht_highest_n_rate(hw, sta); } else { - if (sta && (sta->ht_cap.ht_supported)) { + if (sta && sta->ht_cap.ht_supported) { tcb_desc->hw_rate = _rtl_get_highest_n_rate(hw, sta); } else { @@ -1207,11 +1309,12 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw, _rtl_query_protection_mode(hw, tcb_desc, info); } else { tcb_desc->use_driver_rate = true; - tcb_desc->ratr_index = RATR_INX_WIRELESS_MC; + tcb_desc->ratr_index = SET_RATE_ID(RATR_INX_WIRELESS_MC); tcb_desc->disable_ratefallback = 1; tcb_desc->mac_id = 0; tcb_desc->packet_bw = false; } +#undef SET_RATE_ID } EXPORT_SYMBOL(rtl_get_tcb_desc); @@ -1229,7 +1332,6 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb) } if (ieee80211_is_auth(fc)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n"); - rtl_ips_nic_on(hw); mac->link_state = MAC80211_LINKING; /* Dual mac */ @@ -1321,6 +1423,10 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) le16_to_cpu(mgmt->u.action.u.addba_req.capab); tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; + if (tid >= MAX_TID_COUNT) { + rcu_read_unlock(); + return true; + } tid_data = &sta_entry->tids[tid]; if (tid_data->agg.rx_agg_state == RTL_RX_AGG_START) @@ -1726,7 +1832,7 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw, void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv) { struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; - u8 reject_agg, ctrl_agg_size = 0, agg_size; + u8 reject_agg = 0, ctrl_agg_size = 0, agg_size = 0; if (rtlpriv->cfg->ops->get_btc_status()) btc_ops->btc_get_ampdu_cfg(rtlpriv, &reject_agg, @@ -1972,9 +2078,9 @@ void rtl_watchdog_wq_callback(void *data) rtlpriv->btcoexist.btc_ops->btc_is_bt_ctrl_lps(rtlpriv)) goto label_lps_done; - if (((rtlpriv->link_info.num_rx_inperiod + - rtlpriv->link_info.num_tx_inperiod) > 8) || - (rtlpriv->link_info.num_rx_inperiod > 2)) + if (rtlpriv->link_info.num_rx_inperiod + + rtlpriv->link_info.num_tx_inperiod > 8 || + rtlpriv->link_info.num_rx_inperiod > 2) rtl_lps_leave(hw); else rtl_lps_enter(hw); @@ -2225,9 +2331,7 @@ static struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw, case IEEE80211_SMPS_AUTOMATIC:/* 0 */ case IEEE80211_SMPS_NUM_MODES:/* 4 */ WARN_ON(1); - /* Here will get a 'MISSING_BREAK' in Coverity Test, just ignore it. - * According to Kernel Code, here is right.
- */ + /* fall through */ case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/ action_frame->u.action.u.ht_smps.smps_control = WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */ @@ -2528,6 +2632,9 @@ static int __init rtl_core_module_init(void) if (rtl_rate_control_register()) pr_err("rtl: Unable to register rtl_rc, use default RC !!\n"); + /* add debugfs */ + rtl_debugfs_add_topdir(); + /* init some global vars */ INIT_LIST_HEAD(&rtl_global_var.glb_priv_list); spin_lock_init(&rtl_global_var.glb_list_lock); @@ -2539,6 +2646,9 @@ static void __exit rtl_core_module_exit(void) { /*RC*/ rtl_rate_control_unregister(); + + /* remove debugfs */ + rtl_debugfs_remove_topdir(); } module_init(rtl_core_module_init); diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index 26735319b38f..acc924635818 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -162,6 +162,8 @@ void rtl_c2hcmd_wq_callback(void *data); void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec); void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val); +u8 rtl_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index, + enum wireless_mode wirelessmode); void rtl_get_tcb_desc(struct ieee80211_hw *hw, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c index 44c25724529e..8fce371749d3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c @@ -2704,11 +2704,11 @@ void ex_btc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist) btc8192e2ant_init_coex_dm(btcoexist); } -void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) +void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist, + struct seq_file *m) { struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; - struct rtl_priv *rtlpriv = btcoexist->adapter; u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0; u16 u16tmp[4]; u32 u32tmp[4]; @@ -2719,75 +2719,64 @@ void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) u8 wifi_dot11_chnl, wifi_hs_chnl; u32 fw_ver = 0, bt_patch_ver = 0; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[BT Coexist info]============"); + seq_puts(m, "\n ============[BT Coexist info]============"); if (btcoexist->manual_control) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ===========[Under Manual Control]==========="); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n =========================================="); - } - - if (!board_info->bt_exist) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!"); - return; + seq_puts(m, "\n ===========[Under Manual Control]==========="); + seq_puts(m, "\n =========================================="); } - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", + seq_printf(m, "\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", board_info->pg_ant_num, board_info->btdm_ant_num); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d", - "BT stack/ hci ext ver", + seq_printf(m, "\n %-35s = %s / %d", "BT stack/ hci ext ver", ((stack_info->profile_notified) ? 
"Yes" : "No"), stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)", - "CoexVer/ FwVer/ PatchVer", - glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant, - fw_ver, bt_patch_ver, bt_patch_ver); + seq_printf(m, "\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)", + "CoexVer/ FwVer/ PatchVer", + glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_dot11_chnl); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)", - "Dot11 channel / HsMode(HsChnl)", - wifi_dot11_chnl, bt_hs_on, wifi_hs_chnl); + seq_printf(m, "\n %-35s = %d / %d(%d)", + "Dot11 channel / HsMode(HsChnl)", + wifi_dot11_chnl, bt_hs_on, wifi_hs_chnl); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ", - "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info); + seq_printf(m, "\n %-35s = %3ph ", + "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi); + seq_printf(m, "\n %-35s = %d/ %d", + "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ", - "Wifi link/ roam/ scan", link, roam, scan); + seq_printf(m, "\n %-35s = %d/ %d/ %d ", + "Wifi link/ roam/ scan", link, roam, scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ", - "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), + seq_printf(m, "\n %-35s = %s / %s/ %s ", + "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" : (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))), ((!wifi_busy) ? "idle" : ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ? "uplink" : "downlink"))); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ", - "BT [status/ rssi/ retryCnt]", + seq_printf(m, "\n %-35s = [%s/ %d/ %d] ", + "BT [status/ rssi/ retryCnt]", ((btcoexist->bt_info.bt_disabled) ? ("disabled") : ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") : @@ -2797,131 +2786,129 @@ void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) coex_dm->bt_status) ? 
"connected-idle" : "busy")))), coex_sta->bt_rssi, coex_sta->bt_retry_cnt); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d", - "SCO/HID/PAN/A2DP", stack_info->sco_exist, + seq_printf(m, "\n %-35s = %d / %d / %d / %d", + "SCO/HID/PAN/A2DP", stack_info->sco_exist, stack_info->hid_exist, stack_info->pan_exist, stack_info->a2dp_exist); - btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m); bt_info_ext = coex_sta->bt_info_ext; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s", - "BT Info A2DP rate", + seq_printf(m, "\n %-35s = %s", + "BT Info A2DP rate", (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate"); for (i = 0; i < BT_INFO_SRC_8192E_2ANT_MAX; i++) { if (coex_sta->bt_info_c2h_cnt[i]) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %7ph(%d)", - glbt_info_src_8192e_2ant[i], - coex_sta->bt_info_c2h[i], - coex_sta->bt_info_c2h_cnt[i]); + seq_printf(m, "\n %-35s = %7ph(%d)", + glbt_info_src_8192e_2ant[i], + coex_sta->bt_info_c2h[i], + coex_sta->bt_info_c2h_cnt[i]); } } - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s", - "PS state, IPS/LPS", - ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), - ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); - btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); + seq_printf(m, "\n %-35s = %s/%s", + "PS state, IPS/LPS", + ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), + ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "SS Type", - coex_dm->cur_ss_type); + seq_printf(m, "\n %-35s = 0x%x ", "SS Type", + coex_dm->cur_ss_type); /* Sw mechanism */ - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Sw mechanism]============"); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ", - "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink, - coex_dm->cur_low_penalty_ra, coex_dm->limited_dig); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ", - "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", - coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, - coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "Rate Mask", - btcoexist->bt_info.ra_mask); + seq_printf(m, "\n %-35s", + "============[Sw mechanism]============"); + seq_printf(m, "\n %-35s = %d/ %d/ %d ", + "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink, + coex_dm->cur_low_penalty_ra, coex_dm->limited_dig); + seq_printf(m, "\n %-35s = %d/ %d/ %d(0x%x) ", + "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", + coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, + coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); + + seq_printf(m, "\n %-35s = 0x%x ", "Rate Mask", + btcoexist->bt_info.ra_mask); /* Fw mechanism */ - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Fw mechanism]============"); + seq_printf(m, "\n %-35s", + "============[Fw mechanism]============"); ps_tdma_case = coex_dm->cur_ps_tdma; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %5ph case-%d (auto:%d)", - "PS TDMA", coex_dm->ps_tdma_para, - ps_tdma_case, coex_dm->auto_tdma_adjust); + seq_printf(m, + "\n %-35s = %5ph case-%d (auto:%d)", + "PS TDMA", coex_dm->ps_tdma_para, + ps_tdma_case, coex_dm->auto_tdma_adjust); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ", - "DecBtPwr/ IgnWlanAct", - 
coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act); + seq_printf(m, "\n %-35s = %d/ %d ", + "DecBtPwr/ IgnWlanAct", + coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act); /* Hw setting */ - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Hw setting]============"); + seq_printf(m, "\n %-35s", + "============[Hw setting]============"); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", - "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup); + seq_printf(m, "\n %-35s = 0x%x", + "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", - "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1, - coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit, - coex_dm->backup_ampdu_maxtime); + seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1, + coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit, + coex_dm->backup_ampdu_maxtime); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434); u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", - "0x430/0x434/0x42a/0x456", - u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]); + seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "0x430/0x434/0x42a/0x456", + u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc04); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xd04); u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x90c); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0x778", - u8tmp[0]); + seq_printf(m, "\n %-35s = 0x%x", "0x778", u8tmp[0]); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", + "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40); u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x40/ 0x4f", u8tmp[0], u8tmp[1]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", + "0x40/ 0x4f", u8tmp[0], u8tmp[1]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", + "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0xc50(dig)", - u32tmp[0]); + seq_printf(m, "\n %-35s = 0x%x", "0xc50(dig)", + u32tmp[0]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 
0x%x", - "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", - u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]); - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "0x770(hp rx[31:16]/tx[15:0])", - coex_sta->high_priority_rx, coex_sta->high_priority_tx); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "0x774(lp rx[31:16]/tx[15:0])", - coex_sta->low_priority_rx, coex_sta->low_priority_tx); + seq_printf(m, + "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", + u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]); + + seq_printf(m, "\n %-35s = %d/ %d", + "0x770(hp rx[31:16]/tx[15:0])", + coex_sta->high_priority_rx, coex_sta->high_priority_tx); + seq_printf(m, "\n %-35s = %d/ %d", + "0x774(lp rx[31:16]/tx[15:0])", + coex_sta->low_priority_rx, coex_sta->low_priority_tx); if (btcoexist->auto_report_2ant) btc8192e2ant_monitor_bt_ctr(btcoexist); - btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m); } void ex_btc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h index 65502acee52c..b8c95c7afc06 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h @@ -180,4 +180,5 @@ void ex_btc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist, u8 type); void ex_btc8192e2ant_halt_notify(struct btc_coexist *btcoexist); void ex_btc8192e2ant_periodical(struct btc_coexist *btcoexist); -void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist); +void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist, + struct seq_file *m); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c index 5f726f6d3567..fd3b1fb35dff 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c @@ -2474,12 +2474,12 @@ void ex_btc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist) halbtc8723b1ant_query_bt_info(btcoexist); } -void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) +void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist, + struct seq_file *m) { struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - struct rtl_priv *rtlpriv = btcoexist->adapter; u8 u8tmp[4], i, bt_info_ext, pstdmacase = 0; u16 u16tmp[4]; u32 u32tmp[4]; @@ -2491,62 +2491,56 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) u8 wifi_dot11_chnl, wifi_hs_chnl; u32 fw_ver = 0, bt_patch_ver = 0; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[BT Coexist info]============"); + seq_puts(m, "\n ============[BT Coexist info]============"); if (btcoexist->manual_control) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[Under Manual Control]=========="); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n =========================================="); + seq_puts(m, "\n ============[Under Manual Control]=========="); + seq_puts(m, "\n =========================================="); } if (btcoexist->stop_coex_dm) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[Coex is 
STOPPED]============"); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n =========================================="); + seq_puts(m, "\n ============[Coex is STOPPED]============"); + seq_puts(m, "\n =========================================="); } - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d", - "Ant PG Num/ Ant Mech/ Ant Pos:", - board_info->pg_ant_num, board_info->btdm_ant_num, - board_info->btdm_ant_pos); + seq_printf(m, "\n %-35s = %d/ %d/ %d", + "Ant PG Num/ Ant Mech/ Ant Pos:", + board_info->pg_ant_num, board_info->btdm_ant_num, + board_info->btdm_ant_pos); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d", - "BT stack/ hci ext ver", - ((stack_info->profile_notified) ? "Yes" : "No"), - stack_info->hci_version); + seq_printf(m, "\n %-35s = %s / %d", + "BT stack/ hci ext ver", + ((stack_info->profile_notified) ? "Yes" : "No"), + stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", - "CoexVer/ FwVer/ PatchVer", - glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant, - fw_ver, bt_patch_ver, bt_patch_ver); + seq_printf(m, "\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", + "CoexVer/ FwVer/ PatchVer", + glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant, + fw_ver, bt_patch_ver, bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_dot11_chnl); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)", - "Dot11 channel / HsChnl(HsMode)", - wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); + seq_printf(m, "\n %-35s = %d / %d(%d)", + "Dot11 channel / HsChnl(HsMode)", + wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ", - "H2C Wifi inform bt chnl Info", - coex_dm->wifi_chnl_info); + seq_printf(m, "\n %-35s = %3ph ", + "H2C Wifi inform bt chnl Info", + coex_dm->wifi_chnl_info); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi); + seq_printf(m, "\n %-35s = %d/ %d", + "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ", - "Wifi link/ roam/ scan", link, roam, scan); + seq_printf(m, "\n %-35s = %d/ %d/ %d ", + "Wifi link/ roam/ scan", link, roam, scan); btcoexist->btc_get(btcoexist , BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); @@ -2555,110 +2549,106 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ", - "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), - ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" : - ((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20")), - ((!wifi_busy) ? "idle" : - ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ? - "uplink" : "downlink"))); + seq_printf(m, "\n %-35s = %s / %s/ %s ", + "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), + ((wifi_bw == BTC_WIFI_BW_LEGACY) ? 
"Legacy" : + ((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20")), + ((!wifi_busy) ? "idle" : + ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ? + "uplink" : "downlink"))); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS, &wifi_link_status); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d/ %d/ %d", - "sta/vwifi/hs/p2pGo/p2pGc", - ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0), - ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0), - ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0), - ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0), - ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0)); - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ", - "BT [status/ rssi/ retryCnt]", - ((coex_sta->bt_disabled) ? ("disabled") : - ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") : - ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == - coex_dm->bt_status) ? - "non-connected idle" : - ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == - coex_dm->bt_status) ? - "connected-idle" : "busy")))), - coex_sta->bt_rssi, coex_sta->bt_retry_cnt); - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d / %d / %d / %d", - "SCO/HID/PAN/A2DP", bt_link_info->sco_exist, - bt_link_info->hid_exist, bt_link_info->pan_exist, - bt_link_info->a2dp_exist); - btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); + seq_printf(m, "\n %-35s = %d/ %d/ %d/ %d/ %d", + "sta/vwifi/hs/p2pGo/p2pGc", + ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0)); + + seq_printf(m, "\n %-35s = [%s/ %d/ %d] ", + "BT [status/ rssi/ retryCnt]", + ((coex_sta->bt_disabled) ? ("disabled") : + ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") : + ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == + coex_dm->bt_status) ? + "non-connected idle" : + ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status) ? + "connected-idle" : "busy")))), + coex_sta->bt_rssi, coex_sta->bt_retry_cnt); + + seq_printf(m, "\n %-35s = %d / %d / %d / %d", + "SCO/HID/PAN/A2DP", bt_link_info->sco_exist, + bt_link_info->hid_exist, bt_link_info->pan_exist, + bt_link_info->a2dp_exist); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m); bt_info_ext = coex_sta->bt_info_ext; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s", - "BT Info A2DP rate", - (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate"); + seq_printf(m, "\n %-35s = %s", + "BT Info A2DP rate", + (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate"); for (i = 0; i < BT_INFO_SRC_8723B_1ANT_MAX; i++) { if (coex_sta->bt_info_c2h_cnt[i]) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %7ph(%d)", - glbt_info_src_8723b_1ant[i], - coex_sta->bt_info_c2h[i], - coex_sta->bt_info_c2h_cnt[i]); + seq_printf(m, "\n %-35s = %7ph(%d)", + glbt_info_src_8723b_1ant[i], + coex_sta->bt_info_c2h[i], + coex_sta->bt_info_c2h_cnt[i]); } } - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s/%s, (0x%x/0x%x)", - "PS state, IPS/LPS, (lps/rpwm)", - ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), - ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")), - btcoexist->bt_info.lps_val, - btcoexist->bt_info.rpwm_val); - btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); + seq_printf(m, "\n %-35s = %s/%s, (0x%x/0x%x)", + "PS state, IPS/LPS, (lps/rpwm)", + ((coex_sta->under_ips ? 
"IPS ON" : "IPS OFF")), + ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")), + btcoexist->bt_info.lps_val, + btcoexist->bt_info.rpwm_val); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m); if (!btcoexist->manual_control) { /* Sw mechanism */ - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Sw mechanism]============"); + seq_printf(m, "\n %-35s", + "============[Sw mechanism]============"); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/", - "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra); + seq_printf(m, "\n %-35s = %d/", + "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/ %s/ %d ", - "DelBA/ BtCtrlAgg/ AggSize", + seq_printf(m, "\n %-35s = %s/ %s/ %d ", + "DelBA/ BtCtrlAgg/ AggSize", (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"), (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"), btcoexist->bt_info.agg_buf_size); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", - "Rate Mask", btcoexist->bt_info.ra_mask); + seq_printf(m, "\n %-35s = 0x%x ", + "Rate Mask", btcoexist->bt_info.ra_mask); /* Fw mechanism */ - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Fw mechanism]============"); + seq_printf(m, "\n %-35s", + "============[Fw mechanism]============"); pstdmacase = coex_dm->cur_ps_tdma; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %5ph case-%d (auto:%d)", + seq_printf(m, "\n %-35s = %5ph case-%d (auto:%d)", "PS TDMA", coex_dm->ps_tdma_para, pstdmacase, coex_dm->auto_tdma_adjust); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d ", - "IgnWlanAct", coex_dm->cur_ignore_wlan_act); + seq_printf(m, "\n %-35s = %d ", + "IgnWlanAct", coex_dm->cur_ignore_wlan_act); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", - "Latest error condition(should be 0)", + seq_printf(m, "\n %-35s = 0x%x ", + "Latest error condition(should be 0)", coex_dm->error_condition); } - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d", - "Coex Table Type", coex_sta->coex_table_type); + seq_printf(m, "\n %-35s = %d", + "Coex Table Type", coex_sta->coex_table_type); /* Hw setting */ - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Hw setting]============"); + seq_printf(m, "\n %-35s", + "============[Hw setting]============"); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", - "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1, + seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1, coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit, coex_dm->backup_ampdu_max_time); @@ -2666,50 +2656,49 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434); u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", - "0x430/0x434/0x42a/0x456", - u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]); + seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "0x430/0x434/0x42a/0x456", + u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6cc); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x880); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0], - (u32tmp[1] & 
0x3e000000) >> 25); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0], + (u32tmp[1] & 0x3e000000) >> 25); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67); u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x948/ 0x67[5] / 0x765", - u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x948/ 0x67[5] / 0x765", + u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930); u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", - u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", + u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39); u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c); u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", - "0x38[11]/0x40/0x4c[24:23]/0x64[0]", - ((u8tmp[0] & 0x8) >> 3), u8tmp[1], - ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x38[11]/0x40/0x4c[24:23]/0x64[0]", + ((u8tmp[0] & 0x8) >> 3), u8tmp[1], + ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", + "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", + "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4); @@ -2727,26 +2716,26 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) (u32tmp[3] & 0xffff); fa_cck = (u8tmp[0] << 8) + u8tmp[1]; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "OFDM-CCA/OFDM-FA/CCK-FA", + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x", + "OFDM-CCA/OFDM-FA/CCK-FA", u32tmp[0] & 0xffff, fa_ofdm, fa_cck); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x6c0/0x6c4/0x6c8(coexTable)", - u32tmp[0], u32tmp[1], u32tmp[2]); - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "0x770(high-pri rx/tx)", coex_sta->high_priority_rx, - coex_sta->high_priority_tx); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "0x774(low-pri rx/tx)", coex_sta->low_priority_rx, - coex_sta->low_priority_tx); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8(coexTable)", + 
u32tmp[0], u32tmp[1], u32tmp[2]); + + seq_printf(m, "\n %-35s = %d/ %d", + "0x770(high-pri rx/tx)", coex_sta->high_priority_rx, + coex_sta->high_priority_tx); + seq_printf(m, "\n %-35s = %d/ %d", + "0x774(low-pri rx/tx)", coex_sta->low_priority_rx, + coex_sta->low_priority_tx); if (btcoexist->auto_report_1ant) halbtc8723b1ant_monitor_bt_ctr(btcoexist); - btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m); } void ex_btc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h index 8d4fde235e11..934f27893c16 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h @@ -220,5 +220,6 @@ void ex_btc8723b1ant_halt_notify(struct btc_coexist *btcoexist); void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate); void ex_btc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist); void ex_btc8723b1ant_periodical(struct btc_coexist *btcoexist); -void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist); +void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist, + struct seq_file *m); void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index e8f07573aed9..4907c2ffadfe 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -3780,12 +3780,12 @@ void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist) btc8723b2ant_init_coex_dm(btcoexist); } -void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) +void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist, + struct seq_file *m) { struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - struct rtl_priv *rtlpriv = btcoexist->adapter; u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0; u32 u32tmp[4]; bool roam = false, scan = false; @@ -3797,173 +3797,161 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) u32 fw_ver = 0, bt_patch_ver = 0; u8 ap_num = 0; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[BT Coexist info]============"); + seq_puts(m, "\n ============[BT Coexist info]============"); if (btcoexist->manual_control) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ==========[Under Manual Control]============"); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n =========================================="); - } - - if (!board_info->bt_exist) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!"); - return; + seq_puts(m, "\n ==========[Under Manual Control]============"); + seq_puts(m, "\n =========================================="); } - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ", - "Ant PG number/ Ant mechanism:", - board_info->pg_ant_num, board_info->btdm_ant_num); + seq_printf(m, "\n %-35s = %d/ %d ", + "Ant PG number/ Ant mechanism:", + board_info->pg_ant_num, board_info->btdm_ant_num); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d", - "BT stack/ hci ext ver", - 
((stack_info->profile_notified) ? "Yes" : "No"), - stack_info->hci_version); + seq_printf(m, "\n %-35s = %s / %d", + "BT stack/ hci ext ver", + ((stack_info->profile_notified) ? "Yes" : "No"), + stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", - "CoexVer/ FwVer/ PatchVer", - glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant, - fw_ver, bt_patch_ver, bt_patch_ver); + seq_printf(m, "\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", + "CoexVer/ FwVer/ PatchVer", + glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_dot11_chnl); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)", - "Dot11 channel / HsChnl(HsMode)", - wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); + seq_printf(m, "\n %-35s = %d / %d(%d)", + "Dot11 channel / HsChnl(HsMode)", + wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ", - "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info); + seq_printf(m, "\n %-35s = %3ph ", + "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d", - "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num); + seq_printf(m, "\n %-35s = %d/ %d/ %d", + "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ", - "Wifi link/ roam/ scan", link, roam, scan); + seq_printf(m, "\n %-35s = %d/ %d/ %d ", + "Wifi link/ roam/ scan", link, roam, scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ", - "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), + seq_printf(m, "\n %-35s = %s / %s/ %s ", + "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" : (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))), ((!wifi_busy) ? "idle" : ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ? 
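
A note on the %3ph and %7ph specifiers used throughout these dumps: %ph is the kernel's printf extension that prints a byte buffer as space-separated hex, with the field width giving the byte count (capped at 64), so no per-byte loop is needed. A minimal illustration with made-up payload bytes:

	u8 chnl_info[3] = { 0x0b, 0x01, 0x00 };	/* hypothetical C2H payload */

	/* renders as "... = 0b 01 00" */
	seq_printf(m, "\n %-35s = %3ph", "H2C Wifi inform bt chnl Info",
		   chnl_info);
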
"uplink" : "downlink"))); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d", - "SCO/HID/PAN/A2DP", - bt_link_info->sco_exist, bt_link_info->hid_exist, - bt_link_info->pan_exist, bt_link_info->a2dp_exist); - btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); + seq_printf(m, "\n %-35s = %d / %d / %d / %d", + "SCO/HID/PAN/A2DP", + bt_link_info->sco_exist, bt_link_info->hid_exist, + bt_link_info->pan_exist, bt_link_info->a2dp_exist); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m); bt_info_ext = coex_sta->bt_info_ext; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s", - "BT Info A2DP rate", - (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate"); + seq_printf(m, "\n %-35s = %s", + "BT Info A2DP rate", + (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate"); for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) { if (coex_sta->bt_info_c2h_cnt[i]) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %7ph(%d)", - glbt_info_src_8723b_2ant[i], - coex_sta->bt_info_c2h[i], - coex_sta->bt_info_c2h_cnt[i]); + seq_printf(m, "\n %-35s = %7ph(%d)", + glbt_info_src_8723b_2ant[i], + coex_sta->bt_info_c2h[i], + coex_sta->bt_info_c2h_cnt[i]); } } - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s", - "PS state, IPS/LPS", - ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), - ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); - btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); + seq_printf(m, "\n %-35s = %s/%s", + "PS state, IPS/LPS", + ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), + ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m); /* Sw mechanism */ - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s", "============[Sw mechanism]============"); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ", - "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink, - coex_dm->cur_low_penalty_ra, coex_dm->limited_dig); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ", - "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", - coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, - coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); + seq_printf(m, + "\n %-35s", "============[Sw mechanism]============"); + seq_printf(m, "\n %-35s = %d/ %d/ %d ", + "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink, + coex_dm->cur_low_penalty_ra, coex_dm->limited_dig); + seq_printf(m, "\n %-35s = %d/ %d/ %d(0x%x) ", + "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", + coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, + coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); /* Fw mechanism */ - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Fw mechanism]============"); + seq_printf(m, "\n %-35s", + "============[Fw mechanism]============"); ps_tdma_case = coex_dm->cur_ps_tdma; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %5ph case-%d (auto:%d)", - "PS TDMA", coex_dm->ps_tdma_para, - ps_tdma_case, coex_dm->auto_tdma_adjust); + seq_printf(m, "\n %-35s = %5ph case-%d (auto:%d)", + "PS TDMA", coex_dm->ps_tdma_para, + ps_tdma_case, coex_dm->auto_tdma_adjust); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ", - "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr_lvl, - coex_dm->cur_ignore_wlan_act); + seq_printf(m, "\n %-35s = %d/ %d ", + "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr_lvl, + coex_dm->cur_ignore_wlan_act); /* Hw setting */ - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - 
"============[Hw setting]============"); + seq_printf(m, "\n %-35s", + "============[Hw setting]============"); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", - "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup); + seq_printf(m, "\n %-35s = 0x%x", + "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x778/0x880[29:25]", u8tmp[0], - (u32tmp[0] & 0x3e000000) >> 25); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", + "0x778/0x880[29:25]", u8tmp[0], + (u32tmp[0] & 0x3e000000) >> 25); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67); u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x948/ 0x67[5] / 0x765", - u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x948/ 0x67[5] / 0x765", + u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930); u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", - u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", + u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39); u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c); u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", - "0x38[11]/0x40/0x4c[24:23]/0x64[0]", - ((u8tmp[0] & 0x8) >> 3), u8tmp[1], - ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x38[11]/0x40/0x4c[24:23]/0x64[0]", + ((u8tmp[0] & 0x8) >> 3), u8tmp[1], + ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", + "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", + "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4); @@ -3981,29 +3969,27 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) (u32tmp[3] & 0xffff); fa_cck = (u8tmp[0] << 8) + u8tmp[1]; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "OFDM-CCA/OFDM-FA/CCK-FA", - u32tmp[0] & 0xffff, fa_ofdm, fa_cck); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x", + "OFDM-CCA/OFDM-FA/CCK-FA", + u32tmp[0] & 0xffff, fa_ofdm, fa_cck); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); u32tmp[2] = 
btcoexist->btc_read_4byte(btcoexist, 0x6c8); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", - "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", - u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]); - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "0x770(high-pri rx/tx)", - coex_sta->high_priority_rx, coex_sta->high_priority_tx); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "0x774(low-pri rx/tx)", coex_sta->low_priority_rx, - coex_sta->low_priority_tx); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", + u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]); + + seq_printf(m, "\n %-35s = %d/ %d", + "0x770(high-pri rx/tx)", + coex_sta->high_priority_rx, coex_sta->high_priority_tx); + seq_printf(m, "\n %-35s = %d/ %d", + "0x774(low-pri rx/tx)", coex_sta->low_priority_rx, + coex_sta->low_priority_tx); if (btcoexist->auto_report_2ant) btc8723b2ant_monitor_bt_ctr(btcoexist); - btcoexist->btc_disp_dbg_msg(btcoexist, - BTC_DBG_DISP_COEX_STATISTICS); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m); } void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h index bc1e3042e271..aa24da402fb9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h @@ -195,7 +195,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmpbuf, u8 length); void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist); void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist); -void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist); +void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist, + struct seq_file *m); void ex_btc8723b2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state); void ex_btc8723b2ant_pre_load_firmware(struct btc_coexist *btcoexist); void ex_btc8723b2ant_power_on_setting(struct btc_coexist *btcoexist); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c index 4efac5fe9982..0b26419881c0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c @@ -2172,12 +2172,12 @@ void ex_btc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist) btc8821a1ant_query_bt_info(btcoexist); } -void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) +void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist, + struct seq_file *m) { struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - struct rtl_priv *rtlpriv = btcoexist->adapter; u8 u1_tmp[4], i, bt_info_ext, ps_tdma_case = 0; u16 u2_tmp[4]; u32 u4_tmp[4]; @@ -2188,49 +2188,36 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) u8 wifi_dot11_chnl, wifi_hs_chnl; u32 fw_ver = 0, bt_patch_ver = 0; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[BT Coexist info]============"); + seq_puts(m, "\n ============[BT Coexist info]============"); if (btcoexist->manual_control) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n 
============[Under Manual Control]============"); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n =========================================="); + seq_puts(m, "\n ============[Under Manual Control]============"); + seq_puts(m, "\n =========================================="); } if (btcoexist->stop_coex_dm) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[Coex is STOPPED]============"); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n =========================================="); - } - - if (!board_info->bt_exist) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!"); - return; + seq_puts(m, "\n ============[Coex is STOPPED]============"); + seq_puts(m, "\n =========================================="); } - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d/ %d", - "Ant PG Num/ Ant Mech/ Ant Pos:", - board_info->pg_ant_num, - board_info->btdm_ant_num, - board_info->btdm_ant_pos); + seq_printf(m, "\n %-35s = %d/ %d/ %d", + "Ant PG Num/ Ant Mech/ Ant Pos:", + board_info->pg_ant_num, + board_info->btdm_ant_num, + board_info->btdm_ant_pos); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", - ((stack_info->profile_notified) ? "Yes" : "No"), - stack_info->hci_version); + seq_printf(m, "\n %-35s = %s / %d", "BT stack/ hci ext ver", + ((stack_info->profile_notified) ? "Yes" : "No"), + stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", - "CoexVer/ FwVer/ PatchVer", - glcoex_ver_date_8821a_1ant, - glcoex_ver_8821a_1ant, - fw_ver, bt_patch_ver, - bt_patch_ver); + seq_printf(m, "\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", + "CoexVer/ FwVer/ PatchVer", + glcoex_ver_date_8821a_1ant, + glcoex_ver_8821a_1ant, + fw_ver, bt_patch_ver, + bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); @@ -2238,28 +2225,24 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) &wifi_dot11_chnl); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d / %d(%d)", - "Dot11 channel / HsChnl(HsMode)", - wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); + seq_printf(m, "\n %-35s = %d / %d(%d)", + "Dot11 channel / HsChnl(HsMode)", + wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %3ph ", - "H2C Wifi inform bt chnl Info", - coex_dm->wifi_chnl_info); + seq_printf(m, "\n %-35s = %3ph ", + "H2C Wifi inform bt chnl Info", + coex_dm->wifi_chnl_info); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", - (int)wifi_rssi, (int)bt_hs_rssi); + seq_printf(m, "\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", + (int)wifi_rssi, (int)bt_hs_rssi); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan", - link, roam, scan); + seq_printf(m, "\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan", + link, roam, scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); @@ -2269,16 +2252,15 @@ void 
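
The pattern repeated across all of these display_coex_info() routines is the same: each formerly logged its dump line-by-line via RT_TRACE, and now writes into a struct seq_file supplied by the caller, so the whole report can be read back coherently. A plausible consumer is a debugfs file built on single_open(); the sketch below shows the standard wiring under that assumption, where the file name, parent dentry, and btc_coex_info_show() dispatcher are illustrative, not taken from this patch:

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	static int btc_coex_info_show(struct seq_file *m, void *v)
	{
		struct btc_coexist *btcoexist = m->private;

		/* a real dispatcher would pick the chip-specific handler;
		 * the 8723B 1-antenna variant is shown as an example */
		ex_btc8723b1ant_display_coex_info(btcoexist, m);
		return 0;
	}

	static int btc_coex_info_open(struct inode *inode, struct file *file)
	{
		return single_open(file, btc_coex_info_show, inode->i_private);
	}

	static const struct file_operations btc_coex_info_fops = {
		.owner		= THIS_MODULE,
		.open		= btc_coex_info_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	/* e.g. at init:
	 * debugfs_create_file("btcoexist", 0444, parent, btcoexist,
	 *		       &btc_coex_info_fops);
	 */
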
ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) &wifi_busy); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s / %s/ %s ", "Wifi status", - (wifi_under_5g ? "5G" : "2.4G"), - ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" : - (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))), - ((!wifi_busy) ? "idle" : - ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ? - "uplink" : "downlink"))); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", + seq_printf(m, "\n %-35s = %s / %s/ %s ", "Wifi status", + (wifi_under_5g ? "5G" : "2.4G"), + ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" : + (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))), + ((!wifi_busy) ? "idle" : + ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ? + "uplink" : "downlink"))); + seq_printf(m, "\n %-35s = [%s/ %d/ %d] ", + "BT [status/ rssi/ retryCnt]", ((coex_sta->bt_disabled) ? ("disabled") : ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") : ((BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == @@ -2289,166 +2271,143 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) "connected-idle" : "busy")))), coex_sta->bt_rssi, coex_sta->bt_retry_cnt); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", - bt_link_info->sco_exist, - bt_link_info->hid_exist, - bt_link_info->pan_exist, - bt_link_info->a2dp_exist); - btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); + seq_printf(m, "\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", + bt_link_info->sco_exist, + bt_link_info->hid_exist, + bt_link_info->pan_exist, + bt_link_info->a2dp_exist); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m); bt_info_ext = coex_sta->bt_info_ext; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s", - "BT Info A2DP rate", - (bt_info_ext & BIT0) ? - "Basic rate" : "EDR rate"); + seq_printf(m, "\n %-35s = %s", + "BT Info A2DP rate", + (bt_info_ext & BIT0) ? + "Basic rate" : "EDR rate"); for (i = 0; i < BT_INFO_SRC_8821A_1ANT_MAX; i++) { if (coex_sta->bt_info_c2h_cnt[i]) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %7ph(%d)", - glbt_info_src_8821a_1ant[i], - coex_sta->bt_info_c2h[i], - coex_sta->bt_info_c2h_cnt[i]); + seq_printf(m, "\n %-35s = %7ph(%d)", + glbt_info_src_8821a_1ant[i], + coex_sta->bt_info_c2h[i], + coex_sta->bt_info_c2h_cnt[i]); } } - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s/%s, (0x%x/0x%x)", - "PS state, IPS/LPS, (lps/rpwm)", - ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), - ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")), - btcoexist->bt_info.lps_val, - btcoexist->bt_info.rpwm_val); - btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); + seq_printf(m, "\n %-35s = %s/%s, (0x%x/0x%x)", + "PS state, IPS/LPS, (lps/rpwm)", + ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), + ((coex_sta->under_lps ? 
"LPS ON" : "LPS OFF")), + btcoexist->bt_info.lps_val, + btcoexist->bt_info.rpwm_val); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m); if (!btcoexist->manual_control) { /* Sw mechanism*/ - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s", - "============[Sw mechanism]============"); - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d", "SM[LowPenaltyRA]", - coex_dm->cur_low_penalty_ra); - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s/ %s/ %d ", - "DelBA/ BtCtrlAgg/ AggSize", - (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"), - (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"), - btcoexist->bt_info.agg_buf_size); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x ", "Rate Mask", - btcoexist->bt_info.ra_mask); + seq_printf(m, "\n %-35s", + "============[Sw mechanism]============"); + + seq_printf(m, "\n %-35s = %d", "SM[LowPenaltyRA]", + coex_dm->cur_low_penalty_ra); + + seq_printf(m, "\n %-35s = %s/ %s/ %d ", + "DelBA/ BtCtrlAgg/ AggSize", + (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"), + (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"), + btcoexist->bt_info.agg_buf_size); + seq_printf(m, "\n %-35s = 0x%x ", "Rate Mask", + btcoexist->bt_info.ra_mask); /* Fw mechanism */ - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Fw mechanism]============"); + seq_printf(m, "\n %-35s", + "============[Fw mechanism]============"); ps_tdma_case = coex_dm->cur_ps_tdma; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %5ph case-%d (auto:%d)", - "PS TDMA", - coex_dm->ps_tdma_para, - ps_tdma_case, - coex_dm->auto_tdma_adjust); - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x ", - "Latest error condition(should be 0)", + seq_printf(m, "\n %-35s = %5ph case-%d (auto:%d)", + "PS TDMA", + coex_dm->ps_tdma_para, + ps_tdma_case, + coex_dm->auto_tdma_adjust); + + seq_printf(m, "\n %-35s = 0x%x ", + "Latest error condition(should be 0)", coex_dm->error_condition); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d ", "IgnWlanAct", - coex_dm->cur_ignore_wlan_act); + seq_printf(m, "\n %-35s = %d ", "IgnWlanAct", + coex_dm->cur_ignore_wlan_act); } /* Hw setting */ - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s", "============[Hw setting]============"); + seq_printf(m, "\n %-35s", "============[Hw setting]============"); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", - "backup ARFR1/ARFR2/RL/AMaxTime", - coex_dm->backup_arfr_cnt1, - coex_dm->backup_arfr_cnt2, - coex_dm->backup_retry_limit, - coex_dm->backup_ampdu_max_time); + seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "backup ARFR1/ARFR2/RL/AMaxTime", + coex_dm->backup_arfr_cnt1, + coex_dm->backup_arfr_cnt2, + coex_dm->backup_retry_limit, + coex_dm->backup_ampdu_max_time); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430); u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434); u2_tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", - "0x430/0x434/0x42a/0x456", - u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]); + seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "0x430/0x434/0x42a/0x456", + u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc58); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]", - 
u1_tmp[0], (u4_tmp[0] & 0x3e000000) >> 25); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]", + u1_tmp[0], (u4_tmp[0] & 0x3e000000) >> 25); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x", "0x8db[6:5]", - ((u1_tmp[0] & 0x60) >> 5)); + seq_printf(m, "\n %-35s = 0x%x", "0x8db[6:5]", + ((u1_tmp[0] & 0x60) >> 5)); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x975); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]", - (u4_tmp[0] & 0x30000000) >> 28, - u4_tmp[0] & 0xff, - u1_tmp[0] & 0x3); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]", + (u4_tmp[0] & 0x30000000) >> 28, + u4_tmp[0] & 0xff, + u1_tmp[0] & 0x3); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c); u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x64); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x40/0x4c[24:23]/0x64[0]", - u1_tmp[0], ((u4_tmp[0] & 0x01800000) >> 23), u1_tmp[1] & 0x1); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x40/0x4c[24:23]/0x64[0]", + u1_tmp[0], ((u4_tmp[0] & 0x01800000) >> 23), + u1_tmp[1] & 0x1); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", - u4_tmp[0], u1_tmp[0]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", + u4_tmp[0], u1_tmp[0]); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x", "0xc50(dig)", - u4_tmp[0] & 0xff); + seq_printf(m, "\n %-35s = 0x%x", "0xc50(dig)", + u4_tmp[0] & 0xff); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5d); u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA", - u4_tmp[0], (u1_tmp[0] << 8) + u1_tmp[1]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA", + u4_tmp[0], (u1_tmp[0] << 8) + u1_tmp[1]); u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); u4_tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", - "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", u4_tmp[0], u4_tmp[1], u4_tmp[2], u1_tmp[0]); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)", - coex_sta->high_priority_rx, coex_sta->high_priority_tx); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)", - coex_sta->low_priority_rx, coex_sta->low_priority_tx); + seq_printf(m, "\n %-35s = %d/ %d", "0x770(high-pri rx/tx)", + coex_sta->high_priority_rx, coex_sta->high_priority_tx); + seq_printf(m, "\n %-35s = %d/ %d", "0x774(low-pri rx/tx)", + coex_sta->low_priority_rx, coex_sta->low_priority_tx); if (btcoexist->auto_report_1ant) btc8821a1ant_monitor_bt_ctr(btcoexist); - btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, 
m); } void ex_btc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h index b0a6626fbb66..a498ff5b1b9c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h @@ -186,7 +186,8 @@ void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, void ex_btc8821a1ant_halt_notify(struct btc_coexist *btcoexist); void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate); void ex_btc8821a1ant_periodical(struct btc_coexist *btcoexist); -void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist); +void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist, + struct seq_file *m); void ex_btc8821a1ant_dbg_control(struct btc_coexist *btcoexist, u8 op_code, u8 op_len, u8 *data); void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c index 41943c34edff..d5f282cb9d24 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c @@ -3657,11 +3657,11 @@ void ex_btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist) btc8821a2ant_init_coex_dm(btcoexist); } -void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist) +void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist, + struct seq_file *m) { struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; - struct rtl_priv *rtlpriv = btcoexist->adapter; u8 u1tmp[4], i, bt_info_ext, ps_tdma_case = 0; u32 u4tmp[4]; bool roam = false, scan = false, link = false, wifi_under_5g = false; @@ -3671,32 +3671,22 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist) u8 wifi_dot_11_chnl, wifi_hs_chnl; u32 fw_ver = 0, bt_patch_ver = 0; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n ============[BT Coexist info]============"); - - if (!board_info->bt_exist) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!"); - return; - } + seq_puts(m, "\n ============[BT Coexist info]============"); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", - board_info->pg_ant_num, board_info->btdm_ant_num); + seq_printf(m, "\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", + board_info->pg_ant_num, board_info->btdm_ant_num); if (btcoexist->manual_control) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s", "[Action Manual control]!!"); + seq_printf(m, "\n %-35s", "[Action Manual control]!!"); } - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", + seq_printf(m, "\n %-35s = %s / %d", "BT stack/ hci ext ver", ((stack_info->profile_notified) ? 
"Yes" : "No"), stack_info->hci_version); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)", + seq_printf(m, "\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)", "CoexVer/ FwVer/ PatchVer", glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant, fw_ver, bt_patch_ver, bt_patch_ver); @@ -3707,27 +3697,23 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist) BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_dot_11_chnl); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d / %d(%d)", + seq_printf(m, "\n %-35s = %d / %d(%d)", "Dot11 channel / HsMode(HsChnl)", wifi_dot_11_chnl, bt_hs_on, wifi_hs_chnl); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %3ph ", + seq_printf(m, "\n %-35s = %3ph ", "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi", + seq_printf(m, "\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan", + seq_printf(m, "\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan", link, roam, scan); btcoexist->btc_get(btcoexist, @@ -3738,8 +3724,7 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist) BTC_GET_BL_WIFI_BUSY, &wifi_busy); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %s / %s/ %s ", "Wifi status", + seq_printf(m, "\n %-35s = %s / %s/ %s ", "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" : (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))), @@ -3748,134 +3733,128 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist) "uplink" : "downlink"))); if (stack_info->profile_notified) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", + seq_printf(m, "\n %-35s = %d / %d / %d / %d", + "SCO/HID/PAN/A2DP", stack_info->sco_exist, stack_info->hid_exist, stack_info->pan_exist, stack_info->a2dp_exist); btcoexist->btc_disp_dbg_msg(btcoexist, - BTC_DBG_DISP_BT_LINK_INFO); + BTC_DBG_DISP_BT_LINK_INFO, + m); } bt_info_ext = coex_sta->bt_info_ext; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s", - "BT Info A2DP rate", + seq_printf(m, "\n %-35s = %s", "BT Info A2DP rate", (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate"); for (i = 0; i < BT_INFO_SRC_8821A_2ANT_MAX; i++) { if (coex_sta->bt_info_c2h_cnt[i]) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %7ph(%d)", - glbt_info_src_8821a_2ant[i], - coex_sta->bt_info_c2h[i], - coex_sta->bt_info_c2h_cnt[i]); + seq_printf(m, "\n %-35s = %7ph(%d)", + glbt_info_src_8821a_2ant[i], + coex_sta->bt_info_c2h[i], + coex_sta->bt_info_c2h_cnt[i]); } } - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s", - "PS state, IPS/LPS", - ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), - ((coex_sta->under_lps ? 
"LPS ON" : "LPS OFF"))); - btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); + seq_printf(m, "\n %-35s = %s/%s", + "PS state, IPS/LPS", + ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), + ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m); /* Sw mechanism*/ - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Sw mechanism]============"); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d/ %d(0x%x) ", - "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", - coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, - coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); + seq_printf(m, "\n %-35s", + "============[Sw mechanism]============"); + seq_printf(m, "\n %-35s = %d/ %d/ %d(0x%x) ", + "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", + coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, + coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); /* Fw mechanism*/ - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", - "============[Fw mechanism]============"); + seq_printf(m, "\n %-35s", + "============[Fw mechanism]============"); if (!btcoexist->manual_control) { ps_tdma_case = coex_dm->cur_ps_tdma; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %5ph case-%d", - "PS TDMA", - coex_dm->ps_tdma_para, ps_tdma_case); - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", - coex_dm->cur_dec_bt_pwr_lvl, - coex_dm->cur_ignore_wlan_act); + seq_printf(m, "\n %-35s = %5ph case-%d", + "PS TDMA", + coex_dm->ps_tdma_para, ps_tdma_case); + + seq_printf(m, "\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", + coex_dm->cur_dec_bt_pwr_lvl, + coex_dm->cur_ignore_wlan_act); } /* Hw setting*/ - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s", "============[Hw setting]============"); + seq_printf(m, "\n %-35s", "============[Hw setting]============"); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", - coex_dm->bt_rf0x1e_backup); + seq_printf(m, "\n %-35s = 0x%x", "RF-A, 0x1e initVal", + coex_dm->bt_rf0x1e_backup); u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x ", - "0x778 (W_Act)/ 0x6cc (CoTab Sel)", - u1tmp[0], u1tmp[1]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x ", + "0x778 (W_Act)/ 0x6cc (CoTab Sel)", + u1tmp[0], u1tmp[1]); u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db); u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xc5b); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x8db(ADC)/0xc5b[29:25](DAC)", - ((u1tmp[0] & 0x60) >> 5), ((u1tmp[1] & 0x3e) >> 1)); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", + "0x8db(ADC)/0xc5b[29:25](DAC)", + ((u1tmp[0] & 0x60) >> 5), ((u1tmp[1] & 0x3e) >> 1)); u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)", - u4tmp[0] & 0xff, ((u4tmp[0] & 0x30000000) >> 28)); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", + "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)", + u4tmp[0] & 0xff, ((u4tmp[0] & 0x30000000) >> 28)); u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40); u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c); u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x974); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x40/ 0x4c[24:23]/ 0x974", - u1tmp[0], ((u4tmp[0] & 0x01800000) >> 23), u4tmp[1]); + 
seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x40/ 0x4c[24:23]/ 0x974", + u1tmp[0], ((u4tmp[0] & 0x01800000) >> 23), u4tmp[1]); u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0x550(bcn ctrl)/0x522", - u4tmp[0], u1tmp[0]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", + "0x550(bcn ctrl)/0x522", + u4tmp[0], u1tmp[0]); u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa0a); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "0xc50(DIG)/0xa0a(CCK-TH)", - u4tmp[0], u1tmp[0]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", + "0xc50(DIG)/0xa0a(CCK-TH)", + u4tmp[0], u1tmp[0]); u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48); u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b); u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", - "OFDM-FA/ CCK-FA", - u4tmp[0], (u1tmp[0] << 8) + u1tmp[1]); + seq_printf(m, "\n %-35s = 0x%x/ 0x%x", + "OFDM-FA/ CCK-FA", + u4tmp[0], (u1tmp[0] << 8) + u1tmp[1]); u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); u4tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", - "0x6c0/0x6c4/0x6c8", - u4tmp[0], u4tmp[1], u4tmp[2]); - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "0x770 (hi-pri Rx/Tx)", - coex_sta->high_priority_rx, coex_sta->high_priority_tx); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", + seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8", + u4tmp[0], u4tmp[1], u4tmp[2]); + + seq_printf(m, "\n %-35s = %d/ %d", + "0x770 (hi-pri Rx/Tx)", + coex_sta->high_priority_rx, coex_sta->high_priority_tx); + seq_printf(m, "\n %-35s = %d/ %d", "0x774(low-pri Rx/Tx)", coex_sta->low_priority_rx, coex_sta->low_priority_tx); /* Tx mgnt queue hang or not, 0x41b should = 0xf, ex: 0xd ==>hang*/ u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x41b); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", - "0x41b (mgntQ hang chk == 0xf)", - u1tmp[0]); + seq_printf(m, "\n %-35s = 0x%x", + "0x41b (mgntQ hang chk == 0xf)", + u1tmp[0]); - btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m); } void ex_btc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h index a839d5574422..ce3e58c4e660 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h @@ -226,7 +226,8 @@ ex_btc8821a2ant_periodical( ); void ex_btc8821a2ant_display_coex_info( - struct btc_coexist *btcoexist + struct btc_coexist *btcoexist, + struct seq_file *m ); void ex_btc8821a2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state); void ex_btc8821a2ant_pre_load_firmware(struct btc_coexist *btcoexist); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index b5e9877d935c..1404729441a2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ 
-25,19 +25,11 @@ #include "halbt_precomp.h" -/*********************************************** - * Global variables - ***********************************************/ - -struct btc_coexist gl_bt_coexist; - -u32 btc_dbg_type[BTC_MSG_MAX]; - /*************************************************** * Debug related function ***************************************************/ -const char *const gl_btc_wifi_bw_string[] = { +static const char *const gl_btc_wifi_bw_string[] = { "11bg", "HT20", "HT40", @@ -45,7 +37,7 @@ const char *const gl_btc_wifi_bw_string[] = { "HT160" }; -const char *const gl_btc_wifi_freq_string[] = { +static const char *const gl_btc_wifi_freq_string[] = { "2.4G", "5G" }; @@ -103,21 +95,6 @@ static bool is_any_client_connect_to_ap(struct btc_coexist *btcoexist) return false; } -static bool halbtc_is_bt40(struct rtl_priv *adapter) -{ - struct rtl_priv *rtlpriv = adapter; - struct rtl_phy *rtlphy = &(rtlpriv->phy); - bool is_ht40 = true; - enum ht_channel_width bw = rtlphy->current_chan_bw; - - if (bw == HT_CHANNEL_WIDTH_20) - is_ht40 = false; - else if (bw == HT_CHANNEL_WIDTH_20_40) - is_ht40 = true; - - return is_ht40; -} - static bool halbtc_legacy(struct rtl_priv *adapter) { struct rtl_priv *rtlpriv = adapter; @@ -143,18 +120,26 @@ bool halbtc_is_wifi_uplink(struct rtl_priv *adapter) static u32 halbtc_get_wifi_bw(struct btc_coexist *btcoexist) { - struct rtl_priv *rtlpriv = - (struct rtl_priv *)btcoexist->adapter; + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_phy *rtlphy = &rtlpriv->phy; u32 wifi_bw = BTC_WIFI_BW_HT20; - if (halbtc_is_bt40(rtlpriv)) { - wifi_bw = BTC_WIFI_BW_HT40; + if (halbtc_legacy(rtlpriv)) { + wifi_bw = BTC_WIFI_BW_LEGACY; } else { - if (halbtc_legacy(rtlpriv)) - wifi_bw = BTC_WIFI_BW_LEGACY; - else + switch (rtlphy->current_chan_bw) { + case HT_CHANNEL_WIDTH_20: wifi_bw = BTC_WIFI_BW_HT20; + break; + case HT_CHANNEL_WIDTH_20_40: + wifi_bw = BTC_WIFI_BW_HT40; + break; + case HT_CHANNEL_WIDTH_80: + wifi_bw = BTC_WIFI_BW_HT80; + break; + } } + return wifi_bw; } @@ -171,7 +156,7 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist) return chnl; } -u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv) +static u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv) { struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; @@ -186,12 +171,12 @@ u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv) return rtlpriv->btcoexist.btc_info.single_ant_path; } -u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv) +static u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv) { return rtlpriv->btcoexist.btc_info.bt_type; } -u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) +static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) { struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; u8 num; @@ -208,13 +193,117 @@ u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) return num; } -u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv) +static u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv) { struct rtl_hal *rtlhal = rtl_hal(rtlpriv); return rtlhal->package_type; } +static +u8 rtl_get_hwpg_rfe_type(struct rtl_priv *rtlpriv) +{ + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + return rtlhal->rfe_type; +} + +static +bool halbtc_is_hw_mailbox_exist(struct btc_coexist *btcoexist) +{ + if (IS_HARDWARE_TYPE_8812(btcoexist->adapter)) + return false; + else + return true; +} + +static +bool halbtc_send_bt_mp_operation(struct btc_coexist *btcoexist, u8 op_code, + u8 *cmd, u32 len, unsigned long wait_ms) +{ + struct rtl_priv 
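/*
 * Sketch of the round trip this function implements, pieced together from
 * the surrounding hunks: the low nibble of cmd[0] carries OperVer (always
 * 0 here) and the high nibble the ReqNum chosen from op_code below, while
 * cmd[1] carries the op_code itself.  The request goes out as H2C element
 * 0x67, and the C2H reply is parsed in rtl_btc_btmpinfo_notify(), which
 * stores the payload keyed by the same seq number and completes
 * bt_mp_comp.  The completion is re-armed *before* the H2C is sent so a
 * fast reply cannot be lost, and the wait is skipped in interrupt context
 * because wait_for_completion_timeout() may sleep.
 */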
*rtlpriv; + const u8 oper_ver = 0; + u8 req_num; + + if (!halbtc_is_hw_mailbox_exist(btcoexist)) + return false; + + if (wait_ms) /* before h2c to avoid race condition */ + reinit_completion(&btcoexist->bt_mp_comp); + + rtlpriv = btcoexist->adapter; + + /* fill req_num by op_code, and rtl_btc_btmpinfo_notify() use it + * to know message type + */ + switch (op_code) { + case BT_OP_GET_BT_VERSION: + req_num = BT_SEQ_GET_BT_VERSION; + break; + case BT_OP_GET_AFH_MAP_L: + req_num = BT_SEQ_GET_AFH_MAP_L; + break; + case BT_OP_GET_AFH_MAP_M: + req_num = BT_SEQ_GET_AFH_MAP_M; + break; + case BT_OP_GET_AFH_MAP_H: + req_num = BT_SEQ_GET_AFH_MAP_H; + break; + case BT_OP_GET_BT_COEX_SUPPORTED_FEATURE: + req_num = BT_SEQ_GET_BT_COEX_SUPPORTED_FEATURE; + break; + case BT_OP_GET_BT_COEX_SUPPORTED_VERSION: + req_num = BT_SEQ_GET_BT_COEX_SUPPORTED_VERSION; + break; + case BT_OP_GET_BT_ANT_DET_VAL: + req_num = BT_SEQ_GET_BT_ANT_DET_VAL; + break; + case BT_OP_GET_BT_BLE_SCAN_PARA: + req_num = BT_SEQ_GET_BT_BLE_SCAN_PARA; + break; + case BT_OP_GET_BT_BLE_SCAN_TYPE: + req_num = BT_SEQ_GET_BT_BLE_SCAN_TYPE; + break; + case BT_OP_GET_BT_DEVICE_INFO: + req_num = BT_SEQ_GET_BT_DEVICE_INFO; + break; + case BT_OP_GET_BT_FORBIDDEN_SLOT_VAL: + req_num = BT_SEQ_GET_BT_FORB_SLOT_VAL; + break; + case BT_OP_WRITE_REG_ADDR: + case BT_OP_WRITE_REG_VALUE: + case BT_OP_READ_REG: + default: + req_num = BT_SEQ_DONT_CARE; + break; + } + + cmd[0] |= (oper_ver & 0x0f); /* Set OperVer */ + cmd[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */ + cmd[1] = op_code; + rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, len, cmd); + + /* wait? */ + if (!wait_ms) + return true; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "btmpinfo wait req_num=%d wait=%ld\n", req_num, wait_ms); + + if (in_interrupt()) + return false; + + if (wait_for_completion_timeout(&btcoexist->bt_mp_comp, + msecs_to_jiffies(wait_ms)) == 0) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, + "btmpinfo wait (req_num=%d) timeout\n", req_num); + + return false; /* timeout */ + } + + return true; +} + static void halbtc_leave_lps(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv; @@ -342,25 +431,80 @@ static void halbtc_aggregation_check(struct btc_coexist *btcoexist) static u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist) { - struct rtl_priv *rtlpriv = btcoexist->adapter; u8 cmd_buffer[4] = {0}; - u8 oper_ver = 0; - u8 req_num = 0x0E; if (btcoexist->bt_info.bt_real_fw_ver) goto label_done; - cmd_buffer[0] |= (oper_ver & 0x0f); /* Set OperVer */ - cmd_buffer[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */ - cmd_buffer[1] = 0; /* BT_OP_GET_BT_VERSION = 0 */ - rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4, - &cmd_buffer[0]); + /* cmd_buffer[0] and [1] is filled by halbtc_send_bt_mp_operation() */ + halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_VERSION, + cmd_buffer, 4, 200); label_done: return btcoexist->bt_info.bt_real_fw_ver; } -u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist) +static u32 halbtc_get_bt_coex_supported_feature(void *btc_context) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context; + u8 cmd_buffer[4] = {0}; + + if (btcoexist->bt_info.bt_supported_feature) + goto label_done; + + /* cmd_buffer[0] and [1] is filled by halbtc_send_bt_mp_operation() */ + halbtc_send_bt_mp_operation(btcoexist, + BT_OP_GET_BT_COEX_SUPPORTED_FEATURE, + cmd_buffer, 4, 200); + +label_done: + return btcoexist->bt_info.bt_supported_feature; +} + +static u32 
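/*
 * The BT-info getters below share one pattern: issue the matching BT_OP_*
 * request (skipped for the feature/version queries when the cached value
 * is already non-zero) and rely on the 200 ms completion wait inside
 * halbtc_send_bt_mp_operation() for rtl_btc_btmpinfo_notify() to fill the
 * bt_info field before it is returned.
 */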
halbtc_get_bt_coex_supported_version(void *btc_context) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context; + u8 cmd_buffer[4] = {0}; + + if (btcoexist->bt_info.bt_supported_version) + goto label_done; + + /* cmd_buffer[0] and [1] is filled by halbtc_send_bt_mp_operation() */ + halbtc_send_bt_mp_operation(btcoexist, + BT_OP_GET_BT_COEX_SUPPORTED_VERSION, + cmd_buffer, 4, 200); + +label_done: + return btcoexist->bt_info.bt_supported_version; +} + +static u32 halbtc_get_bt_device_info(void *btc_context) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context; + u8 cmd_buffer[4] = {0}; + + /* cmd_buffer[0] and [1] is filled by halbtc_send_bt_mp_operation() */ + halbtc_send_bt_mp_operation(btcoexist, + BT_OP_GET_BT_DEVICE_INFO, + cmd_buffer, 4, 200); + + return btcoexist->bt_info.bt_device_info; +} + +static u32 halbtc_get_bt_forbidden_slot_val(void *btc_context) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context; + u8 cmd_buffer[4] = {0}; + + /* cmd_buffer[0] and [1] is filled by halbtc_send_bt_mp_operation() */ + halbtc_send_bt_mp_operation(btcoexist, + BT_OP_GET_BT_FORBIDDEN_SLOT_VAL, + cmd_buffer, 4, 200); + + return btcoexist->bt_info.bt_forb_slot_val; +} + +static u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist) { /* return value: * [31:16] => connected port number @@ -521,6 +665,18 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf) case BTC_GET_U4_VENDOR: *u32_tmp = BTC_VENDOR_OTHER; break; + case BTC_GET_U4_SUPPORTED_VERSION: + *u32_tmp = halbtc_get_bt_coex_supported_version(btcoexist); + break; + case BTC_GET_U4_SUPPORTED_FEATURE: + *u32_tmp = halbtc_get_bt_coex_supported_feature(btcoexist); + break; + case BTC_GET_U4_BT_DEVICE_INFO: + *u32_tmp = halbtc_get_bt_device_info(btcoexist); + break; + case BTC_GET_U4_BT_FORBIDDEN_SLOT_VAL: + *u32_tmp = halbtc_get_bt_forbidden_slot_val(btcoexist); + break; case BTC_GET_U1_WIFI_DOT11_CHNL: *u8_tmp = rtlphy->current_channel; break; @@ -653,6 +809,104 @@ static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf) return ret; } +static void halbtc_display_coex_statistics(struct btc_coexist *btcoexist, + struct seq_file *m) +{ +} + +static void halbtc_display_bt_link_info(struct btc_coexist *btcoexist, + struct seq_file *m) +{ +} + +static void halbtc_display_wifi_status(struct btc_coexist *btcoexist, + struct seq_file *m) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + s32 wifi_rssi = 0, bt_hs_rssi = 0; + bool scan = false, link = false, roam = false, wifi_busy = false; + bool wifi_under_b_mode = false; + bool wifi_under_5g = false; + u32 wifi_bw = BTC_WIFI_BW_HT20; + u32 wifi_traffic_dir = BTC_WIFI_TRAFFIC_TX; + u32 wifi_freq = BTC_FREQ_2_4G; + u32 wifi_link_status = 0x0; + bool bt_hs_on = false, under_ips = false, under_lps = false; + bool low_power = false, dc_mode = false; + u8 wifi_chnl = 0, wifi_hs_chnl = 0; + u8 ap_num = 0; + + wifi_link_status = halbtc_get_wifi_link_status(btcoexist); + seq_printf(m, "\n %-35s = %d/ %d/ %d/ %d/ %d", + "STA/vWifi/HS/p2pGo/p2pGc", + ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 
1 : 0)); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_chnl); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); + seq_printf(m, "\n %-35s = %d / %d(%d)", + "Dot11 channel / HsChnl(High Speed)", + wifi_chnl, wifi_hs_chnl, bt_hs_on); + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); + btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); + seq_printf(m, "\n %-35s = %d/ %d", + "Wifi rssi/ HS rssi", + wifi_rssi - 100, bt_hs_rssi - 100); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + seq_printf(m, "\n %-35s = %d/ %d/ %d ", + "Wifi link/ roam/ scan", + link, roam, scan); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, + &wifi_traffic_dir); + btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num); + wifi_freq = (wifi_under_5g ? BTC_FREQ_5G : BTC_FREQ_2_4G); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_B_MODE, + &wifi_under_b_mode); + + seq_printf(m, "\n %-35s = %s / %s/ %s/ AP=%d ", + "Wifi freq/ bw/ traffic", + gl_btc_wifi_freq_string[wifi_freq], + ((wifi_under_b_mode) ? "11b" : + gl_btc_wifi_bw_string[wifi_bw]), + ((!wifi_busy) ? "idle" : ((BTC_WIFI_TRAFFIC_TX == + wifi_traffic_dir) ? "uplink" : + "downlink")), + ap_num); + + /* power status */ + dc_mode = true; /*TODO*/ + under_ips = rtlpriv->psc.inactive_pwrstate == ERFOFF ? 1 : 0; + under_lps = rtlpriv->psc.dot11_psmode == EACTIVE ? 0 : 1; + low_power = 0; /*TODO*/ + seq_printf(m, "\n %-35s = %s%s%s%s", + "Power Status", + (dc_mode ? "DC mode" : "AC mode"), + (under_ips ? ", IPS ON" : ""), + (under_lps ? ", LPS ON" : ""), + (low_power ? 
", 32k" : "")); + + seq_printf(m, + "\n %-35s = %02x %02x %02x %02x %02x %02x (0x%x/0x%x)", + "Power mode cmd(lps/rpwm)", + btcoexist->pwr_mode_val[0], btcoexist->pwr_mode_val[1], + btcoexist->pwr_mode_val[2], btcoexist->pwr_mode_val[3], + btcoexist->pwr_mode_val[4], btcoexist->pwr_mode_val[5], + btcoexist->bt_info.lps_val, + btcoexist->bt_info.rpwm_val); +} + /************************************************************ * IO related function ************************************************************/ @@ -726,7 +980,8 @@ static void halbtc_write_4byte(void *bt_context, u32 reg_addr, u32 data) rtl_write_dword(rtlpriv, reg_addr, data); } -void halbtc_write_local_reg_1byte(void *btc_context, u32 reg_addr, u8 data) +static void halbtc_write_local_reg_1byte(void *btc_context, u32 reg_addr, + u8 data) { struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context; struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -739,22 +994,6 @@ void halbtc_write_local_reg_1byte(void *btc_context, u32 reg_addr, u8 data) rtl_write_byte(rtlpriv, reg_addr, data); } -void halbtc_set_macreg(void *btc_context, u32 reg_addr, u32 bit_mask, u32 data) -{ - struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context; - struct rtl_priv *rtlpriv = btcoexist->adapter; - - rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data); -} - -u32 halbtc_get_macreg(void *btc_context, u32 reg_addr, u32 bit_mask) -{ - struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context; - struct rtl_priv *rtlpriv = btcoexist->adapter; - - return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask); -} - static void halbtc_set_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask, u32 data) { @@ -800,38 +1039,47 @@ static void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id, cmd_len, cmd_buf); } +static void halbtc_set_bt_reg(void *btc_context, u8 reg_type, u32 offset, u32 set_val) { struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context; - struct rtl_priv *rtlpriv = btcoexist->adapter; u8 cmd_buffer1[4] = {0}; u8 cmd_buffer2[4] = {0}; - u8 *addr_to_set = (u8 *)&offset; - u8 *value_to_set = (u8 *)&set_val; - u8 oper_ver = 0; - u8 req_num = 0; - if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) { - cmd_buffer1[0] |= (oper_ver & 0x0f); /* Set OperVer */ - cmd_buffer1[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */ - cmd_buffer1[1] = 0x0d; /* OpCode: BT_LO_OP_WRITE_REG_VALUE */ - cmd_buffer1[2] = value_to_set[0]; /* Set WriteRegValue */ - rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4, - &cmd_buffer1[0]); - - msleep(200); - req_num++; - - cmd_buffer2[0] |= (oper_ver & 0x0f); /* Set OperVer */ - cmd_buffer2[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */ - cmd_buffer2[1] = 0x0c; /* OpCode: BT_LO_OP_WRITE_REG_ADDR */ - cmd_buffer2[3] = addr_to_set[0]; /* Set WriteRegAddr */ - rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4, - &cmd_buffer2[0]); + /* cmd_buffer[0] and [1] is filled by halbtc_send_bt_mp_operation() */ + *((__le16 *)&cmd_buffer1[2]) = cpu_to_le16((u16)set_val); + if (!halbtc_send_bt_mp_operation(btcoexist, BT_OP_WRITE_REG_VALUE, + cmd_buffer1, 4, 200)) + return; + + /* cmd_buffer[0] and [1] is filled by halbtc_send_bt_mp_operation() */ + cmd_buffer2[2] = reg_type; + *((u8 *)&cmd_buffer2[3]) = (u8)offset; + halbtc_send_bt_mp_operation(btcoexist, BT_OP_WRITE_REG_ADDR, + cmd_buffer2, 4, 200); +} + +static void halbtc_display_dbg_msg(void *bt_context, u8 disp_type, + struct seq_file *m) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + + switch 
(disp_type) { + case BTC_DBG_DISP_COEX_STATISTICS: + halbtc_display_coex_statistics(btcoexist, m); + break; + case BTC_DBG_DISP_BT_LINK_INFO: + halbtc_display_bt_link_info(btcoexist, m); + break; + case BTC_DBG_DISP_WIFI_STATUS: + halbtc_display_wifi_status(btcoexist, m); + break; + default: + break; } } -bool halbtc_under_ips(struct btc_coexist *btcoexist) +static bool halbtc_under_ips(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); @@ -849,12 +1097,95 @@ bool halbtc_under_ips(struct btc_coexist *btcoexist) return false; } +static u8 halbtc_get_ant_det_val_from_bt(void *btc_context) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context; + u8 cmd_buffer[4] = {0}; + + /* cmd_buffer[0] and [1] is filled by halbtc_send_bt_mp_operation() */ + halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_ANT_DET_VAL, + cmd_buffer, 4, 200); + + /* need wait completion to return correct value */ + + return btcoexist->bt_info.bt_ant_det_val; +} + +static u8 halbtc_get_ble_scan_type_from_bt(void *btc_context) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context; + u8 cmd_buffer[4] = {0}; + + /* cmd_buffer[0] and [1] is filled by halbtc_send_bt_mp_operation() */ + halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_BLE_SCAN_TYPE, + cmd_buffer, 4, 200); + + /* need wait completion to return correct value */ + + return btcoexist->bt_info.bt_ble_scan_type; +} + +static u32 halbtc_get_ble_scan_para_from_bt(void *btc_context, u8 scan_type) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context; + u8 cmd_buffer[4] = {0}; + + /* cmd_buffer[0] and [1] is filled by halbtc_send_bt_mp_operation() */ + halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_BLE_SCAN_PARA, + cmd_buffer, 4, 200); + + /* need wait completion to return correct value */ + + return btcoexist->bt_info.bt_ble_scan_para; +} + +static bool halbtc_get_bt_afh_map_from_bt(void *btc_context, u8 map_type, + u8 *afh_map) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context; + u8 cmd_buffer[2] = {0}; + bool ret; + u32 *afh_map_l = (u32 *)afh_map; + u32 *afh_map_m = (u32 *)(afh_map + 4); + u16 *afh_map_h = (u16 *)(afh_map + 8); + + /* cmd_buffer[0] and [1] is filled by halbtc_send_bt_mp_operation() */ + ret = halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_AFH_MAP_L, + cmd_buffer, 2, 200); + if (!ret) + goto exit; + + *afh_map_l = btcoexist->bt_info.afh_map_l; + + /* cmd_buffer[0] and [1] is filled by halbtc_send_bt_mp_operation() */ + ret = halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_AFH_MAP_M, + cmd_buffer, 2, 200); + if (!ret) + goto exit; + + *afh_map_m = btcoexist->bt_info.afh_map_m; + + /* cmd_buffer[0] and [1] is filled by halbtc_send_bt_mp_operation() */ + ret = halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_AFH_MAP_H, + cmd_buffer, 2, 200); + if (!ret) + goto exit; + + *afh_map_h = btcoexist->bt_info.afh_map_h; + +exit: + return ret; +} + /***************************************************************** * Extern functions called by other module *****************************************************************/ -bool exhalbtc_initlize_variables(void) +bool exhalbtc_initlize_variables(struct rtl_priv *rtlpriv) { - struct btc_coexist *btcoexist = &gl_bt_coexist; + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return false; halbtc_dbg_init(); @@ -874,24 +1205,76 @@ bool exhalbtc_initlize_variables(void) btcoexist->btc_get_rf_reg = halbtc_get_rfreg; 
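/*
 * These assignments build the per-adapter ops vector: the chip-specific
 * coex algorithms (ex_btc8821a2ant_* and friends) never dereference
 * rtl_priv themselves, they reach the hardware only through these
 * pointers.  With the global gl_bt_coexist gone, callers first look the
 * context up, e.g.:
 *
 *	struct btc_coexist *btc = rtl_btc_coexist(rtlpriv);
 *
 *	if (btc)
 *		reg = btc->btc_read_4byte(btc, 0x550);
 */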
btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd; + btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg; btcoexist->btc_get = halbtc_get; btcoexist->btc_set = halbtc_set; btcoexist->btc_set_bt_reg = halbtc_set_bt_reg; - btcoexist->bt_info.bt_ctrl_buf_size = false; btcoexist->bt_info.agg_buf_size = 5; btcoexist->bt_info.increase_scan_dev_num = false; + + btcoexist->btc_get_bt_coex_supported_feature = + halbtc_get_bt_coex_supported_feature; + btcoexist->btc_get_bt_coex_supported_version = + halbtc_get_bt_coex_supported_version; + btcoexist->btc_get_ant_det_val_from_bt = halbtc_get_ant_det_val_from_bt; + btcoexist->btc_get_ble_scan_type_from_bt = + halbtc_get_ble_scan_type_from_bt; + btcoexist->btc_get_ble_scan_para_from_bt = + halbtc_get_ble_scan_para_from_bt; + btcoexist->btc_get_bt_afh_map_from_bt = + halbtc_get_bt_afh_map_from_bt; + + init_completion(&btcoexist->bt_mp_comp); + + return true; +} + +bool exhalbtc_initlize_variables_wifi_only(struct rtl_priv *rtlpriv) +{ + struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv); + struct wifi_only_haldata *wifionly_haldata; + + if (!wifionly_cfg) + return false; + + wifionly_cfg->adapter = rtlpriv; + + switch (rtlpriv->rtlhal.interface) { + case INTF_PCI: + wifionly_cfg->chip_interface = BTC_INTF_PCI; + break; + case INTF_USB: + wifionly_cfg->chip_interface = BTC_INTF_USB; + break; + default: + wifionly_cfg->chip_interface = BTC_INTF_UNKNOWN; + break; + } + + wifionly_haldata = &wifionly_cfg->haldata_info; + + wifionly_haldata->customer_id = CUSTOMER_NORMAL; + wifionly_haldata->efuse_pg_antnum = rtl_get_hwpg_ant_num(rtlpriv); + wifionly_haldata->efuse_pg_antpath = + rtl_get_hwpg_single_ant_path(rtlpriv); + wifionly_haldata->rfe_type = rtl_get_hwpg_rfe_type(rtlpriv); + wifionly_haldata->ant_div_cfg = 0; + return true; } bool exhalbtc_bind_bt_coex_withadapter(void *adapter) { - struct btc_coexist *btcoexist = &gl_bt_coexist; struct rtl_priv *rtlpriv = adapter; - u8 ant_num = 2, chip_type; + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + u8 ant_num = 2, chip_type, single_ant_path = 0; + + if (!btcoexist) + return false; if (btcoexist->binded) return false; @@ -922,10 +1305,16 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter) btcoexist->bt_info.miracast_plus_bt = false; chip_type = rtl_get_hwpg_bt_type(rtlpriv); - exhalbtc_set_chip_type(chip_type); + exhalbtc_set_chip_type(btcoexist, chip_type); ant_num = rtl_get_hwpg_ant_num(rtlpriv); exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num); + /* set default antenna position to main port */ + btcoexist->board_info.btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT; + + single_ant_path = rtl_get_hwpg_single_ant_path(rtlpriv); + exhalbtc_set_single_ant_path(btcoexist, single_ant_path); + if (rtl_get_hwpg_package_type(rtlpriv) == 0) btcoexist->board_info.tfbga_package = false; else if (rtl_get_hwpg_package_type(rtlpriv) == 1) @@ -940,6 +1329,9 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter) RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Package Type = Non-TFBGA\n"); + btcoexist->board_info.rfe_type = rtl_get_hwpg_rfe_type(rtlpriv); + btcoexist->board_info.ant_div_cfg = 0; + return true; } @@ -996,6 +1388,10 @@ void exhalbtc_init_hw_config(struct btc_coexist *btcoexist, bool wifi_only) } } +void exhalbtc_init_hw_config_wifi_only(struct wifi_only_cfg *wifionly_cfg) +{ +} + void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist) { if (!halbtc_is_bt_coexist_available(btcoexist)) @@ -1122,6 +1518,11 @@ void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 
type) halbtc_normal_low_power(btcoexist); } +void exhalbtc_scan_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg, + u8 is_5g) +{ +} + void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action) { u8 asso_type; @@ -1430,30 +1831,25 @@ void exhalbtc_stack_update_profile_info(void) { } -void exhalbtc_update_min_bt_rssi(s8 bt_rssi) +void exhalbtc_update_min_bt_rssi(struct btc_coexist *btcoexist, s8 bt_rssi) { - struct btc_coexist *btcoexist = &gl_bt_coexist; - if (!halbtc_is_bt_coexist_available(btcoexist)) return; btcoexist->stack_info.min_bt_rssi = bt_rssi; } -void exhalbtc_set_hci_version(u16 hci_version) +void exhalbtc_set_hci_version(struct btc_coexist *btcoexist, u16 hci_version) { - struct btc_coexist *btcoexist = &gl_bt_coexist; - if (!halbtc_is_bt_coexist_available(btcoexist)) return; btcoexist->stack_info.hci_version = hci_version; } -void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version) +void exhalbtc_set_bt_patch_version(struct btc_coexist *btcoexist, + u16 bt_hci_version, u16 bt_patch_version) { - struct btc_coexist *btcoexist = &gl_bt_coexist; - if (!halbtc_is_bt_coexist_available(btcoexist)) return; @@ -1461,7 +1857,7 @@ void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version) btcoexist->bt_info.bt_hci_ver = bt_hci_version; } -void exhalbtc_set_chip_type(u8 chip_type) +void exhalbtc_set_chip_type(struct btc_coexist *btcoexist, u8 chip_type) { switch (chip_type) { default: @@ -1469,51 +1865,58 @@ void exhalbtc_set_chip_type(u8 chip_type) case BT_ISSC_3WIRE: case BT_ACCEL: case BT_RTL8756: - gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_UNDEF; + btcoexist->board_info.bt_chip_type = BTC_CHIP_UNDEF; break; case BT_CSR_BC4: - gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC4; + btcoexist->board_info.bt_chip_type = BTC_CHIP_CSR_BC4; break; case BT_CSR_BC8: - gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC8; + btcoexist->board_info.bt_chip_type = BTC_CHIP_CSR_BC8; break; case BT_RTL8723A: - gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723A; + btcoexist->board_info.bt_chip_type = BTC_CHIP_RTL8723A; break; case BT_RTL8821A: - gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8821; + btcoexist->board_info.bt_chip_type = BTC_CHIP_RTL8821; break; case BT_RTL8723B: - gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723B; + btcoexist->board_info.bt_chip_type = BTC_CHIP_RTL8723B; break; } } void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num) { + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return; + if (BT_COEX_ANT_TYPE_PG == type) { - gl_bt_coexist.board_info.pg_ant_num = ant_num; - gl_bt_coexist.board_info.btdm_ant_num = ant_num; + btcoexist->board_info.pg_ant_num = ant_num; + btcoexist->board_info.btdm_ant_num = ant_num; } else if (BT_COEX_ANT_TYPE_ANTDIV == type) { - gl_bt_coexist.board_info.btdm_ant_num = ant_num; + btcoexist->board_info.btdm_ant_num = ant_num; } else if (type == BT_COEX_ANT_TYPE_DETECTED) { - gl_bt_coexist.board_info.btdm_ant_num = ant_num; + btcoexist->board_info.btdm_ant_num = ant_num; if (rtlpriv->cfg->mod_params->ant_sel == 1) - gl_bt_coexist.board_info.btdm_ant_pos = + btcoexist->board_info.btdm_ant_pos = BTC_ANTENNA_AT_AUX_PORT; else - gl_bt_coexist.board_info.btdm_ant_pos = + btcoexist->board_info.btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT; } } /* Currently used by 8723b only, S0 or S1 */ -void exhalbtc_set_single_ant_path(u8 single_ant_path) +void exhalbtc_set_single_ant_path(struct btc_coexist *btcoexist, + u8 
single_ant_path) { - gl_bt_coexist.board_info.single_ant_path = single_ant_path; + btcoexist->board_info.single_ant_path = single_ant_path; } -void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist) +void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist, + struct seq_file *m) { if (!halbtc_is_bt_coexist_available(btcoexist)) return; @@ -1522,18 +1925,36 @@ void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist) if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) { if (btcoexist->board_info.btdm_ant_num == 2) - ex_btc8821a2ant_display_coex_info(btcoexist); + ex_btc8821a2ant_display_coex_info(btcoexist, m); else if (btcoexist->board_info.btdm_ant_num == 1) - ex_btc8821a1ant_display_coex_info(btcoexist); + ex_btc8821a1ant_display_coex_info(btcoexist, m); } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) { if (btcoexist->board_info.btdm_ant_num == 2) - ex_btc8723b2ant_display_coex_info(btcoexist); + ex_btc8723b2ant_display_coex_info(btcoexist, m); else if (btcoexist->board_info.btdm_ant_num == 1) - ex_btc8723b1ant_display_coex_info(btcoexist); + ex_btc8723b1ant_display_coex_info(btcoexist, m); } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) { if (btcoexist->board_info.btdm_ant_num == 2) - ex_btc8192e2ant_display_coex_info(btcoexist); + ex_btc8192e2ant_display_coex_info(btcoexist, m); } halbtc_normal_low_power(btcoexist); } + +void exhalbtc_switch_band_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + + if (btcoexist->manual_control) + return; + + halbtc_leave_low_power(btcoexist); + + halbtc_normal_low_power(btcoexist); +} + +void exhalbtc_switch_band_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg, + u8 is_5g) +{ +} diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h index f9b87c12db09..8ed217656539 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h @@ -103,8 +103,6 @@ enum btc_msg_type { BTC_MSG_MAX }; -extern u32 btc_dbg_type[]; - /* following is for BTC_MSG_INTERFACE */ #define INTF_INIT BIT0 #define INTF_NOTIFY BIT2 @@ -152,8 +150,10 @@ struct btc_board_info { u8 btdm_ant_num; /* ant number for btdm */ u8 btdm_ant_pos; u8 single_ant_path; /* current used for 8723b only, 1=>s0, 0=>s1 */ - bool bt_exist; bool tfbga_package; + + u8 rfe_type; + u8 ant_div_cfg; }; enum btc_dbg_opcode { @@ -181,10 +181,17 @@ enum btc_wifi_role { BTC_ROLE_MAX }; +enum btc_wireless_freq { + BTC_FREQ_2_4G = 0x0, + BTC_FREQ_5G = 0x1, + BTC_FREQ_MAX +}; + enum btc_wifi_bw_mode { BTC_WIFI_BW_LEGACY = 0x0, BTC_WIFI_BW_HT20 = 0x1, BTC_WIFI_BW_HT40 = 0x2, + BTC_WIFI_BW_HT80 = 0x3, BTC_WIFI_BW_MAX }; @@ -275,6 +282,8 @@ enum btc_get_type { BTC_GET_U4_VENDOR, BTC_GET_U4_SUPPORTED_VERSION, BTC_GET_U4_SUPPORTED_FEATURE, + BTC_GET_U4_BT_DEVICE_INFO, + BTC_GET_U4_BT_FORBIDDEN_SLOT_VAL, BTC_GET_U4_WIFI_IQK_TOTAL, BTC_GET_U4_WIFI_IQK_OK, BTC_GET_U4_WIFI_IQK_FAIL, @@ -355,6 +364,7 @@ enum btc_dbg_disp_type { BTC_DBG_DISP_BT_LINK_INFO = 0x1, BTC_DBG_DISP_BT_FW_VER = 0x2, BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x3, + BTC_DBG_DISP_WIFI_STATUS = 0x04, BTC_DBG_DISP_MAX }; @@ -376,6 +386,14 @@ enum btc_notify_type_scan { BTC_SCAN_MAX }; +enum btc_notify_type_switchband { + BTC_NOT_SWITCH = 0x0, + BTC_SWITCH_TO_24G = 0x1, + BTC_SWITCH_TO_5G = 0x2, + BTC_SWITCH_TO_24G_NOFORSCAN = 0x3, + BTC_SWITCH_MAX +}; + enum btc_notify_type_associate { BTC_ASSOCIATE_FINISH = 0x0, 
BTC_ASSOCIATE_START = 0x1, @@ -417,49 +435,6 @@ enum btc_notify_type_stack_operation { BTC_STACK_OP_MAX }; -typedef u8 (*bfp_btc_r1)(void *btc_context, u32 reg_addr); - -typedef u16 (*bfp_btc_r2)(void *btc_context, u32 reg_addr); - -typedef u32 (*bfp_btc_r4)(void *btc_context, u32 reg_addr); - -typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u32 data); - -typedef void (*bfp_btc_w1_bit_mak)(void *btc_context, u32 reg_addr, - u32 bit_mask, u8 data1b); - -typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data); - -typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data); - -typedef void (*bfp_btc_local_reg_w1)(void *btc_context, u32 reg_addr, u8 data); -typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr, - u8 bit_mask, u8 data); - -typedef void (*bfp_btc_set_bb_reg)(void *btc_context, u32 reg_addr, - u32 bit_mask, u32 data); - -typedef u32 (*bfp_btc_get_bb_reg)(void *btc_context, u32 reg_addr, - u32 bit_mask); - -typedef void (*bfp_btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr, - u32 bit_mask, u32 data); - -typedef u32 (*bfp_btc_get_rf_reg)(void *btc_context, u8 rf_path, - u32 reg_addr, u32 bit_mask); - -typedef void (*bfp_btc_fill_h2c)(void *btc_context, u8 element_id, - u32 cmd_len, u8 *cmd_buffer); - -typedef bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf); - -typedef bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf); - -typedef void (*bfp_btc_set_bt_reg)(void *btc_context, u8 reg_type, u32 offset, - u32 value); - -typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type); - struct btc_bt_info { bool bt_disabled; u8 rssi_adjust_for_agc_table_on; @@ -491,6 +466,17 @@ struct btc_bt_info { u8 lps_val; u8 rpwm_val; u32 ra_mask; + + u32 afh_map_l; + u32 afh_map_m; + u16 afh_map_h; + u32 bt_supported_feature; + u32 bt_supported_version; + u32 bt_device_info; + u32 bt_forb_slot_val; + u8 bt_ant_det_val; + u8 bt_ble_scan_type; + u32 bt_ble_scan_para; }; struct btc_stack_info { @@ -546,6 +532,40 @@ enum btc_antenna_pos { BTC_ANTENNA_AT_AUX_PORT = 0x2, }; +enum btc_mp_h2c_op_code { + BT_OP_GET_BT_VERSION = 0, + BT_OP_WRITE_REG_ADDR = 12, + BT_OP_WRITE_REG_VALUE = 13, + BT_OP_READ_REG = 17, + BT_OP_GET_AFH_MAP_L = 30, + BT_OP_GET_AFH_MAP_M = 31, + BT_OP_GET_AFH_MAP_H = 32, + BT_OP_GET_BT_COEX_SUPPORTED_FEATURE = 42, + BT_OP_GET_BT_COEX_SUPPORTED_VERSION = 43, + BT_OP_GET_BT_ANT_DET_VAL = 44, + BT_OP_GET_BT_BLE_SCAN_PARA = 45, + BT_OP_GET_BT_BLE_SCAN_TYPE = 46, + BT_OP_GET_BT_DEVICE_INFO = 48, + BT_OP_GET_BT_FORBIDDEN_SLOT_VAL = 49, + BT_OP_MAX +}; + +enum btc_mp_h2c_req_num { + /* 4 bits only */ + BT_SEQ_DONT_CARE = 0, + BT_SEQ_GET_BT_VERSION = 0xE, + BT_SEQ_GET_AFH_MAP_L = 0x5, + BT_SEQ_GET_AFH_MAP_M = 0x6, + BT_SEQ_GET_AFH_MAP_H = 0x9, + BT_SEQ_GET_BT_COEX_SUPPORTED_FEATURE = 0x7, + BT_SEQ_GET_BT_COEX_SUPPORTED_VERSION = 0x8, + BT_SEQ_GET_BT_ANT_DET_VAL = 0x2, + BT_SEQ_GET_BT_BLE_SCAN_PARA = 0x3, + BT_SEQ_GET_BT_BLE_SCAN_TYPE = 0x4, + BT_SEQ_GET_BT_DEVICE_INFO = 0xA, + BT_SEQ_GET_BT_FORB_SLOT_VAL = 0xB, +}; + struct btc_coexist { /* make sure only one adapter can bind the data context */ bool binded; @@ -563,55 +583,86 @@ struct btc_coexist { */ bool auto_report_1ant; bool auto_report_2ant; + bool dbg_mode_1ant; + bool dbg_mode_2ant; bool initilized; bool stop_coex_dm; bool manual_control; struct btc_statistics statistics; u8 pwr_mode_val[10]; - /* function pointers - io related */ - bfp_btc_r1 btc_read_1byte; - bfp_btc_w1 btc_write_1byte; - bfp_btc_w1_bit_mak 
btc_write_1byte_bitmask; - bfp_btc_r2 btc_read_2byte; - bfp_btc_w2 btc_write_2byte; - bfp_btc_r4 btc_read_4byte; - bfp_btc_w4 btc_write_4byte; - bfp_btc_local_reg_w1 btc_write_local_reg_1byte; - - bfp_btc_set_bb_reg btc_set_bb_reg; - bfp_btc_get_bb_reg btc_get_bb_reg; + struct completion bt_mp_comp; - bfp_btc_set_rf_reg btc_set_rf_reg; - bfp_btc_get_rf_reg btc_get_rf_reg; - - bfp_btc_fill_h2c btc_fill_h2c; - - bfp_btc_disp_dbg_msg btc_disp_dbg_msg; - - bfp_btc_get btc_get; - bfp_btc_set btc_set; - - bfp_btc_set_bt_reg btc_set_bt_reg; + /* function pointers - io related */ + u8 (*btc_read_1byte)(void *btc_context, u32 reg_addr); + void (*btc_write_1byte)(void *btc_context, u32 reg_addr, u32 data); + void (*btc_write_1byte_bitmask)(void *btc_context, u32 reg_addr, + u32 bit_mask, u8 data1b); + u16 (*btc_read_2byte)(void *btc_context, u32 reg_addr); + void (*btc_write_2byte)(void *btc_context, u32 reg_addr, u16 data); + u32 (*btc_read_4byte)(void *btc_context, u32 reg_addr); + void (*btc_write_4byte)(void *btc_context, u32 reg_addr, u32 data); + + void (*btc_write_local_reg_1byte)(void *btc_context, u32 reg_addr, + u8 data); + void (*btc_set_bb_reg)(void *btc_context, u32 reg_addr, + u32 bit_mask, u32 data); + u32 (*btc_get_bb_reg)(void *btc_context, u32 reg_addr, + u32 bit_mask); + void (*btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr, + u32 bit_mask, u32 data); + u32 (*btc_get_rf_reg)(void *btc_context, u8 rf_path, + u32 reg_addr, u32 bit_mask); + + void (*btc_fill_h2c)(void *btc_context, u8 element_id, + u32 cmd_len, u8 *cmd_buffer); + + void (*btc_disp_dbg_msg)(void *btcoexist, u8 disp_type, + struct seq_file *m); + + bool (*btc_get)(void *btcoexist, u8 get_type, void *out_buf); + bool (*btc_set)(void *btcoexist, u8 set_type, void *in_buf); + + void (*btc_set_bt_reg)(void *btc_context, u8 reg_type, u32 offset, + u32 value); + u32 (*btc_get_bt_coex_supported_feature)(void *btcoexist); + u32 (*btc_get_bt_coex_supported_version)(void *btcoexist); + u8 (*btc_get_ant_det_val_from_bt)(void *btcoexist); + u8 (*btc_get_ble_scan_type_from_bt)(void *btcoexist); + u32 (*btc_get_ble_scan_para_from_bt)(void *btcoexist, u8 scan_type); + bool (*btc_get_bt_afh_map_from_bt)(void *btcoexist, u8 map_type, + u8 *afh_map); }; bool halbtc_is_wifi_uplink(struct rtl_priv *adapter); -extern struct btc_coexist gl_bt_coexist; +#define rtl_btc_coexist(rtlpriv) \ + ((struct btc_coexist *)((rtlpriv)->btcoexist.btc_context)) +#define rtl_btc_wifi_only(rtlpriv) \ + ((struct wifi_only_cfg *)((rtlpriv)->btcoexist.wifi_only_context)) -bool exhalbtc_initlize_variables(void); +struct wifi_only_cfg; + +bool exhalbtc_initlize_variables(struct rtl_priv *rtlpriv); +bool exhalbtc_initlize_variables_wifi_only(struct rtl_priv *rtlpriv); bool exhalbtc_bind_bt_coex_withadapter(void *adapter); +void exhalbtc_power_on_setting(struct btc_coexist *btcoexist); +void exhalbtc_pre_load_firmware(struct btc_coexist *btcoexist); void exhalbtc_init_hw_config(struct btc_coexist *btcoexist, bool wifi_only); +void exhalbtc_init_hw_config_wifi_only(struct wifi_only_cfg *wifionly_cfg); void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist); void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type); void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type); void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type); +void exhalbtc_scan_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg, + u8 is_5g); void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action); void exhalbtc_mediastatus_notify(struct 
btc_coexist *btcoexist, enum rt_media_status media_status); void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type); void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf, u8 length); +void exhalbtc_rf_status_notify(struct btc_coexist *btcoexist, u8 type); void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type); void exhalbtc_halt_notify(struct btc_coexist *btcoexist); void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state); @@ -619,18 +670,63 @@ void exhalbtc_coex_dm_switch(struct btc_coexist *btcoexist); void exhalbtc_periodical(struct btc_coexist *btcoexist); void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len, u8 *data); +void exhalbtc_antenna_detection(struct btc_coexist *btcoexist, u32 cent_freq, + u32 offset, u32 span, u32 seconds); void exhalbtc_stack_update_profile_info(void); -void exhalbtc_set_hci_version(u16 hci_version); -void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version); -void exhalbtc_update_min_bt_rssi(s8 bt_rssi); -void exhalbtc_set_bt_exist(bool bt_exist); -void exhalbtc_set_chip_type(u8 chip_type); +void exhalbtc_set_hci_version(struct btc_coexist *btcoexist, u16 hci_version); +void exhalbtc_set_bt_patch_version(struct btc_coexist *btcoexist, + u16 bt_hci_version, u16 bt_patch_version); +void exhalbtc_update_min_bt_rssi(struct btc_coexist *btcoexist, s8 bt_rssi); +void exhalbtc_set_bt_exist(struct btc_coexist *btcoexist, bool bt_exist); +void exhalbtc_set_chip_type(struct btc_coexist *btcoexist, u8 chip_type); void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num); -void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist); +void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist, + struct seq_file *m); +void exhalbtc_switch_band_notify(struct btc_coexist *btcoexist, u8 type); +void exhalbtc_switch_band_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg, + u8 is_5g); void exhalbtc_signal_compensation(struct btc_coexist *btcoexist, u8 *rssi_wifi, u8 *rssi_bt); void exhalbtc_lps_leave(struct btc_coexist *btcoexist); void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist); -void exhalbtc_set_single_ant_path(u8 single_ant_path); +void exhalbtc_set_single_ant_path(struct btc_coexist *btcoexist, + u8 single_ant_path); + +/* The following are used by wifi_only case */ +enum wifionly_chip_interface { + WIFIONLY_INTF_UNKNOWN = 0, + WIFIONLY_INTF_PCI = 1, + WIFIONLY_INTF_USB = 2, + WIFIONLY_INTF_SDIO = 3, + WIFIONLY_INTF_MAX +}; + +enum wifionly_customer_id { + CUSTOMER_NORMAL = 0, + CUSTOMER_HP_1 = 1, +}; + +struct wifi_only_haldata { + u16 customer_id; + u8 efuse_pg_antnum; + u8 efuse_pg_antpath; + u8 rfe_type; + u8 ant_div_cfg; +}; + +struct wifi_only_cfg { + void *adapter; + struct wifi_only_haldata haldata_info; + enum wifionly_chip_interface chip_interface; +}; + +static inline +void halwifionly_phy_set_bb_reg(struct wifi_only_cfg *wifi_conly_cfg, + u32 regaddr, u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = (struct rtl_priv *)wifi_conly_cfg->adapter; + + rtl_set_bbreg(rtlpriv->hw, regaddr, bitmask, data); +} #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c index 7d296a401b6f..cce4a37ea408 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c @@ -31,11 +31,16 @@ static struct rtl_btc_ops rtl_btc_operation = { .btc_init_variables = 
rtl_btc_init_variables, + .btc_init_variables_wifi_only = rtl_btc_init_variables_wifi_only, + .btc_deinit_variables = rtl_btc_deinit_variables, .btc_init_hal_vars = rtl_btc_init_hal_vars, + .btc_power_on_setting = rtl_btc_power_on_setting, .btc_init_hw_config = rtl_btc_init_hw_config, + .btc_init_hw_config_wifi_only = rtl_btc_init_hw_config_wifi_only, .btc_ips_notify = rtl_btc_ips_notify, .btc_lps_notify = rtl_btc_lps_notify, .btc_scan_notify = rtl_btc_scan_notify, + .btc_scan_notify_wifi_only = rtl_btc_scan_notify_wifi_only, .btc_connect_notify = rtl_btc_connect_notify, .btc_mediastatus_notify = rtl_btc_mediastatus_notify, .btc_periodical = rtl_btc_periodical, @@ -46,63 +51,143 @@ static struct rtl_btc_ops rtl_btc_operation = { .btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo, .btc_is_bt_disabled = rtl_btc_is_bt_disabled, .btc_special_packet_notify = rtl_btc_special_packet_notify, + .btc_switch_band_notify = rtl_btc_switch_band_notify, + .btc_switch_band_notify_wifi_only = rtl_btc_switch_band_notify_wifionly, .btc_record_pwr_mode = rtl_btc_record_pwr_mode, .btc_get_lps_val = rtl_btc_get_lps_val, .btc_get_rpwm_val = rtl_btc_get_rpwm_val, .btc_is_bt_ctrl_lps = rtl_btc_is_bt_ctrl_lps, .btc_is_bt_lps_on = rtl_btc_is_bt_lps_on, .btc_get_ampdu_cfg = rtl_btc_get_ampdu_cfg, + .btc_display_bt_coex_info = rtl_btc_display_bt_coex_info, }; +void rtl_btc_display_bt_coex_info(struct rtl_priv *rtlpriv, struct seq_file *m) +{ + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) { + seq_puts(m, "btc_coexist context is NULL!\n"); + return; + } + + exhalbtc_display_bt_coex_info(btcoexist, m); +} + void rtl_btc_record_pwr_mode(struct rtl_priv *rtlpriv, u8 *buf, u8 len) { + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); u8 safe_len; - safe_len = sizeof(gl_bt_coexist.pwr_mode_val); + if (!btcoexist) + return; + + safe_len = sizeof(btcoexist->pwr_mode_val); if (safe_len > len) safe_len = len; - memcpy(gl_bt_coexist.pwr_mode_val, buf, safe_len); + memcpy(btcoexist->pwr_mode_val, buf, safe_len); } u8 rtl_btc_get_lps_val(struct rtl_priv *rtlpriv) { - return gl_bt_coexist.bt_info.lps_val; + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return 0; + + return btcoexist->bt_info.lps_val; } u8 rtl_btc_get_rpwm_val(struct rtl_priv *rtlpriv) { - return gl_bt_coexist.bt_info.rpwm_val; + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return 0; + + return btcoexist->bt_info.rpwm_val; } bool rtl_btc_is_bt_ctrl_lps(struct rtl_priv *rtlpriv) { - return gl_bt_coexist.bt_info.bt_ctrl_lps; + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return false; + + return btcoexist->bt_info.bt_ctrl_lps; } bool rtl_btc_is_bt_lps_on(struct rtl_priv *rtlpriv) { - return gl_bt_coexist.bt_info.bt_lps_on; + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return false; + + return btcoexist->bt_info.bt_lps_on; } void rtl_btc_get_ampdu_cfg(struct rtl_priv *rtlpriv, u8 *reject_agg, u8 *ctrl_agg_size, u8 *agg_size) { + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) { + *reject_agg = false; + *ctrl_agg_size = false; + return; + } + if (reject_agg) - *reject_agg = gl_bt_coexist.bt_info.reject_agg_pkt; + *reject_agg = btcoexist->bt_info.reject_agg_pkt; if (ctrl_agg_size) - *ctrl_agg_size = gl_bt_coexist.bt_info.bt_ctrl_agg_buf_size; + *ctrl_agg_size = btcoexist->bt_info.bt_ctrl_agg_buf_size; if (agg_size) - *agg_size = 
gl_bt_coexist.bt_info.agg_buf_size; + *agg_size = btcoexist->bt_info.agg_buf_size; +} + +static void rtl_btc_alloc_variable(struct rtl_priv *rtlpriv, bool wifi_only) +{ + if (wifi_only) + rtlpriv->btcoexist.wifi_only_context = + kzalloc(sizeof(struct wifi_only_cfg), GFP_KERNEL); + else + rtlpriv->btcoexist.btc_context = + kzalloc(sizeof(struct btc_coexist), GFP_KERNEL); +} + +static void rtl_btc_free_variable(struct rtl_priv *rtlpriv) +{ + kfree(rtlpriv->btcoexist.btc_context); + rtlpriv->btcoexist.btc_context = NULL; + + kfree(rtlpriv->btcoexist.wifi_only_context); + rtlpriv->btcoexist.wifi_only_context = NULL; } void rtl_btc_init_variables(struct rtl_priv *rtlpriv) { - exhalbtc_initlize_variables(); + rtl_btc_alloc_variable(rtlpriv, false); + + exhalbtc_initlize_variables(rtlpriv); exhalbtc_bind_bt_coex_withadapter(rtlpriv); } +void rtl_btc_init_variables_wifi_only(struct rtl_priv *rtlpriv) +{ + rtl_btc_alloc_variable(rtlpriv, true); + + exhalbtc_initlize_variables_wifi_only(rtlpriv); +} + +void rtl_btc_deinit_variables(struct rtl_priv *rtlpriv) +{ + rtl_btc_free_variable(rtlpriv); +} + void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv) { /* move ant_num, bt_type and single_ant_path to @@ -110,67 +195,155 @@ void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv) */ } +void rtl_btc_power_on_setting(struct rtl_priv *rtlpriv) +{ + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return; + + exhalbtc_power_on_setting(btcoexist); +} + void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv) { + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + u8 bt_exist; bt_exist = rtl_get_hwpg_bt_exist(rtlpriv); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%s, bt_exist is %d\n", __func__, bt_exist); - exhalbtc_init_hw_config(&gl_bt_coexist, !bt_exist); - exhalbtc_init_coex_dm(&gl_bt_coexist); + if (!btcoexist) + return; + + exhalbtc_init_hw_config(btcoexist, !bt_exist); + exhalbtc_init_coex_dm(btcoexist); +} + +void rtl_btc_init_hw_config_wifi_only(struct rtl_priv *rtlpriv) +{ + struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv); + + if (!wifionly_cfg) + return; + + exhalbtc_init_hw_config_wifi_only(wifionly_cfg); } void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type) { - exhalbtc_ips_notify(&gl_bt_coexist, type); + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return; + + exhalbtc_ips_notify(btcoexist, type); + + if (type == ERFON) { + /* In some situation, it doesn't scan after leaving IPS, and + * this will cause btcoex in wrong state. 
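+ * Issuing the dummy scan-start/scan-stop pair below walks the coex
+ * state machine through the same transitions a real scan would have
+ * produced, bringing it back to a consistent idle state.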
+ */ + exhalbtc_scan_notify(btcoexist, 1); + exhalbtc_scan_notify(btcoexist, 0); + } } void rtl_btc_lps_notify(struct rtl_priv *rtlpriv, u8 type) { - exhalbtc_lps_notify(&gl_bt_coexist, type); + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return; + + exhalbtc_lps_notify(btcoexist, type); } void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype) { - exhalbtc_scan_notify(&gl_bt_coexist, scantype); + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return; + + exhalbtc_scan_notify(btcoexist, scantype); +} + +void rtl_btc_scan_notify_wifi_only(struct rtl_priv *rtlpriv, u8 scantype) +{ + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv); + u8 is_5g = (rtlhal->current_bandtype == BAND_ON_5G); + + if (!wifionly_cfg) + return; + + exhalbtc_scan_notify_wifi_only(wifionly_cfg, is_5g); } void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action) { - exhalbtc_connect_notify(&gl_bt_coexist, action); + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return; + + exhalbtc_connect_notify(btcoexist, action); } void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, enum rt_media_status mstatus) { - exhalbtc_mediastatus_notify(&gl_bt_coexist, mstatus); + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return; + + exhalbtc_mediastatus_notify(btcoexist, mstatus); } void rtl_btc_periodical(struct rtl_priv *rtlpriv) { + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return; + /*rtl_bt_dm_monitor();*/ - exhalbtc_periodical(&gl_bt_coexist); + exhalbtc_periodical(btcoexist); } -void rtl_btc_halt_notify(void) +void rtl_btc_halt_notify(struct rtl_priv *rtlpriv) { - struct btc_coexist *btcoexist = &gl_bt_coexist; + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return; exhalbtc_halt_notify(btcoexist); } void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length) { - exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length); + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return; + + exhalbtc_bt_info_notify(btcoexist, tmp_buf, length); } void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length) { + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); u8 extid, seq, len; u16 bt_real_fw_ver; u8 bt_fw_ver; + u8 *data; + + if (!btcoexist) + return; if ((length < 4) || (!tmp_buf)) return; @@ -182,20 +355,70 @@ void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length) len = tmp_buf[1] >> 4; seq = tmp_buf[2] >> 4; + data = &tmp_buf[3]; /* BT Firmware version response */ - if (seq == 0x0E) { + switch (seq) { + case BT_SEQ_GET_BT_VERSION: bt_real_fw_ver = tmp_buf[3] | (tmp_buf[4] << 8); bt_fw_ver = tmp_buf[5]; - gl_bt_coexist.bt_info.bt_real_fw_ver = bt_real_fw_ver; - gl_bt_coexist.bt_info.bt_fw_ver = bt_fw_ver; + btcoexist->bt_info.bt_real_fw_ver = bt_real_fw_ver; + btcoexist->bt_info.bt_fw_ver = bt_fw_ver; + break; + case BT_SEQ_GET_AFH_MAP_L: + btcoexist->bt_info.afh_map_l = le32_to_cpu(*(__le32 *)data); + break; + case BT_SEQ_GET_AFH_MAP_M: + btcoexist->bt_info.afh_map_m = le32_to_cpu(*(__le32 *)data); + break; + case BT_SEQ_GET_AFH_MAP_H: + btcoexist->bt_info.afh_map_h = le16_to_cpu(*(__le16 *)data); + break; + case BT_SEQ_GET_BT_COEX_SUPPORTED_FEATURE: + btcoexist->bt_info.bt_supported_feature = tmp_buf[3] | + (tmp_buf[4] << 8); + break; + case 
BT_SEQ_GET_BT_COEX_SUPPORTED_VERSION: + btcoexist->bt_info.bt_supported_version = tmp_buf[3] | + (tmp_buf[4] << 8); + break; + case BT_SEQ_GET_BT_ANT_DET_VAL: + btcoexist->bt_info.bt_ant_det_val = tmp_buf[3]; + break; + case BT_SEQ_GET_BT_BLE_SCAN_PARA: + btcoexist->bt_info.bt_ble_scan_para = tmp_buf[3] | + (tmp_buf[4] << 8) | + (tmp_buf[5] << 16) | + (tmp_buf[6] << 24); + break; + case BT_SEQ_GET_BT_BLE_SCAN_TYPE: + btcoexist->bt_info.bt_ble_scan_type = tmp_buf[3]; + break; + case BT_SEQ_GET_BT_DEVICE_INFO: + btcoexist->bt_info.bt_device_info = + le32_to_cpu(*(__le32 *)data); + break; + case BT_OP_GET_BT_FORBIDDEN_SLOT_VAL: + btcoexist->bt_info.bt_forb_slot_val = + le32_to_cpu(*(__le32 *)data); + break; } + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "btmpinfo complete req_num=%d\n", seq); + + complete(&btcoexist->bt_mp_comp); } bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv) { - return gl_bt_coexist.bt_info.limited_dig; + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return false; + + return btcoexist->bt_info.limited_dig; } bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv) @@ -227,8 +450,13 @@ bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv) bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv) { + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return true; + /* It seems 'bt_disabled' is never be initialized or set. */ - if (gl_bt_coexist.bt_info.bt_disabled) + if (btcoexist->bt_info.bt_disabled) return true; else return false; @@ -236,7 +464,50 @@ bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv) void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type) { - return exhalbtc_special_packet_notify(&gl_bt_coexist, pkt_type); + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + + if (!btcoexist) + return; + + return exhalbtc_special_packet_notify(btcoexist, pkt_type); +} + +void rtl_btc_switch_band_notify(struct rtl_priv *rtlpriv, u8 band_type, + bool scanning) +{ + struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); + u8 type = BTC_NOT_SWITCH; + + if (!btcoexist) + return; + + switch (band_type) { + case BAND_ON_2_4G: + if (scanning) + type = BTC_SWITCH_TO_24G; + else + type = BTC_SWITCH_TO_24G_NOFORSCAN; + break; + + case BAND_ON_5G: + type = BTC_SWITCH_TO_5G; + break; + } + + if (type != BTC_NOT_SWITCH) + exhalbtc_switch_band_notify(btcoexist, type); +} + +void rtl_btc_switch_band_notify_wifionly(struct rtl_priv *rtlpriv, u8 band_type, + bool scanning) +{ + struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv); + u8 is_5g = (band_type == BAND_ON_5G); + + if (!wifionly_cfg) + return; + + exhalbtc_switch_band_notify_wifi_only(wifionly_cfg, is_5g); } struct rtl_btc_ops *rtl_btc_get_ops_pointer(void) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h index ac1253c46f44..8c996055de71 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h @@ -28,22 +28,32 @@ #include "halbt_precomp.h" void rtl_btc_init_variables(struct rtl_priv *rtlpriv); +void rtl_btc_init_variables_wifi_only(struct rtl_priv *rtlpriv); +void rtl_btc_deinit_variables(struct rtl_priv *rtlpriv); void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv); +void rtl_btc_power_on_setting(struct rtl_priv *rtlpriv); void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv); +void rtl_btc_init_hw_config_wifi_only(struct rtl_priv *rtlpriv); void 
rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type); void rtl_btc_lps_notify(struct rtl_priv *rtlpriv, u8 type); void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype); +void rtl_btc_scan_notify_wifi_only(struct rtl_priv *rtlpriv, u8 scantype); void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action); void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, enum rt_media_status mstatus); void rtl_btc_periodical(struct rtl_priv *rtlpriv); -void rtl_btc_halt_notify(void); +void rtl_btc_halt_notify(struct rtl_priv *rtlpriv); void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmpbuf, u8 length); void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length); bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv); bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv); bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv); void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type); +void rtl_btc_switch_band_notify(struct rtl_priv *rtlpriv, u8 band_type, + bool scanning); +void rtl_btc_switch_band_notify_wifionly(struct rtl_priv *rtlpriv, u8 band_type, + bool scanning); +void rtl_btc_display_bt_coex_info(struct rtl_priv *rtlpriv, struct seq_file *m); void rtl_btc_record_pwr_mode(struct rtl_priv *rtlpriv, u8 *buf, u8 len); u8 rtl_btc_get_lps_val(struct rtl_priv *rtlpriv); u8 rtl_btc_get_rpwm_val(struct rtl_priv *rtlpriv); diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 3cb88825473e..cfea57efa7f4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -345,9 +345,9 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw, mutex_lock(&rtlpriv->locks.conf_mutex); /* Free beacon resources */ - if ((vif->type == NL80211_IFTYPE_AP) || - (vif->type == NL80211_IFTYPE_ADHOC) || - (vif->type == NL80211_IFTYPE_MESH_POINT)) { + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MESH_POINT) { if (mac->beacon_enabled == 1) { mac->beacon_enabled = 0; rtlpriv->cfg->ops->update_interrupt_mask(hw, 0, @@ -858,8 +858,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw, * here just used for linked scanning, & linked * and nolink check bssid is set in set network_type */ - if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) && - (mac->link_state >= MAC80211_LINKED)) { + if (changed_flags & FIF_BCN_PRBRESP_PROMISC && + mac->link_state >= MAC80211_LINKED) { if (mac->opmode != NL80211_IFTYPE_AP && mac->opmode != NL80211_IFTYPE_MESH_POINT) { if (*new_flags & FIF_BCN_PRBRESP_PROMISC) @@ -1044,10 +1044,10 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); mutex_lock(&rtlpriv->locks.conf_mutex); - if ((vif->type == NL80211_IFTYPE_ADHOC) || - (vif->type == NL80211_IFTYPE_AP) || - (vif->type == NL80211_IFTYPE_MESH_POINT)) { - if ((changed & BSS_CHANGED_BEACON) || + if (vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_MESH_POINT) { + if (changed & BSS_CHANGED_BEACON || (changed & BSS_CHANGED_BEACON_ENABLED && bss_conf->enable_beacon)) { if (mac->beacon_enabled == 0) { @@ -1162,6 +1162,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, "BSS_CHANGED_ASSOC\n"); } else { + struct cfg80211_bss *bss = NULL; + mstatus = RT_MEDIA_DISCONNECT; if (mac->link_state == MAC80211_LINKED) @@ -1169,6 +1171,22 @@ static void 
rtl_op_bss_info_changed(struct ieee80211_hw *hw, if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE) rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE); mac->link_state = MAC80211_NOLINK; + + bss = cfg80211_get_bss(hw->wiphy, NULL, + (u8 *)mac->bssid, NULL, 0, + IEEE80211_BSS_TYPE_ESS, + IEEE80211_PRIVACY_OFF); + + RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, + "bssid = %pMF\n", mac->bssid); + + if (bss) { + cfg80211_unlink_bss(hw->wiphy, bss); + cfg80211_put_bss(hw->wiphy, bss); + RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, + "cfg80211_unlink !!\n"); + } + eth_zero_addr(mac->bssid); mac->vendor = PEER_UNKNOWN; mac->mode = 0; @@ -1435,6 +1453,9 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw, if (rtlpriv->cfg->ops->get_btc_status()) rtlpriv->btcoexist.btc_ops->btc_scan_notify(rtlpriv, 1); + else if (rtlpriv->btcoexist.btc_ops) + rtlpriv->btcoexist.btc_ops->btc_scan_notify_wifi_only(rtlpriv, + 1); if (rtlpriv->dm.supp_phymode_switch) { if (rtlpriv->cfg->ops->chk_switch_dmdp) @@ -1490,6 +1511,9 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw, rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_RESTORE); if (rtlpriv->cfg->ops->get_btc_status()) rtlpriv->btcoexist.btc_ops->btc_scan_notify(rtlpriv, 0); + else if (rtlpriv->btcoexist.btc_ops) + rtlpriv->btcoexist.btc_ops->btc_scan_notify_wifi_only(rtlpriv, + 0); } static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, @@ -1513,9 +1537,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -ENOSPC; /*User disabled HW-crypto */ } /* To support IBSS, use sw-crypto for GTK */ - if (((vif->type == NL80211_IFTYPE_ADHOC) || - (vif->type == NL80211_IFTYPE_MESH_POINT)) && - !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + if ((vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MESH_POINT) && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) return -ENOSPC; RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "%s hardware based encryption for keyidx: %d, mac: %pM\n", @@ -1588,7 +1612,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, rtlpriv->cfg->ops->enable_hw_sec(hw); } } else { - if ((!group_key) || (vif->type == NL80211_IFTYPE_ADHOC) || + if (!group_key || vif->type == NL80211_IFTYPE_ADHOC || rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) { if (rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION && @@ -1775,7 +1799,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, break; case PWR_CMD_WRITE: RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n"); + "%s(): PWR_CMD_WRITE\n", __func__); offset = GET_PWR_CFG_OFFSET(cfg_cmd); /*Read the value from system register*/ diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c index 38fef6dbb44b..d70385be9976 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.c +++ b/drivers/net/wireless/realtek/rtlwifi/debug.c @@ -23,15 +23,17 @@ *****************************************************************************/ #include "wifi.h" +#include "cam.h" #include <linux/moduleparam.h> +#include <linux/vmalloc.h> #ifdef CONFIG_RTLWIFI_DEBUG void _rtl_dbg_trace(struct rtl_priv *rtlpriv, u64 comp, int level, const char *fmt, ...) 
{ if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) && - (level <= rtlpriv->cfg->mod_params->debug_level))) { + level <= rtlpriv->cfg->mod_params->debug_level)) { struct va_format vaf; va_list args; @@ -51,7 +53,7 @@ void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level, const char *fmt, ...) { if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) && - (level <= rtlpriv->cfg->mod_params->debug_level))) { + level <= rtlpriv->cfg->mod_params->debug_level)) { struct va_format vaf; va_list args; @@ -81,4 +83,481 @@ void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level, } EXPORT_SYMBOL_GPL(_rtl_dbg_print_data); +struct rtl_debugfs_priv { + struct rtl_priv *rtlpriv; + int (*cb_read)(struct seq_file *m, void *v); + ssize_t (*cb_write)(struct file *filp, const char __user *buffer, + size_t count, loff_t *loff); + u32 cb_data; +}; + +static struct dentry *debugfs_topdir; + +static int rtl_debug_get_common(struct seq_file *m, void *v) +{ + struct rtl_debugfs_priv *debugfs_priv = m->private; + + return debugfs_priv->cb_read(m, v); +} + +static int dl_debug_open_common(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_debug_get_common, inode->i_private); +} + +static const struct file_operations file_ops_common = { + .open = dl_debug_open_common, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_debug_get_mac_page(struct seq_file *m, void *v) +{ + struct rtl_debugfs_priv *debugfs_priv = m->private; + struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv; + u32 page = debugfs_priv->cb_data; + int i, n; + int max = 0xff; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_read_dword(rtlpriv, (page | n))); + } + seq_puts(m, "\n"); + return 0; +} + +#define RTL_DEBUG_IMPL_MAC_SERIES(page, addr) \ +static struct rtl_debugfs_priv rtl_debug_priv_mac_ ##page = { \ + .cb_read = rtl_debug_get_mac_page, \ + .cb_data = addr, \ +} + +RTL_DEBUG_IMPL_MAC_SERIES(0, 0x0000); +RTL_DEBUG_IMPL_MAC_SERIES(1, 0x0100); +RTL_DEBUG_IMPL_MAC_SERIES(2, 0x0200); +RTL_DEBUG_IMPL_MAC_SERIES(3, 0x0300); +RTL_DEBUG_IMPL_MAC_SERIES(4, 0x0400); +RTL_DEBUG_IMPL_MAC_SERIES(5, 0x0500); +RTL_DEBUG_IMPL_MAC_SERIES(6, 0x0600); +RTL_DEBUG_IMPL_MAC_SERIES(7, 0x0700); +RTL_DEBUG_IMPL_MAC_SERIES(10, 0x1000); +RTL_DEBUG_IMPL_MAC_SERIES(11, 0x1100); +RTL_DEBUG_IMPL_MAC_SERIES(12, 0x1200); +RTL_DEBUG_IMPL_MAC_SERIES(13, 0x1300); +RTL_DEBUG_IMPL_MAC_SERIES(14, 0x1400); +RTL_DEBUG_IMPL_MAC_SERIES(15, 0x1500); +RTL_DEBUG_IMPL_MAC_SERIES(16, 0x1600); +RTL_DEBUG_IMPL_MAC_SERIES(17, 0x1700); + +static int rtl_debug_get_bb_page(struct seq_file *m, void *v) +{ + struct rtl_debugfs_priv *debugfs_priv = m->private; + struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv; + struct ieee80211_hw *hw = rtlpriv->hw; + u32 page = debugfs_priv->cb_data; + int i, n; + int max = 0xff; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_get_bbreg(hw, (page | n), 0xffffffff)); + } + seq_puts(m, "\n"); + return 0; +} + +#define RTL_DEBUG_IMPL_BB_SERIES(page, addr) \ +static struct rtl_debugfs_priv rtl_debug_priv_bb_ ##page = { \ + .cb_read = rtl_debug_get_bb_page, \ + .cb_data = addr, \ +} + +RTL_DEBUG_IMPL_BB_SERIES(8, 0x0800); +RTL_DEBUG_IMPL_BB_SERIES(9, 0x0900); +RTL_DEBUG_IMPL_BB_SERIES(a, 0x0a00); +RTL_DEBUG_IMPL_BB_SERIES(b, 0x0b00); +RTL_DEBUG_IMPL_BB_SERIES(c, 0x0c00); 
+RTL_DEBUG_IMPL_BB_SERIES(d, 0x0d00); +RTL_DEBUG_IMPL_BB_SERIES(e, 0x0e00); +RTL_DEBUG_IMPL_BB_SERIES(f, 0x0f00); +RTL_DEBUG_IMPL_BB_SERIES(18, 0x1800); +RTL_DEBUG_IMPL_BB_SERIES(19, 0x1900); +RTL_DEBUG_IMPL_BB_SERIES(1a, 0x1a00); +RTL_DEBUG_IMPL_BB_SERIES(1b, 0x1b00); +RTL_DEBUG_IMPL_BB_SERIES(1c, 0x1c00); +RTL_DEBUG_IMPL_BB_SERIES(1d, 0x1d00); +RTL_DEBUG_IMPL_BB_SERIES(1e, 0x1e00); +RTL_DEBUG_IMPL_BB_SERIES(1f, 0x1f00); + +static int rtl_debug_get_reg_rf(struct seq_file *m, void *v) +{ + struct rtl_debugfs_priv *debugfs_priv = m->private; + struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv; + struct ieee80211_hw *hw = rtlpriv->hw; + enum radio_path rfpath = debugfs_priv->cb_data; + int i, n; + int max = 0x40; + + if (IS_HARDWARE_TYPE_8822B(rtlpriv)) + max = 0xff; + + seq_printf(m, "\nPATH(%d)", rfpath); + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n); + for (i = 0; i < 4 && n <= max; n += 1, i++) + seq_printf(m, "%8.8x ", + rtl_get_rfreg(hw, rfpath, n, 0xffffffff)); + } + seq_puts(m, "\n"); + return 0; +} + +#define RTL_DEBUG_IMPL_RF_SERIES(page, addr) \ +static struct rtl_debugfs_priv rtl_debug_priv_rf_ ##page = { \ + .cb_read = rtl_debug_get_reg_rf, \ + .cb_data = addr, \ +} + +RTL_DEBUG_IMPL_RF_SERIES(a, RF90_PATH_A); +RTL_DEBUG_IMPL_RF_SERIES(b, RF90_PATH_B); + +static int rtl_debug_get_cam_register(struct seq_file *m, void *v) +{ + struct rtl_debugfs_priv *debugfs_priv = m->private; + struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv; + int start = debugfs_priv->cb_data; + u32 target_cmd = 0; + u32 target_val = 0; + u8 entry_i = 0; + u32 ulstatus; + int i = 100, j = 0; + int end = (start + 11 > TOTAL_CAM_ENTRY ? TOTAL_CAM_ENTRY : start + 11); + + /* This dumps the current register page */ + seq_printf(m, + "\n#################### SECURITY CAM (%d-%d) ##################\n", + start, end - 1); + + for (j = start; j < end; j++) { + seq_printf(m, "\nD: %2x > ", j); + for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) { + /* polling bit, and No Write enable, and address */ + target_cmd = entry_i + CAM_CONTENT_COUNT * j; + target_cmd = target_cmd | BIT(31); + + /* Check polling bit is clear */ + while ((i--) >= 0) { + ulstatus = + rtl_read_dword(rtlpriv, + rtlpriv->cfg->maps[RWCAM]); + if (ulstatus & BIT(31)) + continue; + else + break; + } + + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], + target_cmd); + target_val = rtl_read_dword(rtlpriv, + rtlpriv->cfg->maps[RCAMO]); + seq_printf(m, "%8.8x ", target_val); + } + } + seq_puts(m, "\n"); + return 0; +} + +#define RTL_DEBUG_IMPL_CAM_SERIES(page, addr) \ +static struct rtl_debugfs_priv rtl_debug_priv_cam_ ##page = { \ + .cb_read = rtl_debug_get_cam_register, \ + .cb_data = addr, \ +} + +RTL_DEBUG_IMPL_CAM_SERIES(1, 0); +RTL_DEBUG_IMPL_CAM_SERIES(2, 11); +RTL_DEBUG_IMPL_CAM_SERIES(3, 22); + +static int rtl_debug_get_btcoex(struct seq_file *m, void *v) +{ + struct rtl_debugfs_priv *debugfs_priv = m->private; + struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv; + + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_display_bt_coex_info(rtlpriv, + m); + + seq_puts(m, "\n"); + + return 0; +} + +static struct rtl_debugfs_priv rtl_debug_priv_btcoex = { + .cb_read = rtl_debug_get_btcoex, + .cb_data = 0, +}; + +static ssize_t rtl_debugfs_set_write_reg(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct rtl_debugfs_priv *debugfs_priv = filp->private_data; + struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv; + char tmp[32 + 1]; + int tmp_len; + u32 addr, val, len; +
int num; + + if (count < 3) + return -EFAULT; + + tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count); + + if (!buffer || copy_from_user(tmp, buffer, tmp_len)) + return count; + + tmp[tmp_len] = '\0'; + + /* write BB/MAC register */ + num = sscanf(tmp, "%x %x %x", &addr, &val, &len); + + if (num != 3) + return count; + + switch (len) { + case 1: + rtl_write_byte(rtlpriv, addr, (u8)val); + break; + case 2: + rtl_write_word(rtlpriv, addr, (u16)val); + break; + case 4: + rtl_write_dword(rtlpriv, addr, val); + break; + default: + /*printk("error write length=%d", len);*/ + break; + } + + return count; +} + +static struct rtl_debugfs_priv rtl_debug_priv_write_reg = { + .cb_write = rtl_debugfs_set_write_reg, +}; + +static ssize_t rtl_debugfs_set_write_h2c(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct rtl_debugfs_priv *debugfs_priv = filp->private_data; + struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv; + struct ieee80211_hw *hw = rtlpriv->hw; + char tmp[32 + 1]; + int tmp_len; + u8 h2c_len, h2c_data_packed[8]; + int h2c_data[8]; /* idx 0: cmd */ + int i; + + if (count < 3) + return -EFAULT; + + tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count); + + if (!buffer || copy_from_user(tmp, buffer, tmp_len)) + return count; + + tmp[tmp_len] = '\0'; + + h2c_len = sscanf(tmp, "%X %X %X %X %X %X %X %X", + &h2c_data[0], &h2c_data[1], + &h2c_data[2], &h2c_data[3], + &h2c_data[4], &h2c_data[5], + &h2c_data[6], &h2c_data[7]); + + if (h2c_len <= 0) + return count; + + for (i = 0; i < h2c_len; i++) + h2c_data_packed[i] = (u8)h2c_data[i]; + + rtlpriv->cfg->ops->fill_h2c_cmd(hw, h2c_data_packed[0], + h2c_len - 1, + &h2c_data_packed[1]); + + return count; +} + +static struct rtl_debugfs_priv rtl_debug_priv_write_h2c = { + .cb_write = rtl_debugfs_set_write_h2c, +}; + +static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct rtl_debugfs_priv *debugfs_priv = filp->private_data; + struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv; + struct ieee80211_hw *hw = rtlpriv->hw; + char tmp[32 + 1]; + int tmp_len; + int num; + int path; + u32 addr, bitmask, data; + + if (count < 3) + return -EFAULT; + + tmp_len = (count > sizeof(tmp) - 1 ? 
sizeof(tmp) - 1 : count); + + if (!buffer || copy_from_user(tmp, buffer, tmp_len)) + return count; + + tmp[tmp_len] = '\0'; + + num = sscanf(tmp, "%X %X %X %X", + &path, &addr, &bitmask, &data); + + if (num != 4) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, + "Format is <path> <addr> <mask> <data>\n"); + return count; + } + + rtl_set_rfreg(hw, path, addr, bitmask, data); + + return count; +} + +static struct rtl_debugfs_priv rtl_debug_priv_write_rfreg = { + .cb_write = rtl_debugfs_set_write_rfreg, +}; + +static int rtl_debugfs_close(struct inode *inode, struct file *filp) +{ + return 0; +} + +static ssize_t rtl_debugfs_common_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct rtl_debugfs_priv *debugfs_priv = filp->private_data; + + return debugfs_priv->cb_write(filp, buffer, count, loff); +} + +static const struct file_operations file_ops_common_write = { + .owner = THIS_MODULE, + .write = rtl_debugfs_common_write, + .open = simple_open, + .release = rtl_debugfs_close, +}; + +#define RTL_DEBUGFS_ADD_CORE(name, mode, fopname) \ + do { \ + rtl_debug_priv_ ##name.rtlpriv = rtlpriv; \ + if (!debugfs_create_file(#name, mode, \ + parent, &rtl_debug_priv_ ##name, \ + &file_ops_ ##fopname)) \ + pr_err("Unable to initialize debugfs:%s/%s\n", \ + rtlpriv->dbg.debugfs_name, \ + #name); \ + } while (0) + +#define RTL_DEBUGFS_ADD(name) \ + RTL_DEBUGFS_ADD_CORE(name, S_IFREG | 0444, common) +#define RTL_DEBUGFS_ADD_W(name) \ + RTL_DEBUGFS_ADD_CORE(name, S_IFREG | 0222, common_write) + +void rtl_debug_add_one(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct dentry *parent; + + snprintf(rtlpriv->dbg.debugfs_name, 18, "%pMF", rtlefuse->dev_addr); + + rtlpriv->dbg.debugfs_dir = + debugfs_create_dir(rtlpriv->dbg.debugfs_name, debugfs_topdir); + if (!rtlpriv->dbg.debugfs_dir) { + pr_err("Unable to init debugfs:/%s/%s\n", rtlpriv->cfg->name, + rtlpriv->dbg.debugfs_name); + return; + } + + parent = rtlpriv->dbg.debugfs_dir; + + RTL_DEBUGFS_ADD(mac_0); + RTL_DEBUGFS_ADD(mac_1); + RTL_DEBUGFS_ADD(mac_2); + RTL_DEBUGFS_ADD(mac_3); + RTL_DEBUGFS_ADD(mac_4); + RTL_DEBUGFS_ADD(mac_5); + RTL_DEBUGFS_ADD(mac_6); + RTL_DEBUGFS_ADD(mac_7); + RTL_DEBUGFS_ADD(bb_8); + RTL_DEBUGFS_ADD(bb_9); + RTL_DEBUGFS_ADD(bb_a); + RTL_DEBUGFS_ADD(bb_b); + RTL_DEBUGFS_ADD(bb_c); + RTL_DEBUGFS_ADD(bb_d); + RTL_DEBUGFS_ADD(bb_e); + RTL_DEBUGFS_ADD(bb_f); + RTL_DEBUGFS_ADD(mac_10); + RTL_DEBUGFS_ADD(mac_11); + RTL_DEBUGFS_ADD(mac_12); + RTL_DEBUGFS_ADD(mac_13); + RTL_DEBUGFS_ADD(mac_14); + RTL_DEBUGFS_ADD(mac_15); + RTL_DEBUGFS_ADD(mac_16); + RTL_DEBUGFS_ADD(mac_17); + RTL_DEBUGFS_ADD(bb_18); + RTL_DEBUGFS_ADD(bb_19); + RTL_DEBUGFS_ADD(bb_1a); + RTL_DEBUGFS_ADD(bb_1b); + RTL_DEBUGFS_ADD(bb_1c); + RTL_DEBUGFS_ADD(bb_1d); + RTL_DEBUGFS_ADD(bb_1e); + RTL_DEBUGFS_ADD(bb_1f); + RTL_DEBUGFS_ADD(rf_a); + RTL_DEBUGFS_ADD(rf_b); + + RTL_DEBUGFS_ADD(cam_1); + RTL_DEBUGFS_ADD(cam_2); + RTL_DEBUGFS_ADD(cam_3); + + RTL_DEBUGFS_ADD(btcoex); + + RTL_DEBUGFS_ADD_W(write_reg); + RTL_DEBUGFS_ADD_W(write_h2c); + RTL_DEBUGFS_ADD_W(write_rfreg); +} +EXPORT_SYMBOL_GPL(rtl_debug_add_one); + +void rtl_debug_remove_one(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + debugfs_remove_recursive(rtlpriv->dbg.debugfs_dir); + rtlpriv->dbg.debugfs_dir = NULL; +} +EXPORT_SYMBOL_GPL(rtl_debug_remove_one); + +void rtl_debugfs_add_topdir(void) +{ + debugfs_topdir = debugfs_create_dir("rtlwifi", NULL); +} + +void 
rtl_debugfs_remove_topdir(void) +{ + debugfs_remove_recursive(debugfs_topdir); +} + #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h index 947718001457..ad6834af618b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.h +++ b/drivers/net/wireless/realtek/rtlwifi/debug.h @@ -219,4 +219,16 @@ static inline void RT_PRINT_DATA(struct rtl_priv *rtlpriv, } #endif + +#ifdef CONFIG_RTLWIFI_DEBUG +void rtl_debug_add_one(struct ieee80211_hw *hw); +void rtl_debug_remove_one(struct ieee80211_hw *hw); +void rtl_debugfs_add_topdir(void); +void rtl_debugfs_remove_topdir(void); +#else +#define rtl_debug_add_one(hw) +#define rtl_debug_remove_one(hw) +#define rtl_debugfs_add_topdir() +#define rtl_debugfs_remove_topdir() +#endif #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c index ef9acd466cca..35b50be633f1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.c +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c @@ -257,11 +257,11 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf) sizeof(u8), GFP_ATOMIC); if (!efuse_tbl) return; - efuse_word = kzalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC); + efuse_word = kcalloc(EFUSE_MAX_WORD_UNIT, sizeof(u16 *), GFP_ATOMIC); if (!efuse_word) goto out; for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { - efuse_word[i] = kzalloc(efuse_max_section * sizeof(u16), + efuse_word[i] = kcalloc(efuse_max_section, sizeof(u16), GFP_ATOMIC); if (!efuse_word[i]) goto done; diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index c2575b0b9440..01ccf8884831 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -59,6 +59,7 @@ static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb) struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); __le16 fc = rtl_get_fc(skb); u8 queue_index = skb_get_queue_mapping(skb); + struct ieee80211_hdr *hdr; if (unlikely(ieee80211_is_beacon(fc))) return BEACON_QUEUE; @@ -67,6 +68,13 @@ static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb) if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) if (ieee80211_is_nullfunc(fc)) return HIGH_QUEUE; + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) { + hdr = rtl_get_hdr(skb); + + if (is_multicast_ether_addr(hdr->addr1) || + is_broadcast_ether_addr(hdr->addr1)) + return HIGH_QUEUE; + } return ac_to_hwq[queue_index]; } @@ -557,13 +565,6 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) else entry = (u8 *)(&ring->desc[ring->idx]); - if (rtlpriv->cfg->ops->get_available_desc && - rtlpriv->cfg->ops->get_available_desc(hw, prio) <= 1) { - RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_DMESG, - "no available desc!\n"); - return; - } - if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx)) return; ring->idx = (ring->idx + 1) % ring->entries; @@ -747,7 +748,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) u8 tmp_one; bool unicast = false; u8 hw_queue = 0; - unsigned int rx_remained_cnt; + unsigned int rx_remained_cnt = 0; struct rtl_stats stats = { .signal = 0, .rate = 0, @@ -768,7 +769,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) struct sk_buff *new_skb; if (rtlpriv->use_new_trx_flow) { - rx_remained_cnt = + if (rx_remained_cnt == 0) + rx_remained_cnt = rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw, hw_queue); if (rx_remained_cnt == 0) @@ -924,10 +926,8 @@ static 
irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); unsigned long flags; - u32 inta = 0; - u32 intb = 0; - u32 intc = 0; - u32 intd = 0; + struct rtl_int intvec = {0}; + irqreturn_t ret = IRQ_HANDLED; if (rtlpci->irq_enabled == 0) @@ -937,47 +937,47 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) rtlpriv->cfg->ops->disable_interrupt(hw); /*read ISR: 4/8bytes */ - rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb, &intc, &intd); + rtlpriv->cfg->ops->interrupt_recognized(hw, &intvec); /*Shared IRQ or HW disappeared */ - if (!inta || inta == 0xffff) + if (!intvec.inta || intvec.inta == 0xffff) goto done; /*<1> beacon related */ - if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) + if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon ok interrupt!\n"); - if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) + if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon err interrupt!\n"); - if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) + if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n"); - if (inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) { + if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "prepare beacon for interrupt!\n"); tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet); } /*<2> Tx related */ - if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW])) + if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW])) RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n"); - if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) { + if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Manage ok interrupt!\n"); _rtl_pci_tx_isr(hw, MGNT_QUEUE); } - if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) { + if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "HIGH_QUEUE ok interrupt!\n"); _rtl_pci_tx_isr(hw, HIGH_QUEUE); } - if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) { + if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) { rtlpriv->link_info.num_tx_inperiod++; RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, @@ -985,7 +985,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) _rtl_pci_tx_isr(hw, BK_QUEUE); } - if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) { + if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) { rtlpriv->link_info.num_tx_inperiod++; RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, @@ -993,7 +993,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) _rtl_pci_tx_isr(hw, BE_QUEUE); } - if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) { + if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) { rtlpriv->link_info.num_tx_inperiod++; RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, @@ -1001,7 +1001,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) _rtl_pci_tx_isr(hw, VI_QUEUE); } - if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) { + if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) { rtlpriv->link_info.num_tx_inperiod++; RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, @@ -1010,7 +1010,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) } if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) { - if (intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) { + if (intvec.intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) { rtlpriv->link_info.num_tx_inperiod++; RT_TRACE(rtlpriv, COMP_INTR, 
DBG_TRACE, @@ -1020,7 +1020,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) } if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) { - if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) { + if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) { rtlpriv->link_info.num_tx_inperiod++; RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, @@ -1030,25 +1030,25 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) } /*<3> Rx related */ - if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) { + if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n"); _rtl_pci_rx_interrupt(hw); } - if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) { + if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx descriptor unavailable!\n"); _rtl_pci_rx_interrupt(hw); } - if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) { + if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n"); _rtl_pci_rx_interrupt(hw); } /*<4> fw related*/ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) { - if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) { + if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "firmware interrupt!\n"); queue_delayed_work(rtlpriv->works.rtl_wq, @@ -1064,7 +1064,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) */ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE || rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) { - if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) { + if (unlikely(intvec.inta & + rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "hsisr interrupt!\n"); _rtl_pci_hs_interrupt(hw); @@ -1250,7 +1251,6 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, rtlpci->tx_ring[prio].cur_tx_rp = 0; rtlpci->tx_ring[prio].cur_tx_wp = 0; - rtlpci->tx_ring[prio].avl_desc = entries; } /* alloc dma for this ring */ @@ -1555,7 +1555,14 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) dev_kfree_skb_irq(skb); ring->idx = (ring->idx + 1) % ring->entries; } + + if (rtlpriv->use_new_trx_flow) { + rtlpci->tx_ring[i].cur_tx_rp = 0; + rtlpci->tx_ring[i].cur_tx_wp = 0; + } + ring->idx = 0; + ring->entries = rtlpci->txringcount[i]; } } spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); @@ -1787,6 +1794,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw) struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); + struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; int err; @@ -1796,9 +1804,12 @@ static int rtl_pci_start(struct ieee80211_hw *hw) if (rtlpriv->cfg->ops->get_btc_status && rtlpriv->cfg->ops->get_btc_status()) { rtlpriv->btcoexist.btc_info.ap_num = 36; - rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv); - rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv); + btc_ops->btc_init_variables(rtlpriv); + btc_ops->btc_init_hal_vars(rtlpriv); + } else if (btc_ops) { + btc_ops->btc_init_variables_wifi_only(rtlpriv); } + err = rtlpriv->cfg->ops->hw_init(hw); if (err) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, @@ -1834,7 +1845,10 @@ static void rtl_pci_stop(struct ieee80211_hw *hw) u8 rf_timeout = 0; if (rtlpriv->cfg->ops->get_btc_status()) - rtlpriv->btcoexist.btc_ops->btc_halt_notify(); + rtlpriv->btcoexist.btc_ops->btc_halt_notify(rtlpriv); + + if (rtlpriv->btcoexist.btc_ops) + 
rtlpriv->btcoexist.btc_ops->btc_deinit_variables(rtlpriv); /*should be before disable interrupt&adapter *and will do it immediately. @@ -2302,6 +2316,9 @@ int rtl_pci_probe(struct pci_dev *pdev, } rtlpriv->mac80211.mac80211_registered = 1; + /* add for debug */ + rtl_debug_add_one(hw); + /*init rfkill */ rtl_init_rfkill(hw); /* Init PCI sw */ @@ -2350,6 +2367,9 @@ void rtl_pci_disconnect(struct pci_dev *pdev) wait_for_completion(&rtlpriv->firmware_loading_complete); clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); + /* remove from debug */ + rtl_debug_remove_one(hw); + /*ieee80211_unregister_hw will call ops_stop */ if (rtlmac->mac80211_registered == 1) { ieee80211_unregister_hw(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h index e7d070e8da2d..3fb56c845a61 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.h +++ b/drivers/net/wireless/realtek/rtlwifi/pci.h @@ -173,7 +173,6 @@ struct rtl8192_tx_ring { /*add for new trx flow*/ struct rtl_tx_buffer_desc *buffer_desc; /*tx buffer descriptor*/ dma_addr_t buffer_desc_dma; /*tx bufferd desc dma memory*/ - u16 avl_desc; /* available_desc_to_write */ u16 cur_tx_wp; /* current_tx_write_point */ u16 cur_tx_rp; /* current_tx_read_point */ }; @@ -320,10 +319,10 @@ static inline void pci_write32_async(struct rtl_priv *rtlpriv, writel(val, (u8 __iomem *)rtlpriv->io.pci_mem_start + addr); } -static inline u16 calc_fifo_space(u16 rp, u16 wp) +static inline u16 calc_fifo_space(u16 rp, u16 wp, u16 size) { if (rp <= wp) - return RTL_PCI_MAX_RX_COUNT - 1 + rp - wp; + return size - 1 + rp - wp; return rp - wp - 1; } diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index 24c87fae5382..71af24e2e051 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -51,6 +51,11 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw) &rtlmac->retry_long); RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + rtlpriv->cfg->ops->switch_channel(hw); + rtlpriv->cfg->ops->set_channel_access(hw); + rtlpriv->cfg->ops->set_bw_mode(hw, + cfg80211_get_chandef_type(&hw->conf.chandef)); + /*<3> Enable Interrupt */ rtlpriv->cfg->ops->enable_interrupt(hw); @@ -289,7 +294,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw) cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); - spin_lock(&rtlpriv->locks.ips_lock); + mutex_lock(&rtlpriv->locks.ips_mutex); if (ppsc->inactiveps) { rtstate = ppsc->rfpwr_state; @@ -306,7 +311,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw) ppsc->inactive_pwrstate); } } - spin_unlock(&rtlpriv->locks.ips_lock); + mutex_unlock(&rtlpriv->locks.ips_mutex); } EXPORT_SYMBOL_GPL(rtl_ips_nic_on); @@ -415,7 +420,6 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw) struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); - unsigned long flag; if (!ppsc->fwctrl_lps) return; @@ -436,7 +440,7 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw) if (mac->link_state != MAC80211_LINKED) return; - spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); + mutex_lock(&rtlpriv->locks.lps_mutex); /* Don't need to check (ppsc->dot11_psmode == EACTIVE), because * bt_ccoexist may ask to enter lps.
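The calc_fifo_space() change in the pci.h hunk above generalizes the usual circular-FIFO free-slot formula: the ring size becomes a parameter, so the one helper can serve both the RX ring (called with RTL_PCI_MAX_RX_COUNT) and the 8192EE TX rings (called with TX_DESC_NUM_92E), as the trx.c hunks further below show. As a minimal standalone sketch of the same arithmetic, with made-up pointer values rather than anything read from hardware (fifo_space() here is just a userspace stand-in for the kernel's static inline calc_fifo_space()):

#include <stdint.h>
#include <stdio.h>

/* Same formula as calc_fifo_space(): one slot is kept unused so that
 * rp == wp can unambiguously mean "ring empty". */
static uint16_t fifo_space(uint16_t rp, uint16_t wp, uint16_t size)
{
	if (rp <= wp)
		return size - 1 + rp - wp;
	return rp - wp - 1;
}

int main(void)
{
	/* Empty 512-entry ring: 511 slots writable. */
	printf("%u\n", (unsigned int)fifo_space(0, 0, 512));
	/* Writer ahead of reader: 512 - 1 + 2 - 10 = 503 slots free. */
	printf("%u\n", (unsigned int)fifo_space(2, 10, 512));
	/* Writer has wrapped past the end: 300 - 200 - 1 = 99 slots free. */
	printf("%u\n", (unsigned int)fifo_space(300, 200, 512));
	return 0;
}

The kernel helper uses u16 and lives in a header as a static inline; the sketch differs only in using the <stdint.h> type names so it builds outside the kernel tree.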
@@ -446,7 +450,7 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw) "Enter 802.11 power save mode...\n"); rtl_lps_set_psmode(hw, EAUTOPS); - spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); + mutex_unlock(&rtlpriv->locks.lps_mutex); } /* Interrupt safe routine to leave the leisure power save mode.*/ @@ -455,9 +459,8 @@ static void rtl_lps_leave_core(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - unsigned long flag; - spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); + mutex_lock(&rtlpriv->locks.lps_mutex); if (ppsc->fwctrl_lps) { if (ppsc->dot11_psmode != EACTIVE) { @@ -478,7 +481,7 @@ static void rtl_lps_leave_core(struct ieee80211_hw *hw) rtl_lps_set_psmode(hw, EACTIVE); } } - spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); + mutex_unlock(&rtlpriv->locks.lps_mutex); } /* For sw LPS*/ @@ -568,7 +571,6 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - unsigned long flag; if (!rtlpriv->psc.swctrl_lps) return; @@ -581,9 +583,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw) RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); } - spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); + mutex_lock(&rtlpriv->locks.lps_mutex); rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS); - spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); + mutex_unlock(&rtlpriv->locks.lps_mutex); } void rtl_swlps_rfon_wq_callback(void *data) @@ -600,7 +602,6 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - unsigned long flag; u8 sleep_intv; if (!rtlpriv->psc.sw_ps_enabled) @@ -624,9 +625,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw) } spin_unlock(&rtlpriv->locks.rf_ps_lock); - spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); + mutex_lock(&rtlpriv->locks.lps_mutex); rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS); - spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); + mutex_unlock(&rtlpriv->locks.lps_mutex); if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c index 02811eda57cd..d1cb7d405618 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rc.c @@ -123,7 +123,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv, if (sta && (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; - if (sta && (sta->vht_cap.vht_supported)) + if (sta && sta->vht_cap.vht_supported) rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; } else { if (mac->bw_40) @@ -135,8 +135,8 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv, if (sgi_20 || sgi_40 || sgi_80) rate->flags |= IEEE80211_TX_RC_SHORT_GI; if (sta && sta->ht_cap.ht_supported && - ((wireless_mode == WIRELESS_MODE_N_5G) || - (wireless_mode == WIRELESS_MODE_N_24G))) + (wireless_mode == WIRELESS_MODE_N_5G || + wireless_mode == WIRELESS_MODE_N_24G)) rate->flags |= IEEE80211_TX_RC_MCS; if (sta && sta->vht_cap.vht_supported && (wireless_mode == WIRELESS_MODE_AC_5G || @@ -216,8 +216,8 @@ static void rtl_tx_status(void *ppriv, if (sta) { /* Check if aggregation has to be enabled for this tid */ - sta_entry = (struct 
rtl_sta_info *) sta->drv_priv; - if ((sta->ht_cap.ht_supported) && + sta_entry = (struct rtl_sta_info *)sta->drv_priv; + if (sta->ht_cap.ht_supported && !(skb->protocol == cpu_to_be16(ETH_P_PAE))) { if (ieee80211_is_data_qos(fc)) { u8 tid = rtl_get_tid(skb); @@ -265,11 +265,9 @@ static void *rtl_rate_alloc_sta(void *ppriv, struct rtl_priv *rtlpriv = ppriv; struct rtl_rate_priv *rate_priv; - rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp); - if (!rate_priv) { - pr_err("Unable to allocate private rc structure\n"); + rate_priv = kzalloc(sizeof(*rate_priv), gfp); + if (!rate_priv) return NULL; - } rtlpriv->rate_priv = rate_priv; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c index a2eca669873b..63874512598b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c @@ -141,6 +141,8 @@ int rtl88e_download_fw(struct ieee80211_hw *hw, return 1; pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware; + rtlhal->fw_version = le16_to_cpu(pfwheader->version); + rtlhal->fw_subversion = pfwheader->subversion; pfwdata = rtlhal->pfirmware; fwsize = rtlhal->fwsize; RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index e30a18e64ff5..988d5ac57d02 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -1472,17 +1472,16 @@ void rtl88ee_card_disable(struct ieee80211_hw *hw) } void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd) + struct rtl_int *intvec) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; - rtl_write_dword(rtlpriv, ISR, *p_inta); + intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; + rtl_write_dword(rtlpriv, ISR, intvec->inta); - *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1]; - rtl_write_dword(rtlpriv, REG_HISRE, *p_intb); + intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1]; + rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h index cdf49de1e6ed..214cd2a32018 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h @@ -29,8 +29,7 @@ void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw); void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd); + struct rtl_int *int_vec); int rtl88ee_hw_init(struct ieee80211_hw *hw); void rtl88ee_card_disable(struct ieee80211_hw *hw); void rtl88ee_enable_interrupt(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c index 0f4c86a28716..4a81e0ef4b8e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c @@ -1375,19 +1375,13 @@ void rtl92ce_card_disable(struct ieee80211_hw *hw) } void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd) + struct rtl_int *intvec) { struct rtl_priv *rtlpriv 
= rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; - rtl_write_dword(rtlpriv, ISR, *p_inta); - - /* - * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1]; - * rtl_write_dword(rtlpriv, ISR + 4, *p_intb); - */ + intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; + rtl_write_dword(rtlpriv, ISR, intvec->inta); } void rtl92ce_set_beacon_related_registers(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h index b5c8e2fc1ba2..6711ea1a75d9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h @@ -42,8 +42,7 @@ static inline u8 rtl92c_get_chnl_group(u8 chnl) void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw); void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd); + struct rtl_int *int_vec); int rtl92ce_hw_init(struct ieee80211_hw *hw); void rtl92ce_card_disable(struct ieee80211_hw *hw); void rtl92ce_enable_interrupt(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index 0da6c0136857..80123fd97221 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c @@ -1356,19 +1356,13 @@ void rtl92de_card_disable(struct ieee80211_hw *hw) } void rtl92de_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd) + struct rtl_int *intvec) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; - rtl_write_dword(rtlpriv, ISR, *p_inta); - - /* - * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1]; - * rtl_write_dword(rtlpriv, ISR + 4, *p_intb); - */ + intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; + rtl_write_dword(rtlpriv, ISR, intvec->inta); } void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h index 9236aa91273d..e6c702e69ecf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h @@ -29,8 +29,7 @@ void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); void rtl92de_read_eeprom_info(struct ieee80211_hw *hw); void rtl92de_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd); + struct rtl_int *int_vec); int rtl92de_hw_init(struct ieee80211_hw *hw); void rtl92de_card_disable(struct ieee80211_hw *hw); void rtl92de_enable_interrupt(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index fe5da637e77a..fd7928fdbd1a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -1694,17 +1694,16 @@ void rtl92ee_card_disable(struct ieee80211_hw *hw) } void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd) + struct rtl_int *intvec) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = 
rtl_pcidev(rtl_pcipriv(hw)); - *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; - rtl_write_dword(rtlpriv, ISR, *p_inta); + intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; + rtl_write_dword(rtlpriv, ISR, intvec->inta); - *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1]; - rtl_write_dword(rtlpriv, REG_HISRE, *p_intb); + intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1]; + rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb); } void rtl92ee_set_beacon_related_registers(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h index cd6d3322f033..3a63bec9b0cc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h @@ -29,8 +29,7 @@ void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw); void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd); + struct rtl_int *int_vec); int rtl92ee_hw_init(struct ieee80211_hw *hw); void rtl92ee_card_disable(struct ieee80211_hw *hw); void rtl92ee_enable_interrupt(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 12255682e890..4f7444331b07 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -498,7 +498,8 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index) if (!start_rx) return 0; - remind_cnt = calc_fifo_space(read_point, write_point); + remind_cnt = calc_fifo_space(read_point, write_point, + RTL_PCI_MAX_RX_COUNT); if (remind_cnt == 0) return 0; @@ -548,7 +549,6 @@ static u16 get_desc_addr_fr_q_idx(u16 queue_index) u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx) { - struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); u16 point_diff = 0; u16 current_tx_read_point = 0, current_tx_write_point = 0; @@ -560,9 +560,9 @@ u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx) current_tx_write_point = (u16)((tmp_4byte) & 0x0fff); point_diff = calc_fifo_space(current_tx_read_point, - current_tx_write_point); + current_tx_write_point, + TX_DESC_NUM_92E); - rtlpci->tx_ring[q_idx].avl_desc = point_diff; return point_diff; } @@ -907,10 +907,6 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, u8 desc_name, u8 *val) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u16 cur_tx_rp = 0; - u16 cur_tx_wp = 0; - static bool over_run; - u32 tmp = 0; u8 q_idx = *val; bool dma64 = rtlpriv->cfg->mod_params->dma64; @@ -931,38 +927,12 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, return; } + /* make sure tx desc is available by caller */ ring->cur_tx_wp = ((ring->cur_tx_wp + 1) % max_tx_desc); - if (over_run) { - ring->cur_tx_wp = 0; - over_run = false; - } - if (ring->avl_desc > 1) { - ring->avl_desc--; - - rtl_write_word(rtlpriv, - get_desc_addr_fr_q_idx(q_idx), - ring->cur_tx_wp); - } - - if (ring->avl_desc < (max_tx_desc - 15)) { - u16 point_diff = 0; - - tmp = - rtl_read_dword(rtlpriv, - get_desc_addr_fr_q_idx(q_idx)); - cur_tx_rp = (u16)((tmp >> 16) & 0x0fff); - cur_tx_wp = (u16)(tmp & 0x0fff); - - ring->cur_tx_wp = cur_tx_wp; - ring->cur_tx_rp = cur_tx_rp; - point_diff = ((cur_tx_rp > cur_tx_wp) ? 
- (cur_tx_rp - cur_tx_wp) : - (TX_DESC_NUM_92E - 1 - - cur_tx_wp + cur_tx_rp)); - - ring->avl_desc = point_diff; - } + rtl_write_word(rtlpriv, + get_desc_addr_fr_q_idx(q_idx), + ring->cur_tx_wp); } break; } @@ -1044,13 +1014,12 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); - u16 read_point, write_point, available_desc_num; + u16 read_point, write_point; bool ret = false; static u8 stop_report_cnt; struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; { - u16 point_diff = 0; u16 cur_tx_rp, cur_tx_wp; u32 tmpu32 = 0; @@ -1060,18 +1029,12 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index) cur_tx_rp = (u16)((tmpu32 >> 16) & 0x0fff); cur_tx_wp = (u16)(tmpu32 & 0x0fff); - ring->cur_tx_wp = cur_tx_wp; + /* don't need to update ring->cur_tx_wp */ ring->cur_tx_rp = cur_tx_rp; - point_diff = ((cur_tx_rp > cur_tx_wp) ? - (cur_tx_rp - cur_tx_wp) : - (TX_DESC_NUM_92E - cur_tx_wp + cur_tx_rp)); - - ring->avl_desc = point_diff; } read_point = ring->cur_tx_rp; write_point = ring->cur_tx_wp; - available_desc_num = ring->avl_desc; if (write_point > read_point) { if (index < write_point && index >= read_point) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c index 76bf089cced4..30dea7b9bc17 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c @@ -1558,17 +1558,17 @@ void rtl92se_card_disable(struct ieee80211_hw *hw) udelay(100); } -void rtl92se_interrupt_recognized(struct ieee80211_hw *hw, u32 *p_inta, - u32 *p_intb, u32 *p_intc, u32 *p_intd) +void rtl92se_interrupt_recognized(struct ieee80211_hw *hw, + struct rtl_int *intvec) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; - rtl_write_dword(rtlpriv, ISR, *p_inta); + intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; + rtl_write_dword(rtlpriv, ISR, intvec->inta); - *p_intb = rtl_read_dword(rtlpriv, ISR + 4) & rtlpci->irq_mask[1]; - rtl_write_dword(rtlpriv, ISR + 4, *p_intb); + intvec->intb = rtl_read_dword(rtlpriv, ISR + 4) & rtlpci->irq_mask[1]; + rtl_write_dword(rtlpriv, ISR + 4, intvec->intb); } void rtl92se_set_beacon_related_registers(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h index 607056010974..fa836ceefc8f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h @@ -42,8 +42,7 @@ void rtl92se_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); void rtl92se_read_eeprom_info(struct ieee80211_hw *hw); void rtl92se_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd); + struct rtl_int *int_vec); int rtl92se_hw_init(struct ieee80211_hw *hw); void rtl92se_card_disable(struct ieee80211_hw *hw); void rtl92se_enable_interrupt(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c index c3f98d58124c..545115db507e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c @@ -1340,14 +1340,13 @@ void rtl8723e_card_disable(struct ieee80211_hw *hw) } void 
rtl8723e_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd) + struct rtl_int *intvec) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - *p_inta = rtl_read_dword(rtlpriv, 0x3a0) & rtlpci->irq_mask[0]; - rtl_write_dword(rtlpriv, 0x3a0, *p_inta); + intvec->inta = rtl_read_dword(rtlpriv, 0x3a0) & rtlpci->irq_mask[0]; + rtl_write_dword(rtlpriv, 0x3a0, intvec->inta); } void rtl8723e_set_beacon_related_registers(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h index 19e467a37c72..c76e453f4f43 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h @@ -34,8 +34,7 @@ void rtl8723e_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw); void rtl8723e_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd); + struct rtl_int *int_vec); int rtl8723e_hw_init(struct ieee80211_hw *hw); void rtl8723e_card_disable(struct ieee80211_hw *hw); void rtl8723e_enable_interrupt(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 7cd1ffa7d4a7..f9ccd13c79f9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -43,6 +43,7 @@ #include "../pwrseqcmd.h" #include "pwrseq.h" #include "../btcoexist/rtl_btc.h" +#include <linux/kernel.h> #define LLT_CONFIG 5 @@ -1682,18 +1683,17 @@ void rtl8723be_card_disable(struct ieee80211_hw *hw) } void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd) + struct rtl_int *intvec) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; - rtl_write_dword(rtlpriv, ISR, *p_inta); + intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; + rtl_write_dword(rtlpriv, ISR, intvec->inta); - *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & - rtlpci->irq_mask[1]; - rtl_write_dword(rtlpriv, REG_HISRE, *p_intb); + intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & + rtlpci->irq_mask[1]; + rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb); } void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw) @@ -2127,28 +2127,28 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, if (rtlhal->oem_id == RT_CID_DEFAULT) { /* Does this one have a Toshiba SMID from group 1? */ - for (i = 0; i < sizeof(toshiba_smid1) / sizeof(u16); i++) { + for (i = 0; i < ARRAY_SIZE(toshiba_smid1); i++) { if (rtlefuse->eeprom_smid == toshiba_smid1[i]) { is_toshiba_smid1 = true; break; } } /* Does this one have a Toshiba SMID from group 2? */ - for (i = 0; i < sizeof(toshiba_smid2) / sizeof(u16); i++) { + for (i = 0; i < ARRAY_SIZE(toshiba_smid2); i++) { if (rtlefuse->eeprom_smid == toshiba_smid2[i]) { is_toshiba_smid2 = true; break; } } /* Does this one have a Samsung SMID? */ - for (i = 0; i < sizeof(samsung_smid) / sizeof(u16); i++) { + for (i = 0; i < ARRAY_SIZE(samsung_smid); i++) { if (rtlefuse->eeprom_smid == samsung_smid[i]) { is_samsung_smid = true; break; } } /* Does this one have a Lenovo SMID? 
*/ - for (i = 0; i < sizeof(lenovo_smid) / sizeof(u16); i++) { + for (i = 0; i < ARRAY_SIZE(lenovo_smid); i++) { if (rtlefuse->eeprom_smid == lenovo_smid[i]) { is_lenovo_smid = true; break; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h index 2215a792f6bf..ae856a19e81a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h @@ -30,8 +30,7 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw); void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd); + struct rtl_int *int_vec); int rtl8723be_hw_init(struct ieee80211_hw *hw); void rtl8723be_card_disable(struct ieee80211_hw *hw); void rtl8723be_enable_interrupt(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index 9606641519e7..1263b12db5dc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c @@ -35,6 +35,7 @@ #include "../rtl8723com/dm_common.h" #include "table.h" #include "trx.h" +#include <linux/kernel.h> static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw); static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); @@ -1143,14 +1144,13 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) DESC92C_RATEMCS2, DESC92C_RATEMCS3, DESC92C_RATEMCS4, DESC92C_RATEMCS5, DESC92C_RATEMCS6, DESC92C_RATEMCS7}; - u8 i, size; + u8 i; u8 power_index; if (!rtlefuse->txpwr_fromeprom) return; - size = sizeof(cck_rates) / sizeof(u8); - for (i = 0; i < size; i++) { + for (i = 0; i < ARRAY_SIZE(cck_rates); i++) { power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A, cck_rates[i], rtl_priv(hw)->phy.current_chan_bw, @@ -1158,8 +1158,7 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A, cck_rates[i]); } - size = sizeof(ofdm_rates) / sizeof(u8); - for (i = 0; i < size; i++) { + for (i = 0; i < ARRAY_SIZE(ofdm_rates); i++) { power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A, ofdm_rates[i], rtl_priv(hw)->phy.current_chan_bw, @@ -1167,8 +1166,7 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A, ofdm_rates[i]); } - size = sizeof(ht_rates_1t) / sizeof(u8); - for (i = 0; i < size; i++) { + for (i = 0; i < ARRAY_SIZE(ht_rates_1t); i++) { power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A, ht_rates_1t[i], rtl_priv(hw)->phy.current_chan_bw, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c index 381c16b9b3a9..160fee8333ae 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c @@ -25,6 +25,7 @@ * *****************************************************************************/ +#include <linux/kernel.h> #include "table.h" u32 RTL8723BEPHY_REG_1TARRAY[] = { @@ -224,8 +225,7 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { }; -u32 RTL8723BEPHY_REG_1TARRAYLEN = - sizeof(RTL8723BEPHY_REG_1TARRAY) / sizeof(u32); +u32 RTL8723BEPHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8723BEPHY_REG_1TARRAY); u32 RTL8723BEPHY_REG_ARRAY_PG[] = { 0, 0, 0, 0x00000e08, 
0x0000ff00, 0x00003800, @@ -236,8 +236,7 @@ u32 RTL8723BEPHY_REG_ARRAY_PG[] = { 0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436 }; -u32 RTL8723BEPHY_REG_ARRAY_PGLEN = - sizeof(RTL8723BEPHY_REG_ARRAY_PG) / sizeof(u32); +u32 RTL8723BEPHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8723BEPHY_REG_ARRAY_PG); u32 RTL8723BE_RADIOA_1TARRAY[] = { 0x000, 0x00010000, @@ -373,8 +372,7 @@ u32 RTL8723BE_RADIOA_1TARRAY[] = { }; -u32 RTL8723BE_RADIOA_1TARRAYLEN = - sizeof(RTL8723BE_RADIOA_1TARRAY) / sizeof(u32); +u32 RTL8723BE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8723BE_RADIOA_1TARRAY); u32 RTL8723BEMAC_1T_ARRAY[] = { 0x02F, 0x00000030, @@ -483,7 +481,7 @@ u32 RTL8723BEMAC_1T_ARRAY[] = { }; -u32 RTL8723BEMAC_1T_ARRAYLEN = sizeof(RTL8723BEMAC_1T_ARRAY) / sizeof(u32); +u32 RTL8723BEMAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8723BEMAC_1T_ARRAY); u32 RTL8723BEAGCTAB_1TARRAY[] = { 0xC78, 0xFD000001, @@ -620,4 +618,4 @@ u32 RTL8723BEAGCTAB_1TARRAY[] = { }; -u32 RTL8723BEAGCTAB_1TARRAYLEN = sizeof(RTL8723BEAGCTAB_1TARRAY) / sizeof(u32); +u32 RTL8723BEAGCTAB_1TARRAYLEN = ARRAY_SIZE(RTL8723BEAGCTAB_1TARRAY); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c index efa7e1262461..521039c5d4ce 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c @@ -196,10 +196,12 @@ int rtl8723_download_fw(struct ieee80211_hw *hw, enum version_8723e version = rtlhal->version; int max_page; - if (!rtlhal->pfirmware) + if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware) return 1; pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware; + rtlhal->fw_version = le16_to_cpu(pfwheader->version); + rtlhal->fw_subversion = pfwheader->subversion; pfwdata = rtlhal->pfirmware; fwsize = rtlhal->fwsize; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 43e18c4c1e68..f20e77b4bb65 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -2483,17 +2483,16 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw) } void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd) + struct rtl_int *intvec) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; - rtl_write_dword(rtlpriv, ISR, *p_inta); + intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; + rtl_write_dword(rtlpriv, ISR, intvec->inta); - *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1]; - rtl_write_dword(rtlpriv, REG_HISRE, *p_intb); + intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1]; + rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb); } void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h index 284d259fe557..e2ab783a2ad9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h @@ -30,8 +30,7 @@ void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw); void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd); + struct rtl_int *int_vec); int 
rtl8821ae_hw_init(struct ieee80211_hw *hw); void rtl8821ae_card_disable(struct ieee80211_hw *hw); void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c index 408c4611e5de..f87f9d03b9fa 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c @@ -24,7 +24,7 @@ * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ - +#include <linux/kernel.h> #include "table.h" u32 RTL8812AE_PHY_REG_ARRAY[] = { 0x800, 0x8020D010, @@ -258,8 +258,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = { 0xEB8, 0x00508242, }; -u32 RTL8812AE_PHY_REG_1TARRAYLEN = - sizeof(RTL8812AE_PHY_REG_ARRAY) / sizeof(u32); +u32 RTL8812AE_PHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_PHY_REG_ARRAY); u32 RTL8821AE_PHY_REG_ARRAY[] = { 0x800, 0x0020D090, @@ -436,8 +435,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = { 0xCB8, 0x00508240, }; -u32 RTL8821AE_PHY_REG_1TARRAYLEN = - sizeof(RTL8821AE_PHY_REG_ARRAY) / sizeof(u32); +u32 RTL8821AE_PHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_PHY_REG_ARRAY); u32 RTL8812AE_PHY_REG_ARRAY_PG[] = { 0, 0, 0, 0x00000c20, 0xffffffff, 0x34363840, @@ -488,8 +486,7 @@ u32 RTL8812AE_PHY_REG_ARRAY_PG[] = { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628 }; -u32 RTL8812AE_PHY_REG_ARRAY_PGLEN = - sizeof(RTL8812AE_PHY_REG_ARRAY_PG) / sizeof(u32); +u32 RTL8812AE_PHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8812AE_PHY_REG_ARRAY_PG); u32 RTL8821AE_PHY_REG_ARRAY_PG[] = { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, @@ -509,8 +506,7 @@ u32 RTL8821AE_PHY_REG_ARRAY_PG[] = { 1, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022 }; -u32 RTL8821AE_PHY_REG_ARRAY_PGLEN = - sizeof(RTL8821AE_PHY_REG_ARRAY_PG) / sizeof(u32); +u32 RTL8821AE_PHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8821AE_PHY_REG_ARRAY_PG); u32 RTL8812AE_RADIOA_ARRAY[] = { 0x000, 0x00010000, @@ -927,7 +923,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x018, 0x0001712A, }; -u32 RTL8812AE_RADIOA_1TARRAYLEN = sizeof(RTL8812AE_RADIOA_ARRAY) / sizeof(u32); +u32 RTL8812AE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_RADIOA_ARRAY); u32 RTL8812AE_RADIOB_ARRAY[] = { 0x056, 0x00051CF2, @@ -1335,7 +1331,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x008, 0x00008400, }; -u32 RTL8812AE_RADIOB_1TARRAYLEN = sizeof(RTL8812AE_RADIOB_ARRAY) / sizeof(u32); +u32 RTL8812AE_RADIOB_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_RADIOB_ARRAY); u32 RTL8821AE_RADIOA_ARRAY[] = { 0x018, 0x0001712A, @@ -1929,7 +1925,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { }; -u32 RTL8821AE_RADIOA_1TARRAYLEN = sizeof(RTL8821AE_RADIOA_ARRAY) / sizeof(u32); +u32 RTL8821AE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_RADIOA_ARRAY); u32 RTL8812AE_MAC_REG_ARRAY[] = { 0x010, 0x0000000C, @@ -2041,7 +2037,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = { 0x718, 0x00000040, }; -u32 RTL8812AE_MAC_1T_ARRAYLEN = sizeof(RTL8812AE_MAC_REG_ARRAY) / sizeof(u32); +u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY); u32 RTL8821AE_MAC_REG_ARRAY[] = { 0x428, 0x0000000A, @@ -2143,7 +2139,7 @@ u32 RTL8821AE_MAC_REG_ARRAY[] = { 0x718, 0x00000040, }; -u32 RTL8821AE_MAC_1T_ARRAYLEN = sizeof(RTL8821AE_MAC_REG_ARRAY) / sizeof(u32); +u32 RTL8821AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8821AE_MAC_REG_ARRAY); u32 RTL8812AE_AGC_TAB_ARRAY[] = { 0x80000001, 0x00000000, 0x40000000, 0x00000000, @@ -2479,8 +2475,7 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = { 0xE50, 0x00000020, }; -u32 RTL8812AE_AGC_TAB_1TARRAYLEN = - sizeof(RTL8812AE_AGC_TAB_ARRAY) / sizeof(u32); +u32 
RTL8812AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_AGC_TAB_ARRAY); u32 RTL8821AE_AGC_TAB_ARRAY[] = { 0x81C, 0xBF000001, @@ -2676,8 +2671,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = { 0xC50, 0x00000020, }; -u32 RTL8821AE_AGC_TAB_1TARRAYLEN = - sizeof(RTL8821AE_AGC_TAB_ARRAY) / sizeof(u32); +u32 RTL8821AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_AGC_TAB_ARRAY); /****************************************************************************** * TXPWR_LMT.TXT @@ -3250,7 +3244,7 @@ u8 *RTL8812AE_TXPWR_LMT[] = { "MKK", "5G", "80M", "VHT", "2T", "155", "63" }; -u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8812AE_TXPWR_LMT) / sizeof(u8 *); +u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8812AE_TXPWR_LMT); u8 *RTL8821AE_TXPWR_LMT[] = { "FCC", "2.4G", "20M", "CCK", "1T", "01", "32", @@ -3819,4 +3813,4 @@ u8 *RTL8821AE_TXPWR_LMT[] = { "MKK", "5G", "80M", "VHT", "2T", "155", "63" }; -u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8821AE_TXPWR_LMT) / sizeof(u8 *); +u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8821AE_TXPWR_LMT); diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 39b033b3b53a..ce3103bb8ebb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -962,7 +962,6 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); if (ieee80211_is_auth(fc)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n"); - rtl_ips_nic_on(hw); } if (rtlpriv->psc.sw_ps_enabled) { diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 92d4859ec906..a7aacbc3984e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -873,6 +873,24 @@ enum ratr_table_mode { RATR_INX_WIRELESS_AC_24N = 9, }; +enum ratr_table_mode_new { + RATEID_IDX_BGN_40M_2SS = 0, + RATEID_IDX_BGN_40M_1SS = 1, + RATEID_IDX_BGN_20M_2SS_BN = 2, + RATEID_IDX_BGN_20M_1SS_BN = 3, + RATEID_IDX_GN_N2SS = 4, + RATEID_IDX_GN_N1SS = 5, + RATEID_IDX_BG = 6, + RATEID_IDX_G = 7, + RATEID_IDX_B = 8, + RATEID_IDX_VHT_2SS = 9, + RATEID_IDX_VHT_1SS = 10, + RATEID_IDX_MIX1 = 11, + RATEID_IDX_MIX2 = 12, + RATEID_IDX_VHT_3SS = 13, + RATEID_IDX_BGN_3SS = 14, +}; + enum rtl_link_state { MAC80211_NOLINK = 0, MAC80211_LINKING = 1, @@ -931,6 +949,10 @@ enum package_type { PACKAGE_TFBGA79 }; +enum rtl_spec_ver { + RTL_SPEC_NEW_RATEID = BIT(0), /* use ratr_table_mode_new */ +}; + struct octet_string { u8 *octet; u16 length; @@ -2093,14 +2115,21 @@ struct rtl_wow_pattern { u32 mask[4]; }; +/* struct to store contents of interrupt vectors */ +struct rtl_int { + u32 inta; + u32 intb; + u32 intc; + u32 intd; +}; + struct rtl_hal_ops { int (*init_sw_vars) (struct ieee80211_hw *hw); void (*deinit_sw_vars) (struct ieee80211_hw *hw); void (*read_chip_version)(struct ieee80211_hw *hw); void (*read_eeprom_info) (struct ieee80211_hw *hw); void (*interrupt_recognized) (struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb, - u32 *p_intc, u32 *p_intd); + struct rtl_int *intvec); int (*hw_init) (struct ieee80211_hw *hw); void (*hw_disable) (struct ieee80211_hw *hw); void (*hw_suspend) (struct ieee80211_hw *hw); @@ -2308,6 +2337,7 @@ struct rtl_hal_cfg { struct rtl_hal_ops *ops; struct rtl_mod_params *mod_params; struct rtl_hal_usbint_cfg *usb_interface_cfg; + enum rtl_spec_ver spec_ver; /*this map used for some registers or vars defined int HAL but used in MAIN */ @@ -2318,17 +2348,14 @@ struct rtl_hal_cfg { struct 
rtl_locks { /* mutex */ struct mutex conf_mutex; - struct mutex ps_mutex; + struct mutex ips_mutex; /* mutex for enter/leave IPS */ + struct mutex lps_mutex; /* mutex for enter/leave LPS */ /*spin lock */ - spinlock_t ips_lock; spinlock_t irq_th_lock; - spinlock_t irq_pci_lock; - spinlock_t tx_lock; spinlock_t h2c_lock; spinlock_t rf_ps_lock; spinlock_t rf_lock; - spinlock_t lps_lock; spinlock_t waitq_lock; spinlock_t entry_list_lock; spinlock_t usb_lock; @@ -2341,9 +2368,6 @@ struct rtl_locks { /*Dual mac*/ spinlock_t cck_and_rw_pagea_lock; - /*Easy concurrent*/ - spinlock_t check_sendpkt_lock; - spinlock_t iqk_lock; }; @@ -2374,6 +2398,12 @@ struct rtl_works { struct work_struct fill_h2c_cmd; }; +struct rtl_debug { + /* add for debug */ + struct dentry *debugfs_dir; + char debugfs_name[20]; +}; + #define MIMO_PS_STATIC 0 #define MIMO_PS_DYNAMIC 1 #define MIMO_PS_NOLIMIT 3 @@ -2493,6 +2523,9 @@ struct rtl_btc_info { struct bt_coexist_info { struct rtl_btc_ops *btc_ops; struct rtl_btc_info btc_info; + /* btc context */ + void *btc_context; + void *wifi_only_context; /* EEPROM BT info. */ u8 eeprom_bt_coexist; u8 eeprom_bt_type; @@ -2549,16 +2582,22 @@ struct bt_coexist_info { struct rtl_btc_ops { void (*btc_init_variables) (struct rtl_priv *rtlpriv); + void (*btc_init_variables_wifi_only)(struct rtl_priv *rtlpriv); + void (*btc_deinit_variables)(struct rtl_priv *rtlpriv); void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv); + void (*btc_power_on_setting)(struct rtl_priv *rtlpriv); void (*btc_init_hw_config) (struct rtl_priv *rtlpriv); + void (*btc_init_hw_config_wifi_only)(struct rtl_priv *rtlpriv); void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type); void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type); void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype); + void (*btc_scan_notify_wifi_only)(struct rtl_priv *rtlpriv, + u8 scantype); void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action); void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv, enum rt_media_status mstatus); void (*btc_periodical) (struct rtl_priv *rtlpriv); - void (*btc_halt_notify) (void); + void (*btc_halt_notify)(struct rtl_priv *rtlpriv); void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length); void (*btc_btmpinfo_notify)(struct rtl_priv *rtlpriv, @@ -2568,6 +2607,12 @@ struct rtl_btc_ops { bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv); void (*btc_special_packet_notify)(struct rtl_priv *rtlpriv, u8 pkt_type); + void (*btc_switch_band_notify)(struct rtl_priv *rtlpriv, u8 type, + bool scanning); + void (*btc_switch_band_notify_wifi_only)(struct rtl_priv *rtlpriv, + u8 type, bool scanning); + void (*btc_display_bt_coex_info)(struct rtl_priv *rtlpriv, + struct seq_file *m); void (*btc_record_pwr_mode)(struct rtl_priv *rtlpriv, u8 *buf, u8 len); u8 (*btc_get_lps_val)(struct rtl_priv *rtlpriv); u8 (*btc_get_rpwm_val)(struct rtl_priv *rtlpriv); @@ -2642,6 +2687,7 @@ struct rtl_priv { /* c2hcmd list for kthread level access */ struct list_head c2hcmd_list; + struct rtl_debug dbg; int max_fw_size; /* diff --git a/drivers/net/wireless/ti/wl1251/init.c b/drivers/net/wireless/ti/wl1251/init.c index 1d799bffaa9f..e7d77acdf18c 100644 --- a/drivers/net/wireless/ti/wl1251/init.c +++ b/drivers/net/wireless/ti/wl1251/init.c @@ -310,10 +310,8 @@ static int wl1251_hw_init_data_path_config(struct wl1251 *wl) /* asking for the data path parameters */ wl->data_path = kzalloc(sizeof(struct acx_data_path_params_resp), GFP_KERNEL); - if (!wl->data_path) { - wl1251_error("Couldnt 
allocate data path parameters"); + if (!wl->data_path) return -ENOMEM; - } ret = wl1251_acx_data_path_params(wl, wl->data_path); if (ret < 0) { diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index 6d02c660b4ab..037defd10b91 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -1200,8 +1200,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw, WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS); enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc; - wl1251_acx_arp_ip_filter(wl, enable, addr); - + ret = wl1251_acx_arp_ip_filter(wl, enable, addr); if (ret < 0) goto out_sleep; } diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c index a4859993db3c..3ca9167d6146 100644 --- a/drivers/net/wireless/ti/wlcore/acx.c +++ b/drivers/net/wireless/ti/wlcore/acx.c @@ -146,7 +146,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif) ret = wl1271_cmd_configure(wl, ACX_FEATURE_CFG, feature, sizeof(*feature)); if (ret < 0) { - wl1271_error("Couldnt set HW encryption"); + wl1271_error("Couldn't set HW encryption"); goto out; } diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h index f46d7fdf9a00..7011c5d9599f 100644 --- a/drivers/net/wireless/ti/wlcore/acx.h +++ b/drivers/net/wireless/ti/wlcore/acx.h @@ -1129,10 +1129,8 @@ int wl12xx_acx_config_hangover(struct wl1271 *wl); int wlcore_acx_average_rssi(struct wl1271 *wl, struct wl12xx_vif *wlvif, s8 *avg_rssi); -#ifdef CONFIG_PM int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable, enum rx_filter_action action); int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable, struct wl12xx_rx_filter *filter); -#endif /* CONFIG_PM */ #endif /* __WL1271_ACX_H__ */ diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index d47921a84509..09714034dbf1 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -42,6 +42,7 @@ #include "sysfs.h" #define WL1271_BOOT_RETRIES 3 +#define WL1271_SUSPEND_SLEEP 100 static char *fwlog_param; static int fwlog_mem_blocks = -1; @@ -388,7 +389,6 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl, static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status) { struct wl12xx_vif *wlvif; - struct timespec ts; u32 old_tx_blk_count = wl->tx_blocks_available; int avail, freed_blocks; int i; @@ -485,8 +485,7 @@ static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status) } /* update the host-chipset time offset */ - getnstimeofday(&ts); - wl->time_offset = (timespec_to_ns(&ts) >> 10) - + wl->time_offset = (ktime_get_boot_ns() >> 10) - (s64)(status->fw_localtime); wl->fw_fast_lnk_map = status->link_fast_bitmap; @@ -979,6 +978,24 @@ static int wlcore_fw_wakeup(struct wl1271 *wl) return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); } +static int wlcore_fw_sleep(struct wl1271 *wl) +{ + int ret; + + mutex_lock(&wl->mutex); + ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP); + if (ret < 0) { + wl12xx_queue_recovery_work(wl); + goto out; + } + set_bit(WL1271_FLAG_IN_ELP, &wl->flags); +out: + mutex_unlock(&wl->mutex); + mdelay(WL1271_SUSPEND_SLEEP); + + return 0; +} + static int wl1271_setup(struct wl1271 *wl) { wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL); @@ -1326,7 +1343,6 @@ static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl) } -#ifdef 
CONFIG_PM static int wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p) { @@ -1698,8 +1714,8 @@ static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif) } } -static int wl1271_op_suspend(struct ieee80211_hw *hw, - struct cfg80211_wowlan *wow) +static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wow) { struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif; @@ -1749,7 +1765,6 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw, goto out_sleep; out_sleep: - wl1271_ps_elp_sleep(wl); mutex_unlock(&wl->mutex); if (ret < 0) { @@ -1782,10 +1797,19 @@ out_sleep: */ cancel_delayed_work(&wl->tx_watchdog_work); + /* + * Use an immediate call to allow the firmware to go into power + * save during suspend. + * Using a workqueue for this last write meant it was only issued on + * resume, leaving the firmware with power save disabled and + * consuming full power throughout wowlan suspend. + */ + wlcore_fw_sleep(wl); + return 0; } -static int wl1271_op_resume(struct ieee80211_hw *hw) +static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw) { struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif; @@ -1869,7 +1893,6 @@ out: return 0; } -#endif static int wl1271_op_start(struct ieee80211_hw *hw) { diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index a3f5e9ca492a..00e9b4624dcf 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -264,7 +264,6 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct sk_buff *skb, u32 extra, struct ieee80211_tx_info *control, u8 hlid) { - struct timespec ts; struct wl1271_tx_hw_descr *desc; int ac, rate_idx; s64 hosttime; @@ -287,8 +286,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif, } /* configure packet life time */ - getnstimeofday(&ts); - hosttime = (timespec_to_ns(&ts) >> 10); + hosttime = (ktime_get_boot_ns() >> 10); desc->start_time = cpu_to_le32(hosttime - wl->time_offset); is_dummy = wl12xx_is_dummy_packet(wl, skb);
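A note on the recurring ARRAY_SIZE() conversions in the rtlwifi hunks above: the change is mechanical and behavior-preserving. ARRAY_SIZE(), from <linux/kernel.h> (hence the new includes), performs the same division but takes the element size from the array itself and fails to build when handed a plain pointer, so a table's length cannot silently drift if its element type changes. A minimal sketch, with an illustrative table name not taken from this patch:

	#include <linux/kernel.h>

	static const u32 example_regs[] = { 0x800, 0x8020D010, 0x804, 0x080112E0 };

	/* Open-coded count: silently wrong if the element type ever changes. */
	size_t n_fragile = sizeof(example_regs) / sizeof(u32);

	/* ARRAY_SIZE(): same value, element size derived from the array. */
	size_t n_robust = ARRAY_SIZE(example_regs);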
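The interrupt_recognized() refactor collapses four u32 out-parameters into the new struct rtl_int, so a chip that exposes fewer ISR registers (rtl8821ae fills only inta and intb) simply leaves the remaining fields untouched. A sketch of how a caller might consume the new signature; the dispatch comments are illustrative, not the actual pci.c code:

	struct rtl_int intvec = { 0 };

	rtlpriv->cfg->ops->interrupt_recognized(hw, &intvec);

	/* Each vector is already masked with irq_mask and acked by the callee. */
	if (intvec.inta) {
		/* handle primary ISR causes (RX/TX done, errors, ...) */
	}
	if (intvec.intb) {
		/* handle extended causes, e.g. REG_HISRE bits */
	}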
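Finally, the wlcore timestamping hunks replace getnstimeofday() with ktime_get_boot_ns() in both wlcore_fw_status() and wl1271_tx_fill_hdr(). Wall-clock time can jump under settimeofday()/NTP, which makes it a poor base for the host/firmware time offset; boottime is monotonic, so the offset computed in wlcore_fw_status() stays consistent with the per-packet timestamps written in tx.c. Condensed from the two hunks above:

	s64 hosttime;

	/* Before: wall clock, via a struct timespec round trip. */
	struct timespec ts;
	getnstimeofday(&ts);
	hosttime = timespec_to_ns(&ts) >> 10;

	/* After: monotonic boottime in ns, same >> 10 scaling (~microseconds). */
	hosttime = ktime_get_boot_ns() >> 10;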