Diffstat (limited to 'drivers/net')
425 files changed, 21043 insertions, 18827 deletions
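Editorial note on the first hunk below: bond_ip_hash() previously discarded the lowest hash bit unconditionally; the change makes the discard conditional on the layer3+4 and encap3+4 transmit policies, the only ones that seed the hash from L4 ports, where the in-tree comment notes that even port numbers are a common pattern. For the other policies the full folded hash is kept. A minimal stand-alone sketch of the fold (user-space C; the policy enum, port packing and sample addresses are illustrative stand-ins, not the kernel's):

#include <stdint.h>
#include <stdio.h>

enum xmit_policy { POLICY_LAYER23, POLICY_LAYER34 };	/* stand-ins */

/* Mirrors the fold in bond_ip_hash(): XOR the addresses into the seed,
 * fold the upper bits down, then drop bit 0 only for ports-based policies. */
static uint32_t bond_ip_hash_sketch(uint32_t seed, uint32_t saddr,
				    uint32_t daddr, enum xmit_policy pol)
{
	uint32_t hash = seed ^ daddr ^ saddr;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	if (pol == POLICY_LAYER34)	/* ports fed the seed: bit 0 is biased */
		return hash >> 1;
	return hash;
}

int main(void)
{
	uint32_t ports = (3260u << 16) | 36000u;	/* an even/even port pair */
	uint32_t h = bond_ip_hash_sketch(ports, 0x0a000001, 0x0a000002,
					 POLICY_LAYER34);

	printf("hash %u -> slave %u of 2\n", h, h % 2);
	return 0;
}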
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 15eddca7b4b6..c9e75a9de282 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4027,14 +4027,19 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const v return true; } -static u32 bond_ip_hash(u32 hash, struct flow_keys *flow) +static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy) { hash ^= (__force u32)flow_get_u32_dst(flow) ^ (__force u32)flow_get_u32_src(flow); hash ^= (hash >> 16); hash ^= (hash >> 8); + /* discard lowest hash bit to deal with the common even ports pattern */ - return hash >> 1; + if (xmit_policy == BOND_XMIT_POLICY_LAYER34 || + xmit_policy == BOND_XMIT_POLICY_ENCAP34) + return hash >> 1; + + return hash; } /* Generate hash based on xmit policy. If @skb is given it is used to linearize @@ -4064,7 +4069,7 @@ static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const voi memcpy(&hash, &flow.ports.ports, sizeof(hash)); } - return bond_ip_hash(hash, &flow); + return bond_ip_hash(hash, &flow, bond->params.xmit_policy); } /** @@ -5221,7 +5226,7 @@ static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow) switch (sk->sk_family) { #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: - if (sk->sk_ipv6only || + if (ipv6_only_sock(sk) || ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) { flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; flow->addrs.v6addrs.src = inet6_sk(sk)->saddr; @@ -5259,7 +5264,7 @@ static u32 bond_sk_hash_l34(struct sock *sk) /* L4 */ memcpy(&hash, &flow.ports.ports, sizeof(hash)); /* L3 */ - return bond_ip_hash(hash, &flow); + return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34); } static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond, diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index fff259247d52..ac760fd39282 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -170,6 +170,7 @@ config PCH_CAN source "drivers/net/can/c_can/Kconfig" source "drivers/net/can/cc770/Kconfig" +source "drivers/net/can/ctucanfd/Kconfig" source "drivers/net/can/ifi_canfd/Kconfig" source "drivers/net/can/m_can/Kconfig" source "drivers/net/can/mscan/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 1e660afcb61b..0af85983634c 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -16,6 +16,7 @@ obj-y += softing/ obj-$(CONFIG_CAN_AT91) += at91_can.o obj-$(CONFIG_CAN_CC770) += cc770/ obj-$(CONFIG_CAN_C_CAN) += c_can/ +obj-$(CONFIG_CAN_CTUCANFD) += ctucanfd/ obj-$(CONFIG_CAN_FLEXCAN) += flexcan/ obj-$(CONFIG_CAN_GRCAN) += grcan.o obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/ diff --git a/drivers/net/can/ctucanfd/Kconfig b/drivers/net/can/ctucanfd/Kconfig new file mode 100644 index 000000000000..48963efc7f19 --- /dev/null +++ b/drivers/net/can/ctucanfd/Kconfig @@ -0,0 +1,34 @@ +config CAN_CTUCANFD + tristate "CTU CAN-FD IP core" + help + This driver adds support for the CTU CAN FD open-source IP core. + More documentation and core sources at project page + (https://gitlab.fel.cvut.cz/canbus/ctucanfd_ip_core). + The core integration to Xilinx Zynq system as platform driver + is available (https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top). + Implementation on Intel FPGA-based PCI Express board is available + from project (https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd) and + on Intel SoC from project (https://gitlab.fel.cvut.cz/canbus/intel-soc-ctucanfd). 
+ Guidepost CTU FEE CAN bus projects page https://canbus.pages.fel.cvut.cz/ . + +config CAN_CTUCANFD_PCI + tristate "CTU CAN-FD IP core PCI/PCIe driver" + depends on CAN_CTUCANFD + depends on PCI + help + This driver adds PCI/PCIe support for CTU CAN-FD IP core. + The project providing FPGA design for Intel EP4CGX15 based DB4CGX15 + PCIe board with PiKRON.com designed transceiver riser shield is available + at https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd . + +config CAN_CTUCANFD_PLATFORM + tristate "CTU CAN-FD IP core platform (FPGA, SoC) driver" + depends on CAN_CTUCANFD + depends on OF || COMPILE_TEST + help + The core has been tested together with OpenCores SJA1000 + modified to be CAN FD frames tolerant on MicroZed Zynq based + MZ_APO education kits designed by Petr Porazil from PiKRON.com + company. FPGA design https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top. + The kit description at the Computer Architectures course pages + https://cw.fel.cvut.cz/wiki/courses/b35apo/documentation/mz_apo/start . diff --git a/drivers/net/can/ctucanfd/Makefile b/drivers/net/can/ctucanfd/Makefile new file mode 100644 index 000000000000..8078f1f2c30f --- /dev/null +++ b/drivers/net/can/ctucanfd/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Makefile for the CTU CAN-FD IP module drivers +# + +obj-$(CONFIG_CAN_CTUCANFD) := ctucanfd.o +ctucanfd-y := ctucanfd_base.o + +obj-$(CONFIG_CAN_CTUCANFD_PCI) += ctucanfd_pci.o +obj-$(CONFIG_CAN_CTUCANFD_PLATFORM) += ctucanfd_platform.o diff --git a/drivers/net/can/ctucanfd/ctucanfd.h b/drivers/net/can/ctucanfd/ctucanfd.h new file mode 100644 index 000000000000..0e9904f6a05d --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded + * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU + * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak <jnovak@fel.cvut.cz> + * Pavel Pisa <pisa@cmp.felk.cvut.cz> + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +#ifndef __CTUCANFD__ +#define __CTUCANFD__ + +#include <linux/netdevice.h> +#include <linux/can/dev.h> +#include <linux/list.h> + +enum ctu_can_fd_can_registers; + +struct ctucan_priv { + struct can_priv can; /* must be first member! */ + + void __iomem *mem_base; + u32 (*read_reg)(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg); + void (*write_reg)(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg, u32 val); + + unsigned int txb_head; + unsigned int txb_tail; + u32 txb_prio; + unsigned int ntxbufs; + spinlock_t tx_lock; /* spinlock to serialize allocation and processing of TX buffers */ + + struct napi_struct napi; + struct device *dev; + struct clk *can_clk; + + int irq_flags; + unsigned long drv_flags; + + u32 rxfrm_first_word; + + struct list_head peers_on_pdev; +}; + +/** + * ctucan_probe_common - Device type independent registration call + * + * This function does all the memory allocation and registration for the CAN + * device. 
+ * + * @dev: Handle to the generic device structure + * @addr: Base address of the CTU CAN FD core registers + * @irq: Interrupt number + * @ntxbufs: Number of implemented Tx buffers + * @can_clk_rate: Clock rate; if 0, the clock rate is taken from the device node + * @pm_enable_call: Whether pm_runtime_enable should be called + * @set_drvdata_fnc: Function to set network driver data for physical device + * + * Return: 0 on success and failure value on error + */ +int ctucan_probe_common(struct device *dev, void __iomem *addr, + int irq, unsigned int ntxbufs, + unsigned long can_clk_rate, + int pm_enable_call, + void (*set_drvdata_fnc)(struct device *dev, + struct net_device *ndev)); + +int ctucan_suspend(struct device *dev) __maybe_unused; +int ctucan_resume(struct device *dev) __maybe_unused; + +#endif /*__CTUCANFD__*/ diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c new file mode 100644 index 000000000000..2ada097d1ede --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd_base.c @@ -0,0 +1,1462 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded + * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU + * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak <jnovak@fel.cvut.cz> + * Pavel Pisa <pisa@cmp.felk.cvut.cz> + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +#include <linux/clk.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/bitfield.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/skbuff.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/can/error.h> +#include <linux/can/led.h> +#include <linux/pm_runtime.h> + +#include "ctucanfd.h" +#include "ctucanfd_kregs.h" +#include "ctucanfd_kframe.h" + +#ifdef DEBUG +#define ctucan_netdev_dbg(ndev, args...) \ + netdev_dbg(ndev, args) +#else +#define ctucan_netdev_dbg(...)
do { } while (0) +#endif + +#define CTUCANFD_ID 0xCAFD + +/* TX buffer rotation: + * - when a buffer transitions to empty state, rotate order and priorities + * - if more buffers seem to transition at the same time, rotate by the number of buffers + * - it may be assumed that buffers transition to empty state in FIFO order (because we manage + * priorities that way) + * - at frame filling, do not rotate anything, just increment buffer modulo counter + */ + +#define CTUCANFD_FLAG_RX_FFW_BUFFERED 1 + +#define CTUCAN_STATE_TO_TEXT_ENTRY(st) \ + [st] = #st + +enum ctucan_txtb_status { + TXT_NOT_EXIST = 0x0, + TXT_RDY = 0x1, + TXT_TRAN = 0x2, + TXT_ABTP = 0x3, + TXT_TOK = 0x4, + TXT_ERR = 0x6, + TXT_ABT = 0x7, + TXT_ETY = 0x8, +}; + +enum ctucan_txtb_command { + TXT_CMD_SET_EMPTY = 0x01, + TXT_CMD_SET_READY = 0x02, + TXT_CMD_SET_ABORT = 0x04 +}; + +static const struct can_bittiming_const ctu_can_fd_bit_timing_max = { + .name = "ctu_can_fd", + .tseg1_min = 2, + .tseg1_max = 190, + .tseg2_min = 1, + .tseg2_max = 63, + .sjw_max = 31, + .brp_min = 1, + .brp_max = 8, + .brp_inc = 1, +}; + +static const struct can_bittiming_const ctu_can_fd_bit_timing_data_max = { + .name = "ctu_can_fd", + .tseg1_min = 2, + .tseg1_max = 94, + .tseg2_min = 1, + .tseg2_max = 31, + .sjw_max = 31, + .brp_min = 1, + .brp_max = 2, + .brp_inc = 1, +}; + +static const char * const ctucan_state_strings[CAN_STATE_MAX] = { + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_ACTIVE), + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_WARNING), + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_PASSIVE), + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_BUS_OFF), + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_STOPPED), + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_SLEEPING) +}; + +static void ctucan_write32_le(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg, u32 val) +{ + iowrite32(val, priv->mem_base + reg); +} + +static void ctucan_write32_be(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg, u32 val) +{ + iowrite32be(val, priv->mem_base + reg); +} + +static u32 ctucan_read32_le(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg) +{ + return ioread32(priv->mem_base + reg); +} + +static u32 ctucan_read32_be(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg) +{ + return ioread32be(priv->mem_base + reg); +} + +static void ctucan_write32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg, u32 val) +{ + priv->write_reg(priv, reg, val); +} + +static u32 ctucan_read32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg) +{ + return priv->read_reg(priv, reg); +} + +static void ctucan_write_txt_buf(struct ctucan_priv *priv, enum ctu_can_fd_can_registers buf_base, + u32 offset, u32 val) +{ + priv->write_reg(priv, buf_base + offset, val); +} + +#define CTU_CAN_FD_TXTNF(priv) (!!FIELD_GET(REG_STATUS_TXNF, ctucan_read32(priv, CTUCANFD_STATUS))) +#define CTU_CAN_FD_ENABLED(priv) (!!FIELD_GET(REG_MODE_ENA, ctucan_read32(priv, CTUCANFD_MODE))) + +/** + * ctucan_state_to_str() - Converts CAN controller state code to corresponding text + * @state: CAN controller state code + * + * Return: Pointer to string representation of the error state + */ +static const char *ctucan_state_to_str(enum can_state state) +{ + const char *txt = NULL; + + if (state >= 0 && state < CAN_STATE_MAX) + txt = ctucan_state_strings[state]; + return txt ? 
txt : "UNKNOWN"; +} + +/** + * ctucan_reset() - Issues software reset request to CTU CAN FD + * @ndev: Pointer to net_device structure + * + * Return: 0 for success, -%ETIMEDOUT if CAN controller does not leave reset + */ +static int ctucan_reset(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + int i = 100; + + ctucan_write32(priv, CTUCANFD_MODE, REG_MODE_RST); + clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags); + + do { + u16 device_id = FIELD_GET(REG_DEVICE_ID_DEVICE_ID, + ctucan_read32(priv, CTUCANFD_DEVICE_ID)); + + if (device_id == 0xCAFD) + return 0; + if (!i--) { + netdev_warn(ndev, "device did not leave reset\n"); + return -ETIMEDOUT; + } + usleep_range(100, 200); + } while (1); +} + +/** + * ctucan_set_btr() - Sets CAN bus bit timing in CTU CAN FD + * @ndev: Pointer to net_device structure + * @bt: Pointer to Bit timing structure + * @nominal: True - Nominal bit timing, False - Data bit timing + * + * Return: 0 - OK, -%EPERM if controller is enabled + */ +static int ctucan_set_btr(struct net_device *ndev, struct can_bittiming *bt, bool nominal) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + int max_ph1_len = 31; + u32 btr = 0; + u32 prop_seg = bt->prop_seg; + u32 phase_seg1 = bt->phase_seg1; + + if (CTU_CAN_FD_ENABLED(priv)) { + netdev_err(ndev, "BUG! Cannot set bittiming - CAN is enabled\n"); + return -EPERM; + } + + if (nominal) + max_ph1_len = 63; + + /* The timing calculation functions have only constraints on tseg1, which is prop_seg + + * phase_seg1 combined. tseg1 is then split in half and stored into prop_seg and phase_seg1. + * In CTU CAN FD, PROP is 6/7 bits wide but PH1 only 6/5, so we must re-distribute the + * values here. + */ + if (phase_seg1 > max_ph1_len) { + prop_seg += phase_seg1 - max_ph1_len; + phase_seg1 = max_ph1_len; + bt->prop_seg = prop_seg; + bt->phase_seg1 = phase_seg1; + } + + if (nominal) { + btr = FIELD_PREP(REG_BTR_PROP, prop_seg); + btr |= FIELD_PREP(REG_BTR_PH1, phase_seg1); + btr |= FIELD_PREP(REG_BTR_PH2, bt->phase_seg2); + btr |= FIELD_PREP(REG_BTR_BRP, bt->brp); + btr |= FIELD_PREP(REG_BTR_SJW, bt->sjw); + + ctucan_write32(priv, CTUCANFD_BTR, btr); + } else { + btr = FIELD_PREP(REG_BTR_FD_PROP_FD, prop_seg); + btr |= FIELD_PREP(REG_BTR_FD_PH1_FD, phase_seg1); + btr |= FIELD_PREP(REG_BTR_FD_PH2_FD, bt->phase_seg2); + btr |= FIELD_PREP(REG_BTR_FD_BRP_FD, bt->brp); + btr |= FIELD_PREP(REG_BTR_FD_SJW_FD, bt->sjw); + + ctucan_write32(priv, CTUCANFD_BTR_FD, btr); + } + + return 0; +} + +/** + * ctucan_set_bittiming() - CAN set nominal bit timing routine + * @ndev: Pointer to net_device structure + * + * Return: 0 on success, -%EPERM on error + */ +static int ctucan_set_bittiming(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct can_bittiming *bt = &priv->can.bittiming; + + /* Note that bt may be modified here */ + return ctucan_set_btr(ndev, bt, true); +} + +/** + * ctucan_set_data_bittiming() - CAN set data bit timing routine + * @ndev: Pointer to net_device structure + * + * Return: 0 on success, -%EPERM on error + */ +static int ctucan_set_data_bittiming(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct can_bittiming *dbt = &priv->can.data_bittiming; + + /* Note that dbt may be modified here */ + return ctucan_set_btr(ndev, dbt, false); +} + +/** + * ctucan_set_secondary_sample_point() - Sets secondary sample point in CTU CAN FD + * @ndev: Pointer to net_device structure + * + * Return: 0 on success, -%EPERM if controller is enabled
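+ * + * Worked example (editorial, not part of the patch): with a 100 MHz core clock, a 4 Mbit/s data bitrate and dbt->sample_point = 750 (the field is in tenths of a percent, i.e. 75.0 %), the computation below gives ssp_offset = (100000000 / 1000) * 750 / 4000000 = 18 minimal time quanta, well below the 127 saturation limit.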
+ */ +static int ctucan_set_secondary_sample_point(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct can_bittiming *dbt = &priv->can.data_bittiming; + int ssp_offset = 0; + u32 ssp_cfg = 0; /* No SSP by default */ + + if (CTU_CAN_FD_ENABLED(priv)) { + netdev_err(ndev, "BUG! Cannot set SSP - CAN is enabled\n"); + return -EPERM; + } + + /* Use SSP for bit-rates above 1 Mbits/s */ + if (dbt->bitrate > 1000000) { + /* Calculate SSP in minimal time quanta */ + ssp_offset = (priv->can.clock.freq / 1000) * dbt->sample_point / dbt->bitrate; + + if (ssp_offset > 127) { + netdev_warn(ndev, "SSP offset saturated to 127\n"); + ssp_offset = 127; + } + + ssp_cfg = FIELD_PREP(REG_TRV_DELAY_SSP_OFFSET, ssp_offset); + ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x1); + } + + ctucan_write32(priv, CTUCANFD_TRV_DELAY, ssp_cfg); + + return 0; +} + +/** + * ctucan_set_mode() - Sets CTU CAN FD's mode + * @priv: Pointer to private data + * @mode: Pointer to controller modes to be set + */ +static void ctucan_set_mode(struct ctucan_priv *priv, const struct can_ctrlmode *mode) +{ + u32 mode_reg = ctucan_read32(priv, CTUCANFD_MODE); + + mode_reg = (mode->flags & CAN_CTRLMODE_LOOPBACK) ? + (mode_reg | REG_MODE_ILBP) : + (mode_reg & ~REG_MODE_ILBP); + + mode_reg = (mode->flags & CAN_CTRLMODE_LISTENONLY) ? + (mode_reg | REG_MODE_BMM) : + (mode_reg & ~REG_MODE_BMM); + + mode_reg = (mode->flags & CAN_CTRLMODE_FD) ? + (mode_reg | REG_MODE_FDE) : + (mode_reg & ~REG_MODE_FDE); + + mode_reg = (mode->flags & CAN_CTRLMODE_PRESUME_ACK) ? + (mode_reg | REG_MODE_ACF) : + (mode_reg & ~REG_MODE_ACF); + + mode_reg = (mode->flags & CAN_CTRLMODE_FD_NON_ISO) ? + (mode_reg | REG_MODE_NISOFD) : + (mode_reg & ~REG_MODE_NISOFD); + + /* One shot mode supported indirectly via Retransmit limit */ + mode_reg &= ~FIELD_PREP(REG_MODE_RTRTH, 0xF); + mode_reg = (mode->flags & CAN_CTRLMODE_ONE_SHOT) ? + (mode_reg | REG_MODE_RTRLE) : + (mode_reg & ~REG_MODE_RTRLE); + + /* Some bits fixed: + * TSTM - Off, User shall not be able to change REC/TEC by hand during operation + */ + mode_reg &= ~REG_MODE_TSTM; + + ctucan_write32(priv, CTUCANFD_MODE, mode_reg); +} + +/** + * ctucan_chip_start() - This routine starts the driver + * @ndev: Pointer to net_device structure + * + * The routine expects the chip to be in reset state. It sets up initial + * Tx buffers for FIFO priorities, sets bittiming, enables interrupts, + * switches core to operational mode and changes controller + * state to %CAN_STATE_STOPPED.
+ * + * Return: 0 on success and failure value on error + */ +static int ctucan_chip_start(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + u32 int_ena, int_msk; + u32 mode_reg; + int err; + struct can_ctrlmode mode; + + priv->txb_prio = 0x01234567; + priv->txb_head = 0; + priv->txb_tail = 0; + ctucan_write32(priv, CTUCANFD_TX_PRIORITY, priv->txb_prio); + + /* Configure bit-rates and ssp */ + err = ctucan_set_bittiming(ndev); + if (err < 0) + return err; + + err = ctucan_set_data_bittiming(ndev); + if (err < 0) + return err; + + err = ctucan_set_secondary_sample_point(ndev); + if (err < 0) + return err; + + /* Configure modes */ + mode.flags = priv->can.ctrlmode; + mode.mask = 0xFFFFFFFF; + ctucan_set_mode(priv, &mode); + + /* Configure interrupts */ + int_ena = REG_INT_STAT_RBNEI | + REG_INT_STAT_TXBHCI | + REG_INT_STAT_EWLI | + REG_INT_STAT_FCSI; + + /* Bus error reporting -> Allow Error/Arb.lost interrupts */ + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { + int_ena |= REG_INT_STAT_ALI | + REG_INT_STAT_BEI; + } + + int_msk = ~int_ena; /* Mask all disabled interrupts */ + + /* It's after reset, so there is no need to clear anything */ + ctucan_write32(priv, CTUCANFD_INT_MASK_SET, int_msk); + ctucan_write32(priv, CTUCANFD_INT_ENA_SET, int_ena); + + /* Controller enters ERROR_ACTIVE on initial FCSI */ + priv->can.state = CAN_STATE_STOPPED; + + /* Enable the controller */ + mode_reg = ctucan_read32(priv, CTUCANFD_MODE); + mode_reg |= REG_MODE_ENA; + ctucan_write32(priv, CTUCANFD_MODE, mode_reg); + + return 0; +} + +/** + * ctucan_do_set_mode() - Sets mode of the driver + * @ndev: Pointer to net_device structure + * @mode: Requested mode for the driver + * + * This checks the requested mode and calls the corresponding setup routine.
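An editorial aside on the TXT buffer bookkeeping initialized in ctucan_chip_start() above: txb_head and txb_tail are free-running counters (the buffer index is the counter modulo ntxbufs), and CTUCANFD_TX_PRIORITY holds one priority nibble per buffer, reset to 0x01234567 so the oldest READY buffer always has the highest priority. A stand-alone sketch of the nibble rotation performed by ctucan_rotate_txb_prio() further below, assuming the four buffers instantiated by this core:

#include <stdint.h>
#include <stdio.h>

#define NTXBUFS 4u

/* Same expression as ctucan_rotate_txb_prio(): shift every priority
 * nibble up and wrap the nibble of the last buffer back to buffer 0,
 * so completion order stays FIFO. Only the low NTXBUFS nibbles are
 * meaningful TXTiP fields; the upper ones are just carried along. */
static uint32_t rotate_txb_prio(uint32_t prio)
{
	return (prio << 4) | ((prio >> ((NTXBUFS - 1) * 4)) & 0xF);
}

int main(void)
{
	uint32_t prio = 0x01234567;	/* reset value from ctucan_chip_start() */

	for (int i = 0; i < 4; i++) {
		printf("0x%08x\n", prio);
		prio = rotate_txb_prio(prio);
	}
	return 0;
}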
+ * + * Return: 0 on success and failure value on error + */ +static int ctucan_do_set_mode(struct net_device *ndev, enum can_mode mode) +{ + int ret; + + switch (mode) { + case CAN_MODE_START: + ret = ctucan_reset(ndev); + if (ret < 0) + return ret; + ret = ctucan_chip_start(ndev); + if (ret < 0) { + netdev_err(ndev, "ctucan_chip_start failed!\n"); + return ret; + } + netif_wake_queue(ndev); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +/** + * ctucan_get_tx_status() - Gets status of TXT buffer + * @priv: Pointer to private data + * @buf: Buffer index (0-based) + * + * Return: Status of TXT buffer + */ +static enum ctucan_txtb_status ctucan_get_tx_status(struct ctucan_priv *priv, u8 buf) +{ + u32 tx_status = ctucan_read32(priv, CTUCANFD_TX_STATUS); + enum ctucan_txtb_status status = (tx_status >> (buf * 4)) & 0x7; + + return status; +} + +/** + * ctucan_is_txt_buf_writable() - Checks if frame can be inserted to TXT Buffer + * @priv: Pointer to private data + * @buf: Buffer index (0-based) + * + * Return: True - Frame can be inserted to TXT Buffer, False - If attempted, frame will not be + * inserted to TXT Buffer + */ +static bool ctucan_is_txt_buf_writable(struct ctucan_priv *priv, u8 buf) +{ + enum ctucan_txtb_status buf_status; + + buf_status = ctucan_get_tx_status(priv, buf); + if (buf_status == TXT_RDY || buf_status == TXT_TRAN || buf_status == TXT_ABTP) + return false; + + return true; +} + +/** + * ctucan_insert_frame() - Inserts frame to TXT buffer + * @priv: Pointer to private data + * @cf: Pointer to CAN frame to be inserted + * @buf: TXT Buffer index to which frame is inserted (0-based) + * @isfdf: True - CAN FD Frame, False - CAN 2.0 Frame + * + * Return: True - Frame inserted successfully + * False - Frame was not inserted due to one of: + * 1. TXT Buffer is not writable (it is in wrong state) + * 2. Invalid TXT buffer index + * 3. 
Invalid frame length + */ +static bool ctucan_insert_frame(struct ctucan_priv *priv, const struct canfd_frame *cf, u8 buf, + bool isfdf) +{ + u32 buf_base; + u32 ffw = 0; + u32 idw = 0; + unsigned int i; + + if (buf >= priv->ntxbufs) + return false; + + if (!ctucan_is_txt_buf_writable(priv, buf)) + return false; + + if (cf->len > CANFD_MAX_DLEN) + return false; + + /* Prepare Frame format */ + if (cf->can_id & CAN_RTR_FLAG) + ffw |= REG_FRAME_FORMAT_W_RTR; + + if (cf->can_id & CAN_EFF_FLAG) + ffw |= REG_FRAME_FORMAT_W_IDE; + + if (isfdf) { + ffw |= REG_FRAME_FORMAT_W_FDF; + if (cf->flags & CANFD_BRS) + ffw |= REG_FRAME_FORMAT_W_BRS; + } + + ffw |= FIELD_PREP(REG_FRAME_FORMAT_W_DLC, can_fd_len2dlc(cf->len)); + + /* Prepare identifier */ + if (cf->can_id & CAN_EFF_FLAG) + idw = cf->can_id & CAN_EFF_MASK; + else + idw = FIELD_PREP(REG_IDENTIFIER_W_IDENTIFIER_BASE, cf->can_id & CAN_SFF_MASK); + + /* Write ID, Frame format, Don't write timestamp -> Time triggered transmission disabled */ + buf_base = (buf + 1) * 0x100; + ctucan_write_txt_buf(priv, buf_base, CTUCANFD_FRAME_FORMAT_W, ffw); + ctucan_write_txt_buf(priv, buf_base, CTUCANFD_IDENTIFIER_W, idw); + + /* Write Data payload */ + if (!(cf->can_id & CAN_RTR_FLAG)) { + for (i = 0; i < cf->len; i += 4) { + u32 data = le32_to_cpu(*(__le32 *)(cf->data + i)); + + ctucan_write_txt_buf(priv, buf_base, CTUCANFD_DATA_1_4_W + i, data); + } + } + + return true; +} + +/** + * ctucan_give_txtb_cmd() - Applies command on TXT buffer + * @priv: Pointer to private data + * @cmd: Command to give + * @buf: Buffer index (0-based) + */ +static void ctucan_give_txtb_cmd(struct ctucan_priv *priv, enum ctucan_txtb_command cmd, u8 buf) +{ + u32 tx_cmd = cmd; + + tx_cmd |= 1 << (buf + 8); + ctucan_write32(priv, CTUCANFD_TX_COMMAND, tx_cmd); +} + +/** + * ctucan_start_xmit() - Starts the transmission + * @skb: sk_buff pointer that contains data to be Txed + * @ndev: Pointer to net_device structure + * + * Invoked from upper layers to initiate transmission. Uses the next available free TXT Buffer and + * populates its fields to start the transmission. + * + * Return: %NETDEV_TX_OK on success, %NETDEV_TX_BUSY when no free TXT buffer is available, + * negative return values reserved for error cases + */ +static netdev_tx_t ctucan_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u32 txtb_id; + bool ok; + unsigned long flags; + + if (can_dropped_invalid_skb(ndev, skb)) + return NETDEV_TX_OK; + + if (unlikely(!CTU_CAN_FD_TXTNF(priv))) { + netif_stop_queue(ndev); + netdev_err(ndev, "BUG!, no TXB free when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + txtb_id = priv->txb_head % priv->ntxbufs; + ctucan_netdev_dbg(ndev, "%s: using TXB#%u\n", __func__, txtb_id); + ok = ctucan_insert_frame(priv, cf, txtb_id, can_is_canfd_skb(skb)); + + if (!ok) { + netdev_err(ndev, "BUG! TXNF set but cannot insert frame into TXTB! 
HW Bug?"); + kfree_skb(skb); + ndev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + can_put_echo_skb(skb, ndev, txtb_id, 0); + + spin_lock_irqsave(&priv->tx_lock, flags); + ctucan_give_txtb_cmd(priv, TXT_CMD_SET_READY, txtb_id); + priv->txb_head++; + + /* Check if all TX buffers are full */ + if (!CTU_CAN_FD_TXTNF(priv)) + netif_stop_queue(ndev); + + spin_unlock_irqrestore(&priv->tx_lock, flags); + + return NETDEV_TX_OK; +} + +/** + * ctucan_read_rx_frame() - Reads frame from RX FIFO + * @priv: Pointer to CTU CAN FD's private data + * @cf: Pointer to CAN frame struct + * @ffw: Previously read frame format word + * + * Note: Frame format word must be read separately and provided in 'ffw'. + */ +static void ctucan_read_rx_frame(struct ctucan_priv *priv, struct canfd_frame *cf, u32 ffw) +{ + u32 idw; + unsigned int i; + unsigned int wc; + unsigned int len; + + idw = ctucan_read32(priv, CTUCANFD_RX_DATA); + if (FIELD_GET(REG_FRAME_FORMAT_W_IDE, ffw)) + cf->can_id = (idw & CAN_EFF_MASK) | CAN_EFF_FLAG; + else + cf->can_id = (idw >> 18) & CAN_SFF_MASK; + + /* BRS, ESI, RTR Flags */ + cf->flags = 0; + if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) { + if (FIELD_GET(REG_FRAME_FORMAT_W_BRS, ffw)) + cf->flags |= CANFD_BRS; + if (FIELD_GET(REG_FRAME_FORMAT_W_ESI_RSV, ffw)) + cf->flags |= CANFD_ESI; + } else if (FIELD_GET(REG_FRAME_FORMAT_W_RTR, ffw)) { + cf->can_id |= CAN_RTR_FLAG; + } + + wc = FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw) - 3; + + /* DLC */ + if (FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw) <= 8) { + len = FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw); + } else { + if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) + len = wc << 2; + else + len = 8; + } + cf->len = len; + if (unlikely(len > wc * 4)) + len = wc * 4; + + /* Timestamp - Read and throw away */ + ctucan_read32(priv, CTUCANFD_RX_DATA); + ctucan_read32(priv, CTUCANFD_RX_DATA); + + /* Data */ + for (i = 0; i < len; i += 4) { + u32 data = ctucan_read32(priv, CTUCANFD_RX_DATA); + *(__le32 *)(cf->data + i) = cpu_to_le32(data); + } + while (unlikely(i < wc * 4)) { + ctucan_read32(priv, CTUCANFD_RX_DATA); + i += 4; + } +} + +/** + * ctucan_rx() - Called from CAN ISR to complete the received frame processing + * @ndev: Pointer to net_device structure + * + * This function is invoked from the CAN ISR (poll) to process the Rx frames. It does minimal + * processing and invokes "netif_receive_skb" to complete further processing. + * Return: 1 when a frame is passed to the network layer, 0 when the first frame word is read but + * the system is temporarily out of free SKBs and the word is kept buffered so SKB allocation + * can be retried later, + * -%EAGAIN in case of an empty Rx FIFO.
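+ * + * Worked example of the RWCNT arithmetic in ctucan_read_rx_frame() above (editorial, not from the patch): a 64-byte CAN FD frame occupies one identifier word, two timestamp words and sixteen data words behind the frame format word, so RWCNT = 19, wc = 19 - 3 = 16 and len = 16 << 2 = 64 bytes.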
+ */ +static int ctucan_rx(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct canfd_frame *cf; + struct sk_buff *skb; + u32 ffw; + + if (test_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags)) { + ffw = priv->rxfrm_first_word; + clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags); + } else { + ffw = ctucan_read32(priv, CTUCANFD_RX_DATA); + } + + if (!FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw)) + return -EAGAIN; + + if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) + skb = alloc_canfd_skb(ndev, &cf); + else + skb = alloc_can_skb(ndev, (struct can_frame **)&cf); + + if (unlikely(!skb)) { + priv->rxfrm_first_word = ffw; + set_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags); + return 0; + } + + ctucan_read_rx_frame(priv, cf, ffw); + + stats->rx_bytes += cf->len; + stats->rx_packets++; + netif_receive_skb(skb); + + return 1; +} + +/** + * ctucan_read_fault_state() - Reads CTU CAN FDs fault confinement state. + * @priv: Pointer to private data + * + * Returns: Fault confinement state of controller + */ +static enum can_state ctucan_read_fault_state(struct ctucan_priv *priv) +{ + u32 fs; + u32 rec_tec; + u32 ewl; + + fs = ctucan_read32(priv, CTUCANFD_EWL); + rec_tec = ctucan_read32(priv, CTUCANFD_REC); + ewl = FIELD_GET(REG_EWL_EW_LIMIT, fs); + + if (FIELD_GET(REG_EWL_ERA, fs)) { + if (ewl > FIELD_GET(REG_REC_REC_VAL, rec_tec) && + ewl > FIELD_GET(REG_REC_TEC_VAL, rec_tec)) + return CAN_STATE_ERROR_ACTIVE; + else + return CAN_STATE_ERROR_WARNING; + } else if (FIELD_GET(REG_EWL_ERP, fs)) { + return CAN_STATE_ERROR_PASSIVE; + } else if (FIELD_GET(REG_EWL_BOF, fs)) { + return CAN_STATE_BUS_OFF; + } + + WARN(true, "Invalid error state"); + return CAN_STATE_ERROR_PASSIVE; +} + +/** + * ctucan_get_rec_tec() - Reads REC/TEC counter values from controller + * @priv: Pointer to private data + * @bec: Pointer to Error counter structure + */ +static void ctucan_get_rec_tec(struct ctucan_priv *priv, struct can_berr_counter *bec) +{ + u32 err_ctrs = ctucan_read32(priv, CTUCANFD_REC); + + bec->rxerr = FIELD_GET(REG_REC_REC_VAL, err_ctrs); + bec->txerr = FIELD_GET(REG_REC_TEC_VAL, err_ctrs); +} + +/** + * ctucan_err_interrupt() - Error frame ISR + * @ndev: net_device pointer + * @isr: interrupt status register value + * + * This is the CAN error interrupt and it will check the type of error and forward the error + * frame to upper layers. 
+ */ +static void ctucan_err_interrupt(struct net_device *ndev, u32 isr) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + enum can_state state; + struct can_berr_counter bec; + u32 err_capt_alc; + int dologerr = net_ratelimit(); + + ctucan_get_rec_tec(priv, &bec); + state = ctucan_read_fault_state(priv); + err_capt_alc = ctucan_read32(priv, CTUCANFD_ERR_CAPT); + + if (dologerr) + netdev_info(ndev, "%s: ISR = 0x%08x, rxerr %d, txerr %d, error type %lu, pos %lu, ALC id_field %lu, bit %lu\n", + __func__, isr, bec.rxerr, bec.txerr, + FIELD_GET(REG_ERR_CAPT_ERR_TYPE, err_capt_alc), + FIELD_GET(REG_ERR_CAPT_ERR_POS, err_capt_alc), + FIELD_GET(REG_ERR_CAPT_ALC_ID_FIELD, err_capt_alc), + FIELD_GET(REG_ERR_CAPT_ALC_BIT, err_capt_alc)); + + skb = alloc_can_err_skb(ndev, &cf); + + /* EWLI: error warning limit condition met + * FCSI: fault confinement state changed + * ALI: arbitration lost (just informative) + * BEI: bus error interrupt + */ + if (FIELD_GET(REG_INT_STAT_FCSI, isr) || FIELD_GET(REG_INT_STAT_EWLI, isr)) { + netdev_info(ndev, "state changes from %s to %s\n", + ctucan_state_to_str(priv->can.state), + ctucan_state_to_str(state)); + + if (priv->can.state == state) + netdev_warn(ndev, + "current and previous state is the same! (missed interrupt?)\n"); + + priv->can.state = state; + switch (state) { + case CAN_STATE_BUS_OFF: + priv->can.can_stats.bus_off++; + can_bus_off(ndev); + if (skb) + cf->can_id |= CAN_ERR_BUSOFF; + break; + case CAN_STATE_ERROR_PASSIVE: + priv->can.can_stats.error_passive++; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (bec.rxerr > 127) ? + CAN_ERR_CRTL_RX_PASSIVE : + CAN_ERR_CRTL_TX_PASSIVE; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } + break; + case CAN_STATE_ERROR_WARNING: + priv->can.can_stats.error_warning++; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= (bec.txerr > bec.rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } + break; + case CAN_STATE_ERROR_ACTIVE: + cf->data[1] = CAN_ERR_CRTL_ACTIVE; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + break; + default: + netdev_warn(ndev, "unhandled error state (%d:%s)!\n", + state, ctucan_state_to_str(state)); + break; + } + } + + /* Check for Arbitration Lost interrupt */ + if (FIELD_GET(REG_INT_STAT_ALI, isr)) { + if (dologerr) + netdev_info(ndev, "arbitration lost\n"); + priv->can.can_stats.arbitration_lost++; + if (skb) { + cf->can_id |= CAN_ERR_LOSTARB; + cf->data[0] = CAN_ERR_LOSTARB_UNSPEC; + } + } + + /* Check for Bus Error interrupt */ + if (FIELD_GET(REG_INT_STAT_BEI, isr)) { + netdev_info(ndev, "bus error\n"); + priv->can.can_stats.bus_error++; + stats->rx_errors++; + if (skb) { + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + cf->data[2] = CAN_ERR_PROT_UNSPEC; + cf->data[3] = CAN_ERR_PROT_LOC_UNSPEC; + } + } + + if (skb) { + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } +} + +/** + * ctucan_rx_poll() - Poll routine for rx packets (NAPI) + * @napi: NAPI structure pointer + * @quota: Max number of rx packets to be processed. + * + * This is the poll routine for the rx part. It processes packets up to the maximum quota value.
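+ * + * (Editorial note: the driver registers this handler with the default NAPI_POLL_WEIGHT, 64 at the time of this patch, so a single poll passes at most 64 frames to the stack; RBNEI is re-enabled only once the RX FIFO has been drained.)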
+ * + * Return: Number of packets received + */ +static int ctucan_rx_poll(struct napi_struct *napi, int quota) +{ + struct net_device *ndev = napi->dev; + struct ctucan_priv *priv = netdev_priv(ndev); + int work_done = 0; + u32 status; + u32 framecnt; + int res = 1; + + framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS)); + while (framecnt && work_done < quota && res > 0) { + res = ctucan_rx(ndev); + work_done++; + framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS)); + } + + /* Check for RX FIFO Overflow */ + status = ctucan_read32(priv, CTUCANFD_STATUS); + if (FIELD_GET(REG_STATUS_DOR, status)) { + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + netdev_info(ndev, "rx_poll: rx fifo overflow\n"); + stats->rx_over_errors++; + stats->rx_errors++; + skb = alloc_can_err_skb(ndev, &cf); + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } + + /* Clear Data Overrun */ + ctucan_write32(priv, CTUCANFD_COMMAND, REG_COMMAND_CDO); + } + + if (work_done) + can_led_event(ndev, CAN_LED_EVENT_RX); + + if (!framecnt && res != 0) { + if (napi_complete_done(napi, work_done)) { + /* Clear and enable RBNEI. It is level-triggered, so + * there is no race condition. + */ + ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_RBNEI); + ctucan_write32(priv, CTUCANFD_INT_MASK_CLR, REG_INT_STAT_RBNEI); + } + } + + return work_done; +} + +/** + * ctucan_rotate_txb_prio() - Rotates priorities of TXT Buffers + * @ndev: net_device pointer + */ +static void ctucan_rotate_txb_prio(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + u32 prio = priv->txb_prio; + + prio = (prio << 4) | ((prio >> ((priv->ntxbufs - 1) * 4)) & 0xF); + ctucan_netdev_dbg(ndev, "%s: from 0x%08x to 0x%08x\n", __func__, priv->txb_prio, prio); + priv->txb_prio = prio; + ctucan_write32(priv, CTUCANFD_TX_PRIORITY, prio); +} + +/** + * ctucan_tx_interrupt() - Tx done Isr + * @ndev: net_device pointer + */ +static void ctucan_tx_interrupt(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + bool first = true; + bool some_buffers_processed; + unsigned long flags; + enum ctucan_txtb_status txtb_status; + u32 txtb_id; + + /* read tx_status + * if txb[n].finished (bit 2) + * if ok -> echo + * if error / aborted -> ?? (find how to handle oneshot mode) + * txb_tail++ + */ + do { + spin_lock_irqsave(&priv->tx_lock, flags); + + some_buffers_processed = false; + while ((int)(priv->txb_head - priv->txb_tail) > 0) { + txtb_id = priv->txb_tail % priv->ntxbufs; + txtb_status = ctucan_get_tx_status(priv, txtb_id); + + ctucan_netdev_dbg(ndev, "TXI: TXB#%u: status 0x%x\n", txtb_id, txtb_status); + + switch (txtb_status) { + case TXT_TOK: + ctucan_netdev_dbg(ndev, "TXT_OK\n"); + stats->tx_bytes += can_get_echo_skb(ndev, txtb_id, NULL); + stats->tx_packets++; + break; + case TXT_ERR: + /* This indicated that retransmit limit has been reached. Obviously + * we should not echo the frame, but also not indicate any kind of + * error. If desired, it was already reported (possible multiple + * times) on each arbitration lost. + */ + netdev_warn(ndev, "TXB in Error state\n"); + can_free_echo_skb(ndev, txtb_id, NULL); + stats->tx_dropped++; + break; + case TXT_ABT: + /* Same as for TXT_ERR, only with different cause. 
We *could* + * re-queue the frame, but multiqueue/abort is not supported yet + * anyway. + */ + netdev_warn(ndev, "TXB in Aborted state\n"); + can_free_echo_skb(ndev, txtb_id, NULL); + stats->tx_dropped++; + break; + default: + /* Bug only if the first buffer is not finished, otherwise it is + * pretty much expected. + */ + if (first) { + netdev_err(ndev, + "BUG: TXB#%u not in a finished state (0x%x)!\n", + txtb_id, txtb_status); + spin_unlock_irqrestore(&priv->tx_lock, flags); + /* do not clear nor wake */ + return; + } + goto clear; + } + priv->txb_tail++; + first = false; + some_buffers_processed = true; + /* Adjust priorities *before* marking the buffer as empty. */ + ctucan_rotate_txb_prio(ndev); + ctucan_give_txtb_cmd(priv, TXT_CMD_SET_EMPTY, txtb_id); + } +clear: + spin_unlock_irqrestore(&priv->tx_lock, flags); + + /* If no buffers were processed this time, we cannot clear - that would introduce + * a race condition. + */ + if (some_buffers_processed) { + /* Clear the interrupt again. We do not want to receive the interrupt again for + * a buffer already handled. If it is the last finished one, it would + * cause a spurious interrupt to be logged. + */ + ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_TXBHCI); + } + } while (some_buffers_processed); + + can_led_event(ndev, CAN_LED_EVENT_TX); + + spin_lock_irqsave(&priv->tx_lock, flags); + + /* Check if at least one TX buffer is free */ + if (CTU_CAN_FD_TXTNF(priv)) + netif_wake_queue(ndev); + + spin_unlock_irqrestore(&priv->tx_lock, flags); +} + +/** + * ctucan_interrupt() - CAN ISR + * @irq: irq number + * @dev_id: device id pointer + * + * This is the CTU CAN FD ISR. It checks for the type of interrupt + * and invokes the corresponding ISR. + * + * Return: + * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise + */ +static irqreturn_t ctucan_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = (struct net_device *)dev_id; + struct ctucan_priv *priv = netdev_priv(ndev); + u32 isr, icr; + u32 imask; + int irq_loops; + + for (irq_loops = 0; irq_loops < 10000; irq_loops++) { + /* Get the interrupt status */ + isr = ctucan_read32(priv, CTUCANFD_INT_STAT); + + if (!isr) + return irq_loops ? IRQ_HANDLED : IRQ_NONE; + + /* Receive Buffer Not Empty Interrupt */ + if (FIELD_GET(REG_INT_STAT_RBNEI, isr)) { + ctucan_netdev_dbg(ndev, "RXBNEI\n"); + /* Mask RBNEI first, then clear the interrupt and schedule NAPI. Even if + * another IRQ fires, RBNEI will always be 0 (masked).
+ */ + icr = REG_INT_STAT_RBNEI; + ctucan_write32(priv, CTUCANFD_INT_MASK_SET, icr); + ctucan_write32(priv, CTUCANFD_INT_STAT, icr); + napi_schedule(&priv->napi); + } + + /* TXT Buffer HW Command Interrupt */ + if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) { + ctucan_netdev_dbg(ndev, "TXBHCI\n"); + /* Cleared inside */ + ctucan_tx_interrupt(ndev); + } + + /* Error interrupts */ + if (FIELD_GET(REG_INT_STAT_EWLI, isr) || + FIELD_GET(REG_INT_STAT_FCSI, isr) || + FIELD_GET(REG_INT_STAT_ALI, isr)) { + icr = isr & (REG_INT_STAT_EWLI | REG_INT_STAT_FCSI | REG_INT_STAT_ALI); + + ctucan_netdev_dbg(ndev, "some ERR interrupt: clearing 0x%08x\n", icr); + ctucan_write32(priv, CTUCANFD_INT_STAT, icr); + ctucan_err_interrupt(ndev, isr); + } + /* Ignore RI, TI, LFI, RFI, BSI */ + } + + netdev_err(ndev, "%s: stuck interrupt (isr=0x%08x), stopping\n", __func__, isr); + + if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) { + int i; + + netdev_err(ndev, "txb_head=0x%08x txb_tail=0x%08x\n", + priv->txb_head, priv->txb_tail); + for (i = 0; i < priv->ntxbufs; i++) { + u32 status = ctucan_get_tx_status(priv, i); + + netdev_err(ndev, "txb[%d] txb status=0x%08x\n", i, status); + } + } + + imask = 0xffffffff; + ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, imask); + ctucan_write32(priv, CTUCANFD_INT_MASK_SET, imask); + + return IRQ_HANDLED; +} + +/** + * ctucan_chip_stop() - Driver stop routine + * @ndev: Pointer to net_device structure + * + * This is the drivers stop routine. It will disable the + * interrupts and disable the controller. + */ +static void ctucan_chip_stop(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + u32 mask = 0xffffffff; + u32 mode; + + /* Disable interrupts and disable CAN */ + ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, mask); + ctucan_write32(priv, CTUCANFD_INT_MASK_SET, mask); + mode = ctucan_read32(priv, CTUCANFD_MODE); + mode &= ~REG_MODE_ENA; + ctucan_write32(priv, CTUCANFD_MODE, mode); + + priv->can.state = CAN_STATE_STOPPED; +} + +/** + * ctucan_open() - Driver open routine + * @ndev: Pointer to net_device structure + * + * This is the driver open routine. 
+ * Return: 0 on success and failure value on error + */ +static int ctucan_open(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", + __func__, ret); + pm_runtime_put_noidle(priv->dev); + return ret; + } + + ret = ctucan_reset(ndev); + if (ret < 0) + goto err_reset; + + /* Common open */ + ret = open_candev(ndev); + if (ret) { + netdev_warn(ndev, "open_candev failed!\n"); + goto err_open; + } + + ret = request_irq(ndev->irq, ctucan_interrupt, priv->irq_flags, ndev->name, ndev); + if (ret < 0) { + netdev_err(ndev, "irq allocation for CAN failed\n"); + goto err_irq; + } + + ret = ctucan_chip_start(ndev); + if (ret < 0) { + netdev_err(ndev, "ctucan_chip_start failed!\n"); + goto err_chip_start; + } + + netdev_info(ndev, "ctu_can_fd device registered\n"); + can_led_event(ndev, CAN_LED_EVENT_OPEN); + napi_enable(&priv->napi); + netif_start_queue(ndev); + + return 0; + +err_chip_start: + free_irq(ndev->irq, ndev); +err_irq: + close_candev(ndev); +err_open: +err_reset: + pm_runtime_put(priv->dev); + + return ret; +} + +/** + * ctucan_close() - Driver close routine + * @ndev: Pointer to net_device structure + * + * Return: 0 always + */ +static int ctucan_close(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + napi_disable(&priv->napi); + ctucan_chip_stop(ndev); + free_irq(ndev->irq, ndev); + close_candev(ndev); + + can_led_event(ndev, CAN_LED_EVENT_STOP); + pm_runtime_put(priv->dev); + + return 0; +} + +/** + * ctucan_get_berr_counter() - error counter routine + * @ndev: Pointer to net_device structure + * @bec: Pointer to can_berr_counter structure + * + * This is the driver error counter routine. 
+ * Return: 0 on success and failure value on error + */ +static int ctucan_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret); + pm_runtime_put_noidle(priv->dev); + return ret; + } + + ctucan_get_rec_tec(priv, bec); + pm_runtime_put(priv->dev); + + return 0; +} + +static const struct net_device_ops ctucan_netdev_ops = { + .ndo_open = ctucan_open, + .ndo_stop = ctucan_close, + .ndo_start_xmit = ctucan_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +int ctucan_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct ctucan_priv *priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + } + + priv->can.state = CAN_STATE_SLEEPING; + + return 0; +} +EXPORT_SYMBOL(ctucan_suspend); + +int ctucan_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct ctucan_priv *priv = netdev_priv(ndev); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + if (netif_running(ndev)) { + netif_device_attach(ndev); + netif_start_queue(ndev); + } + + return 0; +} +EXPORT_SYMBOL(ctucan_resume); + +int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigned int ntxbufs, + unsigned long can_clk_rate, int pm_enable_call, + void (*set_drvdata_fnc)(struct device *dev, struct net_device *ndev)) +{ + struct ctucan_priv *priv; + struct net_device *ndev; + int ret; + + /* Create a CAN device instance */ + ndev = alloc_candev(sizeof(struct ctucan_priv), ntxbufs); + if (!ndev) + return -ENOMEM; + + priv = netdev_priv(ndev); + spin_lock_init(&priv->tx_lock); + INIT_LIST_HEAD(&priv->peers_on_pdev); + priv->ntxbufs = ntxbufs; + priv->dev = dev; + priv->can.bittiming_const = &ctu_can_fd_bit_timing_max; + priv->can.data_bittiming_const = &ctu_can_fd_bit_timing_data_max; + priv->can.do_set_mode = ctucan_do_set_mode; + + /* Needed for timing adjustment to be performed as soon as possible */ + priv->can.do_set_bittiming = ctucan_set_bittiming; + priv->can.do_set_data_bittiming = ctucan_set_data_bittiming; + + priv->can.do_get_berr_counter = ctucan_get_berr_counter; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK + | CAN_CTRLMODE_LISTENONLY + | CAN_CTRLMODE_FD + | CAN_CTRLMODE_PRESUME_ACK + | CAN_CTRLMODE_BERR_REPORTING + | CAN_CTRLMODE_FD_NON_ISO + | CAN_CTRLMODE_ONE_SHOT; + priv->mem_base = addr; + + /* Get IRQ for the device */ + ndev->irq = irq; + ndev->flags |= IFF_ECHO; /* We support local echo */ + + if (set_drvdata_fnc) + set_drvdata_fnc(dev, ndev); + SET_NETDEV_DEV(ndev, dev); + ndev->netdev_ops = &ctucan_netdev_ops; + + /* Getting the can_clk info */ + if (!can_clk_rate) { + priv->can_clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->can_clk)) { + dev_err(dev, "Device clock not found.\n"); + ret = PTR_ERR(priv->can_clk); + goto err_free; + } + can_clk_rate = clk_get_rate(priv->can_clk); + } + + priv->write_reg = ctucan_write32_le; + priv->read_reg = ctucan_read32_le; + + if (pm_enable_call) + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", + __func__, ret); + pm_runtime_put_noidle(priv->dev); + goto err_pmdisable; + } + + /* Check for big-endianity and set according IO-accessors */ + if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) { + priv->write_reg = 
ctucan_write32_be; + priv->read_reg = ctucan_read32_be; + if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) { + netdev_err(ndev, "CTU_CAN_FD signature not found\n"); + ret = -ENODEV; + goto err_deviceoff; + } + } + + ret = ctucan_reset(ndev); + if (ret < 0) + goto err_deviceoff; + + priv->can.clock.freq = can_clk_rate; + + netif_napi_add(ndev, &priv->napi, ctucan_rx_poll, NAPI_POLL_WEIGHT); + + ret = register_candev(ndev); + if (ret) { + dev_err(dev, "failed to register (err=%d)\n", ret); + goto err_deviceoff; + } + + devm_can_led_init(ndev); + + pm_runtime_put(dev); + + netdev_dbg(ndev, "mem_base=0x%p irq=%d clock=%d, no. of txt buffers:%d\n", + priv->mem_base, ndev->irq, priv->can.clock.freq, priv->ntxbufs); + + return 0; + +err_deviceoff: + pm_runtime_put(priv->dev); +err_pmdisable: + if (pm_enable_call) + pm_runtime_disable(dev); +err_free: + list_del_init(&priv->peers_on_pdev); + free_candev(ndev); + return ret; +} +EXPORT_SYMBOL(ctucan_probe_common); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Jerabek <martin.jerabek01@gmail.com>"); +MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>"); +MODULE_AUTHOR("Ondrej Ille <ondrej.ille@gmail.com>"); +MODULE_DESCRIPTION("CTU CAN FD interface"); diff --git a/drivers/net/can/ctucanfd/ctucanfd_kframe.h b/drivers/net/can/ctucanfd/ctucanfd_kframe.h new file mode 100644 index 000000000000..3491299eaac2 --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd_kframe.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded + * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU + * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak <jnovak@fel.cvut.cz> + * Pavel Pisa <pisa@cmp.felk.cvut.cz> + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +/* This file is autogenerated, DO NOT EDIT!
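+ * + * (Editorial illustration, not part of the generated file: a received 64-byte CAN FD frame with bit rate switching encodes FRAME_FORMAT_W as DLC = 0xF, FDF = BIT(7), BRS = BIT(9) and RWCNT = 19 in bits 15:11, i.e. 0x9A8F.)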
*/ + +#ifndef __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__ +#define __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__ + +#include <linux/bits.h> + +/* CAN_Frame_format memory map */ +enum ctu_can_fd_can_frame_format { + CTUCANFD_FRAME_FORMAT_W = 0x0, + CTUCANFD_IDENTIFIER_W = 0x4, + CTUCANFD_TIMESTAMP_L_W = 0x8, + CTUCANFD_TIMESTAMP_U_W = 0xc, + CTUCANFD_DATA_1_4_W = 0x10, + CTUCANFD_DATA_5_8_W = 0x14, + CTUCANFD_DATA_61_64_W = 0x4c, +}; + +/* CAN_FD_Frame_format memory region */ + +/* FRAME_FORMAT_W registers */ +#define REG_FRAME_FORMAT_W_DLC GENMASK(3, 0) +#define REG_FRAME_FORMAT_W_RTR BIT(5) +#define REG_FRAME_FORMAT_W_IDE BIT(6) +#define REG_FRAME_FORMAT_W_FDF BIT(7) +#define REG_FRAME_FORMAT_W_BRS BIT(9) +#define REG_FRAME_FORMAT_W_ESI_RSV BIT(10) +#define REG_FRAME_FORMAT_W_RWCNT GENMASK(15, 11) + +/* IDENTIFIER_W registers */ +#define REG_IDENTIFIER_W_IDENTIFIER_EXT GENMASK(17, 0) +#define REG_IDENTIFIER_W_IDENTIFIER_BASE GENMASK(28, 18) + +/* TIMESTAMP_L_W registers */ +#define REG_TIMESTAMP_L_W_TIME_STAMP_L_W GENMASK(31, 0) + +/* TIMESTAMP_U_W registers */ +#define REG_TIMESTAMP_U_W_TIMESTAMP_U_W GENMASK(31, 0) + +/* DATA_1_4_W registers */ +#define REG_DATA_1_4_W_DATA_1 GENMASK(7, 0) +#define REG_DATA_1_4_W_DATA_2 GENMASK(15, 8) +#define REG_DATA_1_4_W_DATA_3 GENMASK(23, 16) +#define REG_DATA_1_4_W_DATA_4 GENMASK(31, 24) + +/* DATA_5_8_W registers */ +#define REG_DATA_5_8_W_DATA_5 GENMASK(7, 0) +#define REG_DATA_5_8_W_DATA_6 GENMASK(15, 8) +#define REG_DATA_5_8_W_DATA_7 GENMASK(23, 16) +#define REG_DATA_5_8_W_DATA_8 GENMASK(31, 24) + +/* DATA_61_64_W registers */ +#define REG_DATA_61_64_W_DATA_61 GENMASK(7, 0) +#define REG_DATA_61_64_W_DATA_62 GENMASK(15, 8) +#define REG_DATA_61_64_W_DATA_63 GENMASK(23, 16) +#define REG_DATA_61_64_W_DATA_64 GENMASK(31, 24) + +#endif diff --git a/drivers/net/can/ctucanfd/ctucanfd_kregs.h b/drivers/net/can/ctucanfd/ctucanfd_kregs.h new file mode 100644 index 000000000000..edc1c1a24348 --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd_kregs.h @@ -0,0 +1,325 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded + * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU + * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak <jnovak@fel.cvut.cz> + * Pavel Pisa <pisa@cmp.felk.cvut.cz> + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +/* This file is autogenerated, DO NOT EDIT! 
*/ + +#ifndef __CTU_CAN_FD_CAN_FD_REGISTER_MAP__ +#define __CTU_CAN_FD_CAN_FD_REGISTER_MAP__ + +#include <linux/bits.h> + +/* CAN_Registers memory map */ +enum ctu_can_fd_can_registers { + CTUCANFD_DEVICE_ID = 0x0, + CTUCANFD_VERSION = 0x2, + CTUCANFD_MODE = 0x4, + CTUCANFD_SETTINGS = 0x6, + CTUCANFD_STATUS = 0x8, + CTUCANFD_COMMAND = 0xc, + CTUCANFD_INT_STAT = 0x10, + CTUCANFD_INT_ENA_SET = 0x14, + CTUCANFD_INT_ENA_CLR = 0x18, + CTUCANFD_INT_MASK_SET = 0x1c, + CTUCANFD_INT_MASK_CLR = 0x20, + CTUCANFD_BTR = 0x24, + CTUCANFD_BTR_FD = 0x28, + CTUCANFD_EWL = 0x2c, + CTUCANFD_ERP = 0x2d, + CTUCANFD_FAULT_STATE = 0x2e, + CTUCANFD_REC = 0x30, + CTUCANFD_TEC = 0x32, + CTUCANFD_ERR_NORM = 0x34, + CTUCANFD_ERR_FD = 0x36, + CTUCANFD_CTR_PRES = 0x38, + CTUCANFD_FILTER_A_MASK = 0x3c, + CTUCANFD_FILTER_A_VAL = 0x40, + CTUCANFD_FILTER_B_MASK = 0x44, + CTUCANFD_FILTER_B_VAL = 0x48, + CTUCANFD_FILTER_C_MASK = 0x4c, + CTUCANFD_FILTER_C_VAL = 0x50, + CTUCANFD_FILTER_RAN_LOW = 0x54, + CTUCANFD_FILTER_RAN_HIGH = 0x58, + CTUCANFD_FILTER_CONTROL = 0x5c, + CTUCANFD_FILTER_STATUS = 0x5e, + CTUCANFD_RX_MEM_INFO = 0x60, + CTUCANFD_RX_POINTERS = 0x64, + CTUCANFD_RX_STATUS = 0x68, + CTUCANFD_RX_SETTINGS = 0x6a, + CTUCANFD_RX_DATA = 0x6c, + CTUCANFD_TX_STATUS = 0x70, + CTUCANFD_TX_COMMAND = 0x74, + CTUCANFD_TX_PRIORITY = 0x78, + CTUCANFD_ERR_CAPT = 0x7c, + CTUCANFD_ALC = 0x7e, + CTUCANFD_TRV_DELAY = 0x80, + CTUCANFD_SSP_CFG = 0x82, + CTUCANFD_RX_FR_CTR = 0x84, + CTUCANFD_TX_FR_CTR = 0x88, + CTUCANFD_DEBUG_REGISTER = 0x8c, + CTUCANFD_YOLO_REG = 0x90, + CTUCANFD_TIMESTAMP_LOW = 0x94, + CTUCANFD_TIMESTAMP_HIGH = 0x98, + CTUCANFD_TXTB1_DATA_1 = 0x100, + CTUCANFD_TXTB1_DATA_2 = 0x104, + CTUCANFD_TXTB1_DATA_20 = 0x14c, + CTUCANFD_TXTB2_DATA_1 = 0x200, + CTUCANFD_TXTB2_DATA_2 = 0x204, + CTUCANFD_TXTB2_DATA_20 = 0x24c, + CTUCANFD_TXTB3_DATA_1 = 0x300, + CTUCANFD_TXTB3_DATA_2 = 0x304, + CTUCANFD_TXTB3_DATA_20 = 0x34c, + CTUCANFD_TXTB4_DATA_1 = 0x400, + CTUCANFD_TXTB4_DATA_2 = 0x404, + CTUCANFD_TXTB4_DATA_20 = 0x44c, +}; + +/* Control_registers memory region */ + +/* DEVICE_ID VERSION registers */ +#define REG_DEVICE_ID_DEVICE_ID GENMASK(15, 0) +#define REG_DEVICE_ID_VER_MINOR GENMASK(23, 16) +#define REG_DEVICE_ID_VER_MAJOR GENMASK(31, 24) + +/* MODE SETTINGS registers */ +#define REG_MODE_RST BIT(0) +#define REG_MODE_BMM BIT(1) +#define REG_MODE_STM BIT(2) +#define REG_MODE_AFM BIT(3) +#define REG_MODE_FDE BIT(4) +#define REG_MODE_ACF BIT(7) +#define REG_MODE_TSTM BIT(8) +#define REG_MODE_RTRLE BIT(16) +#define REG_MODE_RTRTH GENMASK(20, 17) +#define REG_MODE_ILBP BIT(21) +#define REG_MODE_ENA BIT(22) +#define REG_MODE_NISOFD BIT(23) +#define REG_MODE_PEX BIT(24) +#define REG_MODE_TBFBO BIT(25) +#define REG_MODE_FDRF BIT(26) + +/* STATUS registers */ +#define REG_STATUS_RXNE BIT(0) +#define REG_STATUS_DOR BIT(1) +#define REG_STATUS_TXNF BIT(2) +#define REG_STATUS_EFT BIT(3) +#define REG_STATUS_RXS BIT(4) +#define REG_STATUS_TXS BIT(5) +#define REG_STATUS_EWL BIT(6) +#define REG_STATUS_IDLE BIT(7) +#define REG_STATUS_PEXS BIT(8) + +/* COMMAND registers */ +#define REG_COMMAND_RRB BIT(2) +#define REG_COMMAND_CDO BIT(3) +#define REG_COMMAND_ERCRST BIT(4) +#define REG_COMMAND_RXFCRST BIT(5) +#define REG_COMMAND_TXFCRST BIT(6) +#define REG_COMMAND_CPEXS BIT(7) + +/* INT_STAT registers */ +#define REG_INT_STAT_RXI BIT(0) +#define REG_INT_STAT_TXI BIT(1) +#define REG_INT_STAT_EWLI BIT(2) +#define REG_INT_STAT_DOI BIT(3) +#define REG_INT_STAT_FCSI BIT(4) +#define REG_INT_STAT_ALI BIT(5) +#define REG_INT_STAT_BEI BIT(6) +#define 
REG_INT_STAT_OFI BIT(7) +#define REG_INT_STAT_RXFI BIT(8) +#define REG_INT_STAT_BSI BIT(9) +#define REG_INT_STAT_RBNEI BIT(10) +#define REG_INT_STAT_TXBHCI BIT(11) + +/* INT_ENA_SET registers */ +#define REG_INT_ENA_SET_INT_ENA_SET GENMASK(11, 0) + +/* INT_ENA_CLR registers */ +#define REG_INT_ENA_CLR_INT_ENA_CLR GENMASK(11, 0) + +/* INT_MASK_SET registers */ +#define REG_INT_MASK_SET_INT_MASK_SET GENMASK(11, 0) + +/* INT_MASK_CLR registers */ +#define REG_INT_MASK_CLR_INT_MASK_CLR GENMASK(11, 0) + +/* BTR registers */ +#define REG_BTR_PROP GENMASK(6, 0) +#define REG_BTR_PH1 GENMASK(12, 7) +#define REG_BTR_PH2 GENMASK(18, 13) +#define REG_BTR_BRP GENMASK(26, 19) +#define REG_BTR_SJW GENMASK(31, 27) + +/* BTR_FD registers */ +#define REG_BTR_FD_PROP_FD GENMASK(5, 0) +#define REG_BTR_FD_PH1_FD GENMASK(11, 7) +#define REG_BTR_FD_PH2_FD GENMASK(17, 13) +#define REG_BTR_FD_BRP_FD GENMASK(26, 19) +#define REG_BTR_FD_SJW_FD GENMASK(31, 27) + +/* EWL ERP FAULT_STATE registers */ +#define REG_EWL_EW_LIMIT GENMASK(7, 0) +#define REG_EWL_ERP_LIMIT GENMASK(15, 8) +#define REG_EWL_ERA BIT(16) +#define REG_EWL_ERP BIT(17) +#define REG_EWL_BOF BIT(18) + +/* REC TEC registers */ +#define REG_REC_REC_VAL GENMASK(8, 0) +#define REG_REC_TEC_VAL GENMASK(24, 16) + +/* ERR_NORM ERR_FD registers */ +#define REG_ERR_NORM_ERR_NORM_VAL GENMASK(15, 0) +#define REG_ERR_NORM_ERR_FD_VAL GENMASK(31, 16) + +/* CTR_PRES registers */ +#define REG_CTR_PRES_CTPV GENMASK(8, 0) +#define REG_CTR_PRES_PTX BIT(9) +#define REG_CTR_PRES_PRX BIT(10) +#define REG_CTR_PRES_ENORM BIT(11) +#define REG_CTR_PRES_EFD BIT(12) + +/* FILTER_A_MASK registers */ +#define REG_FILTER_A_MASK_BIT_MASK_A_VAL GENMASK(28, 0) + +/* FILTER_A_VAL registers */ +#define REG_FILTER_A_VAL_BIT_VAL_A_VAL GENMASK(28, 0) + +/* FILTER_B_MASK registers */ +#define REG_FILTER_B_MASK_BIT_MASK_B_VAL GENMASK(28, 0) + +/* FILTER_B_VAL registers */ +#define REG_FILTER_B_VAL_BIT_VAL_B_VAL GENMASK(28, 0) + +/* FILTER_C_MASK registers */ +#define REG_FILTER_C_MASK_BIT_MASK_C_VAL GENMASK(28, 0) + +/* FILTER_C_VAL registers */ +#define REG_FILTER_C_VAL_BIT_VAL_C_VAL GENMASK(28, 0) + +/* FILTER_RAN_LOW registers */ +#define REG_FILTER_RAN_LOW_BIT_RAN_LOW_VAL GENMASK(28, 0) + +/* FILTER_RAN_HIGH registers */ +#define REG_FILTER_RAN_HIGH_BIT_RAN_HIGH_VAL GENMASK(28, 0) + +/* FILTER_CONTROL FILTER_STATUS registers */ +#define REG_FILTER_CONTROL_FANB BIT(0) +#define REG_FILTER_CONTROL_FANE BIT(1) +#define REG_FILTER_CONTROL_FAFB BIT(2) +#define REG_FILTER_CONTROL_FAFE BIT(3) +#define REG_FILTER_CONTROL_FBNB BIT(4) +#define REG_FILTER_CONTROL_FBNE BIT(5) +#define REG_FILTER_CONTROL_FBFB BIT(6) +#define REG_FILTER_CONTROL_FBFE BIT(7) +#define REG_FILTER_CONTROL_FCNB BIT(8) +#define REG_FILTER_CONTROL_FCNE BIT(9) +#define REG_FILTER_CONTROL_FCFB BIT(10) +#define REG_FILTER_CONTROL_FCFE BIT(11) +#define REG_FILTER_CONTROL_FRNB BIT(12) +#define REG_FILTER_CONTROL_FRNE BIT(13) +#define REG_FILTER_CONTROL_FRFB BIT(14) +#define REG_FILTER_CONTROL_FRFE BIT(15) +#define REG_FILTER_CONTROL_SFA BIT(16) +#define REG_FILTER_CONTROL_SFB BIT(17) +#define REG_FILTER_CONTROL_SFC BIT(18) +#define REG_FILTER_CONTROL_SFR BIT(19) + +/* RX_MEM_INFO registers */ +#define REG_RX_MEM_INFO_RX_BUFF_SIZE GENMASK(12, 0) +#define REG_RX_MEM_INFO_RX_MEM_FREE GENMASK(28, 16) + +/* RX_POINTERS registers */ +#define REG_RX_POINTERS_RX_WPP GENMASK(11, 0) +#define REG_RX_POINTERS_RX_RPP GENMASK(27, 16) + +/* RX_STATUS RX_SETTINGS registers */ +#define REG_RX_STATUS_RXE BIT(0) +#define REG_RX_STATUS_RXF BIT(1) 
+#define REG_RX_STATUS_RXMOF BIT(2) +#define REG_RX_STATUS_RXFRC GENMASK(14, 4) +#define REG_RX_STATUS_RTSOP BIT(16) + +/* RX_DATA registers */ +#define REG_RX_DATA_RX_DATA GENMASK(31, 0) + +/* TX_STATUS registers */ +#define REG_TX_STATUS_TX1S GENMASK(3, 0) +#define REG_TX_STATUS_TX2S GENMASK(7, 4) +#define REG_TX_STATUS_TX3S GENMASK(11, 8) +#define REG_TX_STATUS_TX4S GENMASK(15, 12) + +/* TX_COMMAND registers */ +#define REG_TX_COMMAND_TXCE BIT(0) +#define REG_TX_COMMAND_TXCR BIT(1) +#define REG_TX_COMMAND_TXCA BIT(2) +#define REG_TX_COMMAND_TXB1 BIT(8) +#define REG_TX_COMMAND_TXB2 BIT(9) +#define REG_TX_COMMAND_TXB3 BIT(10) +#define REG_TX_COMMAND_TXB4 BIT(11) + +/* TX_PRIORITY registers */ +#define REG_TX_PRIORITY_TXT1P GENMASK(2, 0) +#define REG_TX_PRIORITY_TXT2P GENMASK(6, 4) +#define REG_TX_PRIORITY_TXT3P GENMASK(10, 8) +#define REG_TX_PRIORITY_TXT4P GENMASK(14, 12) + +/* ERR_CAPT ALC registers */ +#define REG_ERR_CAPT_ERR_POS GENMASK(4, 0) +#define REG_ERR_CAPT_ERR_TYPE GENMASK(7, 5) +#define REG_ERR_CAPT_ALC_BIT GENMASK(20, 16) +#define REG_ERR_CAPT_ALC_ID_FIELD GENMASK(23, 21) + +/* TRV_DELAY SSP_CFG registers */ +#define REG_TRV_DELAY_TRV_DELAY_VALUE GENMASK(6, 0) +#define REG_TRV_DELAY_SSP_OFFSET GENMASK(23, 16) +#define REG_TRV_DELAY_SSP_SRC GENMASK(25, 24) + +/* RX_FR_CTR registers */ +#define REG_RX_FR_CTR_RX_FR_CTR_VAL GENMASK(31, 0) + +/* TX_FR_CTR registers */ +#define REG_TX_FR_CTR_TX_FR_CTR_VAL GENMASK(31, 0) + +/* DEBUG_REGISTER registers */ +#define REG_DEBUG_REGISTER_STUFF_COUNT GENMASK(2, 0) +#define REG_DEBUG_REGISTER_DESTUFF_COUNT GENMASK(5, 3) +#define REG_DEBUG_REGISTER_PC_ARB BIT(6) +#define REG_DEBUG_REGISTER_PC_CON BIT(7) +#define REG_DEBUG_REGISTER_PC_DAT BIT(8) +#define REG_DEBUG_REGISTER_PC_STC BIT(9) +#define REG_DEBUG_REGISTER_PC_CRC BIT(10) +#define REG_DEBUG_REGISTER_PC_CRCD BIT(11) +#define REG_DEBUG_REGISTER_PC_ACK BIT(12) +#define REG_DEBUG_REGISTER_PC_ACKD BIT(13) +#define REG_DEBUG_REGISTER_PC_EOF BIT(14) +#define REG_DEBUG_REGISTER_PC_INT BIT(15) +#define REG_DEBUG_REGISTER_PC_SUSP BIT(16) +#define REG_DEBUG_REGISTER_PC_OVR BIT(17) +#define REG_DEBUG_REGISTER_PC_SOF BIT(18) + +/* YOLO_REG registers */ +#define REG_YOLO_REG_YOLO_VAL GENMASK(31, 0) + +/* TIMESTAMP_LOW registers */ +#define REG_TIMESTAMP_LOW_TIMESTAMP_LOW GENMASK(31, 0) + +/* TIMESTAMP_HIGH registers */ +#define REG_TIMESTAMP_HIGH_TIMESTAMP_HIGH GENMASK(31, 0) + +#endif diff --git a/drivers/net/can/ctucanfd/ctucanfd_pci.c b/drivers/net/can/ctucanfd/ctucanfd_pci.c new file mode 100644 index 000000000000..8f2956a8ae43 --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd_pci.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded + * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU + * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak <jnovak@fel.cvut.cz> + * Pavel Pisa <pisa@cmp.felk.cvut.cz> + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "ctucanfd.h" 
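The #ifndef fallbacks just below keep this file building against trees whose <linux/pci.h> predates the PCI_DEVICE_DATA() helper or lacks the TEDIA IDs. As a rough sketch of what the macro produces (example_entry is an illustrative name, not part of the driver), the table entry {PCI_DEVICE_DATA(TEDIA, CTUCAN_VER21, CTUCAN_WITH_CTUCAN_ID)} used near the end of this file should expand to approximately:

static const struct pci_device_id example_entry = {
	.vendor = PCI_VENDOR_ID_TEDIA,			/* 0x1760 */
	.device = PCI_DEVICE_ID_TEDIA_CTUCAN_VER21,	/* 0xff00 */
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	.class = 0,		/* the two bare zeros in the macro */
	.class_mask = 0,
	.driver_data = (kernel_ulong_t)CTUCAN_WITH_CTUCAN_ID,
};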
+ +#ifndef PCI_DEVICE_DATA +#define PCI_DEVICE_DATA(vend, dev, data) \ +.vendor = PCI_VENDOR_ID_##vend, \ +.device = PCI_DEVICE_ID_##vend##_##dev, \ +.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \ +.driver_data = (kernel_ulong_t)(data) +#endif + +#ifndef PCI_VENDOR_ID_TEDIA +#define PCI_VENDOR_ID_TEDIA 0x1760 +#endif + +#ifndef PCI_DEVICE_ID_TEDIA_CTUCAN_VER21 +#define PCI_DEVICE_ID_TEDIA_CTUCAN_VER21 0xff00 +#endif + +#define CTUCAN_BAR0_CTUCAN_ID 0x0000 +#define CTUCAN_BAR0_CRA_BASE 0x4000 +#define CYCLONE_IV_CRA_A2P_IE (0x0050) + +#define CTUCAN_WITHOUT_CTUCAN_ID 0 +#define CTUCAN_WITH_CTUCAN_ID 1 + +struct ctucan_pci_board_data { + void __iomem *bar0_base; + void __iomem *cra_base; + void __iomem *bar1_base; + struct list_head ndev_list_head; + int use_msi; +}; + +static struct ctucan_pci_board_data *ctucan_pci_get_bdata(struct pci_dev *pdev) +{ + return (struct ctucan_pci_board_data *)pci_get_drvdata(pdev); +} + +static void ctucan_pci_set_drvdata(struct device *dev, + struct net_device *ndev) +{ + struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct ctucan_priv *priv = netdev_priv(ndev); + struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev); + + list_add(&priv->peers_on_pdev, &bdata->ndev_list_head); + priv->irq_flags = IRQF_SHARED; +} + +/** + * ctucan_pci_probe - PCI registration call + * @pdev: Handle to the pci device structure + * @ent: Pointer to the entry from ctucan_pci_tbl + * + * This function does all the memory allocation and registration for the CAN + * device. + * + * Return: 0 on success and failure value on error + */ +static int ctucan_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + unsigned long driver_data = ent->driver_data; + struct ctucan_pci_board_data *bdata; + void __iomem *addr; + void __iomem *cra_addr; + void __iomem *bar0_base; + u32 cra_a2p_ie; + u32 ctucan_id = 0; + int ret; + unsigned int ntxbufs; + unsigned int num_cores = 1; + unsigned int core_i = 0; + int irq; + int msi_ok = 0; + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(dev, "pci_enable_device FAILED\n"); + goto err; + } + + ret = pci_request_regions(pdev, KBUILD_MODNAME); + if (ret) { + dev_err(dev, "pci_request_regions FAILED\n"); + goto err_disable_device; + } + + ret = pci_enable_msi(pdev); + if (!ret) { + dev_info(dev, "MSI enabled\n"); + pci_set_master(pdev); + msi_ok = 1; + } + + dev_info(dev, "ctucan BAR0 0x%08llx 0x%08llx\n", + (long long)pci_resource_start(pdev, 0), + (long long)pci_resource_len(pdev, 0)); + + dev_info(dev, "ctucan BAR1 0x%08llx 0x%08llx\n", + (long long)pci_resource_start(pdev, 1), + (long long)pci_resource_len(pdev, 1)); + + addr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1)); + if (!addr) { + dev_err(dev, "PCI BAR 1 cannot be mapped\n"); + ret = -ENOMEM; + goto err_release_regions; + } + + /* Cyclone IV PCI Express Control Registers Area */ + bar0_base = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); + if (!bar0_base) { + dev_err(dev, "PCI BAR 0 cannot be mapped\n"); + ret = -EIO; + goto err_pci_iounmap_bar1; + } + + if (driver_data == CTUCAN_WITHOUT_CTUCAN_ID) { + cra_addr = bar0_base; + num_cores = 2; + } else { + cra_addr = bar0_base + CTUCAN_BAR0_CRA_BASE; + ctucan_id = ioread32(bar0_base + CTUCAN_BAR0_CTUCAN_ID); + dev_info(dev, "ctucan_id 0x%08lx\n", (unsigned long)ctucan_id); + num_cores = ctucan_id & 0xf; + } + + irq = pdev->irq; + + ntxbufs = 4; + + bdata = kzalloc(sizeof(*bdata), GFP_KERNEL); + if (!bdata) { + ret = -ENOMEM; + goto 
err_pci_iounmap_bar0;
+	}
+
+	INIT_LIST_HEAD(&bdata->ndev_list_head);
+	bdata->bar0_base = bar0_base;
+	bdata->cra_base = cra_addr;
+	bdata->bar1_base = addr;
+	bdata->use_msi = msi_ok;
+
+	pci_set_drvdata(pdev, bdata);
+
+	ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
+				  0, ctucan_pci_set_drvdata);
+	if (ret < 0)
+		goto err_free_board;
+
+	core_i++;
+
+	while (core_i < num_cores) {
+		addr += 0x4000;
+		ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
+					  0, ctucan_pci_set_drvdata);
+		if (ret < 0) {
+			dev_info(dev, "CTU CAN FD core %d initialization failed\n",
+				 core_i);
+			break;
+		}
+		core_i++;
+	}
+
+	/* enable interrupt in
+	 * Avalon-MM to PCI Express Interrupt Enable Register
+	 */
+	cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
+	dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);
+	cra_a2p_ie |= 1;
+	iowrite32(cra_a2p_ie, cra_addr + CYCLONE_IV_CRA_A2P_IE);
+	cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
+	dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);
+
+	return 0;
+
+err_free_board:
+	pci_set_drvdata(pdev, NULL);
+	kfree(bdata);
+err_pci_iounmap_bar0:
+	pci_iounmap(pdev, cra_addr);
+err_pci_iounmap_bar1:
+	pci_iounmap(pdev, addr);
+err_release_regions:
+	if (msi_ok) {
+		pci_disable_msi(pdev);
+		pci_clear_master(pdev);
+	}
+	pci_release_regions(pdev);
+err_disable_device:
+	pci_disable_device(pdev);
+err:
+	return ret;
+}
+
+/**
+ * ctucan_pci_remove - Unregister the device after releasing the resources
+ * @pdev:	Handle to the pci device structure
+ *
+ * This function frees all the resources allocated to the device.
+ */
+static void ctucan_pci_remove(struct pci_dev *pdev)
+{
+	struct net_device *ndev;
+	struct ctucan_priv *priv = NULL;
+	struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev);
+
+	dev_dbg(&pdev->dev, "ctucan_remove");
+
+	if (!bdata) {
+		dev_err(&pdev->dev, "%s: no list of devices\n", __func__);
+		return;
+	}
+
+	/* disable interrupt in
+	 * Avalon-MM to PCI Express Interrupt Enable Register
+	 */
+	if (bdata->cra_base)
+		iowrite32(0, bdata->cra_base + CYCLONE_IV_CRA_A2P_IE);
+
+	while ((priv = list_first_entry_or_null(&bdata->ndev_list_head, struct ctucan_priv,
+						peers_on_pdev)) != NULL) {
+		ndev = priv->can.dev;
+
+		unregister_candev(ndev);
+
+		netif_napi_del(&priv->napi);
+
+		list_del_init(&priv->peers_on_pdev);
+		free_candev(ndev);
+	}
+
+	pci_iounmap(pdev, bdata->bar1_base);
+
+	if (bdata->use_msi) {
+		pci_disable_msi(pdev);
+		pci_clear_master(pdev);
+	}
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+
+	pci_iounmap(pdev, bdata->bar0_base);
+
+	pci_set_drvdata(pdev, NULL);
+	kfree(bdata);
+}
+
+static SIMPLE_DEV_PM_OPS(ctucan_pci_pm_ops, ctucan_suspend, ctucan_resume);
+
+static const struct pci_device_id ctucan_pci_tbl[] = {
+	{PCI_DEVICE_DATA(TEDIA, CTUCAN_VER21,
+			 CTUCAN_WITH_CTUCAN_ID)},
+	{},
+};
+
+static struct pci_driver ctucan_pci_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = ctucan_pci_tbl,
+	.probe = ctucan_pci_probe,
+	.remove = ctucan_pci_remove,
+	.driver.pm = &ctucan_pci_pm_ops,
+};
+
+module_pci_driver(ctucan_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>");
+MODULE_DESCRIPTION("CTU CAN FD for PCI bus");
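A note on the probe loop above: each CTU CAN FD core occupies its own 0x4000-byte window in BAR 1, so core N sits at bar1_base + N * 0x4000, and boards without the CTUCAN_ID register are assumed to carry two cores. A minimal sketch of how one such window could be sanity-checked before registration follows; the helper is illustrative only, not part of the driver, and it assumes only that an unpopulated window reads back as all zeros or all ones:

/* Illustrative only: does a CTU CAN FD core window look populated? */
static bool ctucan_core_window_present(void __iomem *bar1_base,
				       unsigned int core_i)
{
	void __iomem *core_base = bar1_base + core_i * 0x4000;
	u32 id = ioread32(core_base + CTUCANFD_DEVICE_ID);

	/* DEVICE_ID (low 16 bits) and VERSION share this word; a read of
	 * all zeros or all ones suggests nothing is mapped behind the window.
	 */
	return id != 0x0 && id != 0xffffffff;
}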
diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c
new file mode 100644
index 000000000000..89d54c2151e1
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ *	Jiri Novak <jnovak@fel.cvut.cz>
+ *	Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "ctucanfd.h"
+
+#define DRV_NAME "ctucanfd"
+
+static void ctucan_platform_set_drvdata(struct device *dev,
+					struct net_device *ndev)
+{
+	struct platform_device *pdev = container_of(dev, struct platform_device,
+						    dev);
+
+	platform_set_drvdata(pdev, ndev);
+}
+
+/**
+ * ctucan_platform_probe - Platform registration call
+ * @pdev:	Handle to the platform device structure
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_platform_probe(struct platform_device *pdev)
+{
+	struct resource *res; /* IO mem resources */
+	struct device *dev = &pdev->dev;
+	void __iomem *addr;
+	int ret;
+	unsigned int ntxbufs;
+	int irq;
+
+	/* Get the virtual base address for the device */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(addr)) {
+		dev_err(dev, "Cannot remap address.\n");
+		ret = PTR_ERR(addr);
+		goto err;
+	}
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		ret = irq;
+		goto err;
+	}
+
+	/* The number of TX buffers might change in future hardware. If so,
+	 * it will be passed as a property via the device tree.
+	 */
+	ntxbufs = 4;
+	ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 0,
+				  1, ctucan_platform_set_drvdata);
+
+	if (ret < 0)
+		platform_set_drvdata(pdev, NULL);
+
+err:
+	return ret;
+}
+
+/**
+ * ctucan_platform_remove - Unregister the device after releasing the resources
+ * @pdev:	Handle to the platform device structure
+ *
+ * This function frees all the resources allocated to the device.
+ * Return: 0 always + */ +static int ctucan_platform_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct ctucan_priv *priv = netdev_priv(ndev); + + netdev_dbg(ndev, "ctucan_remove"); + + unregister_candev(ndev); + pm_runtime_disable(&pdev->dev); + netif_napi_del(&priv->napi); + free_candev(ndev); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(ctucan_platform_pm_ops, ctucan_suspend, ctucan_resume); + +/* Match table for OF platform binding */ +static const struct of_device_id ctucan_of_match[] = { + { .compatible = "ctu,ctucanfd-2", }, + { .compatible = "ctu,ctucanfd", }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(of, ctucan_of_match); + +static struct platform_driver ctucanfd_driver = { + .probe = ctucan_platform_probe, + .remove = ctucan_platform_remove, + .driver = { + .name = DRV_NAME, + .pm = &ctucan_platform_pm_ops, + .of_match_table = ctucan_of_match, + }, +}; + +module_platform_driver(ctucanfd_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Jerabek"); +MODULE_DESCRIPTION("CTU CAN FD for platform"); diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c index 2103bcca9012..c1e76f0a5064 100644 --- a/drivers/net/can/dev/bittiming.c +++ b/drivers/net/can/dev/bittiming.c @@ -116,7 +116,7 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt, can_update_sample_point(btc, sample_point_nominal, tseg / 2, &tseg1, &tseg2, &sample_point_error); - if (sample_point_error > best_sample_point_error) + if (sample_point_error >= best_sample_point_error) continue; best_sample_point_error = sample_point_error; diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c index 7f80d8e1e750..6d0dc18c03e7 100644 --- a/drivers/net/can/dev/rx-offload.c +++ b/drivers/net/can/dev/rx-offload.c @@ -221,7 +221,7 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) } EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); -int can_rx_offload_queue_sorted(struct can_rx_offload *offload, +int can_rx_offload_queue_timestamp(struct can_rx_offload *offload, struct sk_buff *skb, u32 timestamp) { struct can_rx_offload_cb *cb; @@ -240,7 +240,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload, return 0; } -EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted); +EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp); unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, unsigned int idx, u32 timestamp, @@ -256,7 +256,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, if (!skb) return 0; - err = can_rx_offload_queue_sorted(offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(offload, skb, timestamp); if (err) { stats->rx_errors++; stats->tx_fifo_errors++; diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index 74d7fcbfd065..fe9bda0f5ec4 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -723,11 +723,9 @@ static int flexcan_get_berr_counter(const struct net_device *dev, const struct flexcan_priv *priv = netdev_priv(dev); int err; - err = pm_runtime_get_sync(priv->dev); - if (err < 0) { - pm_runtime_put_noidle(priv->dev); + err = pm_runtime_resume_and_get(priv->dev); + if (err < 0) return err; - } err = __flexcan_get_berr_counter(dev, bec); @@ -845,7 +843,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) if (tx_errors) dev->stats.tx_errors++; - err = can_rx_offload_queue_sorted(&priv->offload, skb, 
timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) dev->stats.rx_fifo_errors++; } @@ -892,7 +890,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) if (unlikely(new_state == CAN_STATE_BUS_OFF)) can_bus_off(dev); - err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) dev->stats.rx_fifo_errors++; } @@ -1700,11 +1698,9 @@ static int flexcan_open(struct net_device *dev) return -EINVAL; } - err = pm_runtime_get_sync(priv->dev); - if (err < 0) { - pm_runtime_put_noidle(priv->dev); + err = pm_runtime_resume_and_get(priv->dev); + if (err < 0) return err; - } err = open_candev(dev); if (err) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index b3b5bc1c803b..e6d2da4a9f41 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -77,9 +77,6 @@ enum m_can_reg { M_CAN_TXEFA = 0xf8, }; -/* napi related */ -#define M_CAN_NAPI_WEIGHT 64 - /* message ram configuration data length */ #define MRAM_CFG_LEN 8 @@ -464,7 +461,7 @@ static void m_can_receive_skb(struct m_can_classdev *cdev, struct net_device_stats *stats = &cdev->net->stats; int err; - err = can_rx_offload_queue_sorted(&cdev->offload, skb, + err = can_rx_offload_queue_timestamp(&cdev->offload, skb, timestamp); if (err) stats->rx_fifo_errors++; @@ -951,7 +948,7 @@ static int m_can_rx_peripheral(struct net_device *dev) struct m_can_classdev *cdev = netdev_priv(dev); int work_done; - work_done = m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT); + work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT); /* Don't re-enable interrupts if the driver had a fatal error * (e.g., FIFO read failure). @@ -1474,7 +1471,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev) if (!cdev->is_peripheral) netif_napi_add(dev, &cdev->napi, - m_can_poll, M_CAN_NAPI_WEIGHT); + m_can_poll, NAPI_POLL_WEIGHT); /* Shared properties of all M_CAN versions */ cdev->version = m_can_version; @@ -1994,7 +1991,7 @@ int m_can_class_register(struct m_can_classdev *cdev) if (cdev->is_peripheral) { ret = can_rx_offload_add_manual(cdev->net, &cdev->offload, - M_CAN_NAPI_WEIGHT); + NAPI_POLL_WEIGHT); if (ret) goto clk_disable; } diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index de4ddf79ba9b..65ba6697bd7d 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -14,6 +14,8 @@ #include <linux/platform_device.h> #include <linux/netdevice.h> #include <linux/can/dev.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <sysdev/fsl_soc.h> #include <linux/clk.h> diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index 110071b26921..4b2f9cb17fc3 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig @@ -107,7 +107,7 @@ config CAN_TSCAN1 depends on ISA help This driver is for Technologic Systems' TSCAN-1 PC104 boards. 
- http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1 + https://www.embeddedts.com/products/TS-CAN1 The driver supports multiple boards and automatically configures them: PLD IO base addresses are read from jumpers JP1 and JP2, IRQ numbers are read from jumpers JP4 and JP5, diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c index 3dbba8d61afb..f3862bed3d40 100644 --- a/drivers/net/can/sja1000/tscan1.c +++ b/drivers/net/can/sja1000/tscan1.c @@ -5,10 +5,9 @@ * Copyright 2010 Andre B. Oliveira */ -/* - * References: - * - Getting started with TS-CAN1, Technologic Systems, Jun 2009 - * http://www.embeddedarm.com/documentation/ts-can1-manual.pdf +/* References: + * - Getting started with TS-CAN1, Technologic Systems, Feb 2022 + * https://docs.embeddedts.com/TS-CAN1 */ #include <linux/init.h> diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index f9dd8fdba12b..b21252390216 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -37,6 +37,12 @@ static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = { .model = MCP251XFD_MODEL_MCP2518FD, }; +static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251863 = { + .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX | + MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC, + .model = MCP251XFD_MODEL_MCP251863, +}; + /* Autodetect model, start with CRC enabled. */ static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = { .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX | @@ -75,6 +81,8 @@ static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model) return "MCP2517FD"; case MCP251XFD_MODEL_MCP2518FD: return "MCP2518FD"; + case MCP251XFD_MODEL_MCP251863: + return "MCP251863"; case MCP251XFD_MODEL_MCP251XFD: return "MCP251xFD"; } @@ -916,7 +924,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv) cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) stats->rx_fifo_errors++; @@ -1021,7 +1029,7 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv) return 0; mcp251xfd_skb_set_timestamp(priv, skb, timestamp); - err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) stats->rx_fifo_errors++; @@ -1094,7 +1102,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv) cf->data[7] = bec.rxerr; } - err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) stats->rx_fifo_errors++; @@ -1259,7 +1267,8 @@ mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr) * - for mcp2518fd: offset not 0 or 1 */ if (chip_tx_tail != tx_tail || - !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) { + !(offset == 0 || (offset == 1 && (mcp251xfd_is_2518FD(priv) || + mcp251xfd_is_251863(priv))))) { netdev_err(priv->ndev, "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n", addr, nr, tx_ring->tail, tx_tail, chip_tx_tail, @@ -1697,7 +1706,7 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv) else devtype_data = &mcp251xfd_devtype_data_mcp2517fd; - if (!mcp251xfd_is_251X(priv) 
&& + if (!mcp251xfd_is_251XFD(priv) && priv->devtype_data.model != devtype_data->model) { netdev_info(ndev, "Detected %s, but firmware specifies a %s. Fixing up.\n", @@ -1930,6 +1939,9 @@ static const struct of_device_id mcp251xfd_of_match[] = { .compatible = "microchip,mcp2518fd", .data = &mcp251xfd_devtype_data_mcp2518fd, }, { + .compatible = "microchip,mcp251863", + .data = &mcp251xfd_devtype_data_mcp251863, + }, { .compatible = "microchip,mcp251xfd", .data = &mcp251xfd_devtype_data_mcp251xfd, }, { @@ -1946,6 +1958,9 @@ static const struct spi_device_id mcp251xfd_id_table[] = { .name = "mcp2518fd", .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd, }, { + .name = "mcp251863", + .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251863, + }, { .name = "mcp251xfd", .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd, }, { diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c index d09f7fbf2ba7..ced8d9c81f8c 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c @@ -173,7 +173,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv, } mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb); - err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts); if (err) stats->rx_fifo_errors++; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index 9cb6b5ad8dda..1d43bccc29bf 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -586,7 +586,8 @@ struct mcp251xfd_regs_status { enum mcp251xfd_model { MCP251XFD_MODEL_MCP2517FD = 0x2517, MCP251XFD_MODEL_MCP2518FD = 0x2518, - MCP251XFD_MODEL_MCP251XFD = 0xffff, /* autodetect model */ + MCP251XFD_MODEL_MCP251863 = 0x251863, + MCP251XFD_MODEL_MCP251XFD = 0xffffffff, /* autodetect model */ }; struct mcp251xfd_devtype_data { @@ -659,12 +660,13 @@ struct mcp251xfd_priv { static inline bool \ mcp251xfd_is_##_model(const struct mcp251xfd_priv *priv) \ { \ - return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model##FD; \ + return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model; \ } -MCP251XFD_IS(2517); -MCP251XFD_IS(2518); -MCP251XFD_IS(251X); +MCP251XFD_IS(2517FD); +MCP251XFD_IS(2518FD); +MCP251XFD_IS(251863); +MCP251XFD_IS(251XFD); static inline bool mcp251xfd_is_fd_mode(const struct mcp251xfd_priv *priv) { diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index ff31b993ab17..bb3f2e3b004c 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -633,7 +633,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, cf->data[3] = CAN_ERR_PROT_LOC_ACK; timestamp = hecc_read(priv, HECC_CANLNT); - err = can_rx_offload_queue_sorted(&priv->offload, skb, + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) ndev->stats.rx_fifo_errors++; @@ -668,7 +668,7 @@ static void ti_hecc_change_state(struct net_device *ndev, } timestamp = hecc_read(priv, HECC_CANLNT); - err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) ndev->stats.rx_fifo_errors++; } diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index e562c5ab1149..43f0c6a064ba 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -239,7 +239,7 @@ static const struct 
can_bittiming_const xcan_bittiming_const_canfd = { }; /* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */ -static struct can_bittiming_const xcan_data_bittiming_const_canfd = { +static const struct can_bittiming_const xcan_data_bittiming_const_canfd = { .name = DRIVER_NAME, .tseg1_min = 1, .tseg1_max = 16, @@ -265,7 +265,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { }; /* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */ -static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = { +static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = { .name = DRIVER_NAME, .tseg1_min = 1, .tseg1_max = 32, diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index a416240d001b..12c15da55664 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -1681,9 +1681,6 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, break; case PHY_INTERFACE_MODE_RMII: miicfg |= GSWIP_MII_CFG_MODE_RMIIM; - - /* Configure the RMII clock as output: */ - miicfg |= GSWIP_MII_CFG_RMII_CLK; break; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index b2752978cb09..f91deea9368e 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1027,40 +1027,7 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member) static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { - struct ksz_device *dev = ds->priv; - struct ksz_port *p; - u8 data; - - ksz_pread8(dev, port, P_STP_CTRL, &data); - data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); - - switch (state) { - case BR_STATE_DISABLED: - data |= PORT_LEARN_DISABLE; - break; - case BR_STATE_LISTENING: - data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); - break; - case BR_STATE_LEARNING: - data |= PORT_RX_ENABLE; - break; - case BR_STATE_FORWARDING: - data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); - break; - case BR_STATE_BLOCKING: - data |= PORT_LEARN_DISABLE; - break; - default: - dev_err(ds->dev, "invalid STP state: %d\n", state); - return; - } - - ksz_pwrite8(dev, port, P_STP_CTRL, data); - - p = &dev->ports[port]; - p->stp_state = state; - - ksz_update_port_member(dev, port); + ksz_port_stp_state_set(ds, port, state, P_STP_CTRL); } static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port) diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h index d74defcd86b4..4109433b6b6c 100644 --- a/drivers/net/dsa/microchip/ksz8795_reg.h +++ b/drivers/net/dsa/microchip/ksz8795_reg.h @@ -160,9 +160,6 @@ #define PORT_DISCARD_NON_VID BIT(5) #define PORT_FORCE_FLOW_CTRL BIT(4) #define PORT_BACK_PRESSURE BIT(3) -#define PORT_TX_ENABLE BIT(2) -#define PORT_RX_ENABLE BIT(1) -#define PORT_LEARN_DISABLE BIT(0) #define REG_PORT_1_CTRL_3 0x13 #define REG_PORT_2_CTRL_3 0x23 diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 8222c8a6c5ec..48c90e4cda30 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -65,100 +65,6 @@ static const struct { { 0x83, "tx_discards" }, }; -struct ksz9477_stats_raw { - u64 rx_hi; - u64 rx_undersize; - u64 rx_fragments; - u64 rx_oversize; - u64 rx_jabbers; - u64 rx_symbol_err; - u64 rx_crc_err; - u64 rx_align_err; - u64 rx_mac_ctrl; - u64 rx_pause; - u64 rx_bcast; - u64 rx_mcast; - u64 rx_ucast; 
- u64 rx_64_or_less; - u64 rx_65_127; - u64 rx_128_255; - u64 rx_256_511; - u64 rx_512_1023; - u64 rx_1024_1522; - u64 rx_1523_2000; - u64 rx_2001; - u64 tx_hi; - u64 tx_late_col; - u64 tx_pause; - u64 tx_bcast; - u64 tx_mcast; - u64 tx_ucast; - u64 tx_deferred; - u64 tx_total_col; - u64 tx_exc_col; - u64 tx_single_col; - u64 tx_mult_col; - u64 rx_total; - u64 tx_total; - u64 rx_discards; - u64 tx_discards; -}; - -static void ksz9477_r_mib_stats64(struct ksz_device *dev, int port) -{ - struct rtnl_link_stats64 *stats; - struct ksz9477_stats_raw *raw; - struct ksz_port_mib *mib; - - mib = &dev->ports[port].mib; - stats = &mib->stats64; - raw = (struct ksz9477_stats_raw *)mib->counters; - - spin_lock(&mib->stats64_lock); - - stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast; - stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast; - - /* HW counters are counting bytes + FCS which is not acceptable - * for rtnl_link_stats64 interface - */ - stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN; - stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN; - - stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments + - raw->rx_oversize; - - stats->rx_crc_errors = raw->rx_crc_err; - stats->rx_frame_errors = raw->rx_align_err; - stats->rx_dropped = raw->rx_discards; - stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + - stats->rx_frame_errors + stats->rx_dropped; - - stats->tx_window_errors = raw->tx_late_col; - stats->tx_fifo_errors = raw->tx_discards; - stats->tx_aborted_errors = raw->tx_exc_col; - stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors + - stats->tx_aborted_errors; - - stats->multicast = raw->rx_mcast; - stats->collisions = raw->tx_total_col; - - spin_unlock(&mib->stats64_lock); -} - -static void ksz9477_get_stats64(struct dsa_switch *ds, int port, - struct rtnl_link_stats64 *s) -{ - struct ksz_device *dev = ds->priv; - struct ksz_port_mib *mib; - - mib = &dev->ports[port].mib; - - spin_lock(&mib->stats64_lock); - memcpy(s, &mib->stats64, sizeof(*s)); - spin_unlock(&mib->stats64_lock); -} - static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) { regmap_update_bits(dev->regmap[0], addr, bits, set ? 
bits : 0); @@ -517,38 +423,7 @@ static void ksz9477_cfg_port_member(struct ksz_device *dev, int port, static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { - struct ksz_device *dev = ds->priv; - struct ksz_port *p = &dev->ports[port]; - u8 data; - - ksz_pread8(dev, port, P_STP_CTRL, &data); - data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); - - switch (state) { - case BR_STATE_DISABLED: - data |= PORT_LEARN_DISABLE; - break; - case BR_STATE_LISTENING: - data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); - break; - case BR_STATE_LEARNING: - data |= PORT_RX_ENABLE; - break; - case BR_STATE_FORWARDING: - data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); - break; - case BR_STATE_BLOCKING: - data |= PORT_LEARN_DISABLE; - break; - default: - dev_err(ds->dev, "invalid STP state: %d\n", state); - return; - } - - ksz_pwrite8(dev, port, P_STP_CTRL, data); - p->stp_state = state; - - ksz_update_port_member(dev, port); + ksz_port_stp_state_set(ds, port, state, P_STP_CTRL); } static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port) @@ -1493,7 +1368,7 @@ static const struct dsa_switch_ops ksz9477_switch_ops = { .port_mdb_del = ksz9477_port_mdb_del, .port_mirror_add = ksz9477_port_mirror_add, .port_mirror_del = ksz9477_port_mirror_del, - .get_stats64 = ksz9477_get_stats64, + .get_stats64 = ksz_get_stats64, .port_change_mtu = ksz9477_change_mtu, .port_max_mtu = ksz9477_max_mtu, }; @@ -1684,7 +1559,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { .port_setup = ksz9477_port_setup, .r_mib_cnt = ksz9477_r_mib_cnt, .r_mib_pkt = ksz9477_r_mib_pkt, - .r_mib_stat64 = ksz9477_r_mib_stats64, + .r_mib_stat64 = ksz_r_mib_stats64, .freeze_mib = ksz9477_freeze_mib, .port_init_cnt = ksz9477_port_init_cnt, .shutdown = ksz9477_reset_switch, diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h index 0bd58467181f..7a2c8d4767af 100644 --- a/drivers/net/dsa/microchip/ksz9477_reg.h +++ b/drivers/net/dsa/microchip/ksz9477_reg.h @@ -1586,10 +1586,6 @@ #define REG_PORT_LUE_MSTP_STATE 0x0B04 -#define PORT_TX_ENABLE BIT(2) -#define PORT_RX_ENABLE BIT(1) -#define PORT_LEARN_DISABLE BIT(0) - /* C - PTP */ #define REG_PTP_PORT_RX_DELAY__2 0x0C00 diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 8014b18d9391..10f127b09e58 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -20,6 +20,102 @@ #include "ksz_common.h" +struct ksz_stats_raw { + u64 rx_hi; + u64 rx_undersize; + u64 rx_fragments; + u64 rx_oversize; + u64 rx_jabbers; + u64 rx_symbol_err; + u64 rx_crc_err; + u64 rx_align_err; + u64 rx_mac_ctrl; + u64 rx_pause; + u64 rx_bcast; + u64 rx_mcast; + u64 rx_ucast; + u64 rx_64_or_less; + u64 rx_65_127; + u64 rx_128_255; + u64 rx_256_511; + u64 rx_512_1023; + u64 rx_1024_1522; + u64 rx_1523_2000; + u64 rx_2001; + u64 tx_hi; + u64 tx_late_col; + u64 tx_pause; + u64 tx_bcast; + u64 tx_mcast; + u64 tx_ucast; + u64 tx_deferred; + u64 tx_total_col; + u64 tx_exc_col; + u64 tx_single_col; + u64 tx_mult_col; + u64 rx_total; + u64 tx_total; + u64 rx_discards; + u64 tx_discards; +}; + +void ksz_r_mib_stats64(struct ksz_device *dev, int port) +{ + struct rtnl_link_stats64 *stats; + struct ksz_stats_raw *raw; + struct ksz_port_mib *mib; + + mib = &dev->ports[port].mib; + stats = &mib->stats64; + raw = (struct ksz_stats_raw *)mib->counters; + + spin_lock(&mib->stats64_lock); + + stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast; + 
stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast; + + /* HW counters are counting bytes + FCS which is not acceptable + * for rtnl_link_stats64 interface + */ + stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN; + stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN; + + stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments + + raw->rx_oversize; + + stats->rx_crc_errors = raw->rx_crc_err; + stats->rx_frame_errors = raw->rx_align_err; + stats->rx_dropped = raw->rx_discards; + stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + + stats->rx_frame_errors + stats->rx_dropped; + + stats->tx_window_errors = raw->tx_late_col; + stats->tx_fifo_errors = raw->tx_discards; + stats->tx_aborted_errors = raw->tx_exc_col; + stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors + + stats->tx_aborted_errors; + + stats->multicast = raw->rx_mcast; + stats->collisions = raw->tx_total_col; + + spin_unlock(&mib->stats64_lock); +} +EXPORT_SYMBOL_GPL(ksz_r_mib_stats64); + +void ksz_get_stats64(struct dsa_switch *ds, int port, + struct rtnl_link_stats64 *s) +{ + struct ksz_device *dev = ds->priv; + struct ksz_port_mib *mib; + + mib = &dev->ports[port].mib; + + spin_lock(&mib->stats64_lock); + memcpy(s, &mib->stats64, sizeof(*s)); + spin_unlock(&mib->stats64_lock); +} +EXPORT_SYMBOL_GPL(ksz_get_stats64); + void ksz_update_port_member(struct ksz_device *dev, int port) { struct ksz_port *p = &dev->ports[port]; @@ -372,6 +468,46 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) } EXPORT_SYMBOL_GPL(ksz_enable_port); +void ksz_port_stp_state_set(struct dsa_switch *ds, int port, + u8 state, int reg) +{ + struct ksz_device *dev = ds->priv; + struct ksz_port *p; + u8 data; + + ksz_pread8(dev, port, reg, &data); + data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); + + switch (state) { + case BR_STATE_DISABLED: + data |= PORT_LEARN_DISABLE; + break; + case BR_STATE_LISTENING: + data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); + break; + case BR_STATE_LEARNING: + data |= PORT_RX_ENABLE; + break; + case BR_STATE_FORWARDING: + data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); + break; + case BR_STATE_BLOCKING: + data |= PORT_LEARN_DISABLE; + break; + default: + dev_err(ds->dev, "invalid STP state: %d\n", state); + return; + } + + ksz_pwrite8(dev, port, reg, data); + + p = &dev->ports[port]; + p->stp_state = state; + + ksz_update_port_member(dev, port); +} +EXPORT_SYMBOL_GPL(ksz_port_stp_state_set); + struct ksz_device *ksz_switch_alloc(struct device *base, void *priv) { struct dsa_switch *ds; diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 485d4a948c38..28cda79b090f 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -151,6 +151,9 @@ int ksz9477_switch_register(struct ksz_device *dev); void ksz_update_port_member(struct ksz_device *dev, int port); void ksz_init_mib_timer(struct ksz_device *dev); +void ksz_r_mib_stats64(struct ksz_device *dev, int port); +void ksz_get_stats64(struct dsa_switch *ds, int port, + struct rtnl_link_stats64 *s); /* Common DSA access functions */ @@ -165,6 +168,8 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port, struct netlink_ext_ack *extack); void ksz_port_bridge_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge); +void ksz_port_stp_state_set(struct dsa_switch *ds, int port, + u8 state, int reg); void ksz_port_fast_age(struct dsa_switch *ds, int port); int 
ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data); @@ -292,6 +297,11 @@ static inline void ksz_regmap_unlock(void *__mtx) mutex_unlock(mtx); } +/* STP State Defines */ +#define PORT_TX_ENABLE BIT(2) +#define PORT_RX_ENABLE BIT(1) +#define PORT_LEARN_DISABLE BIT(0) + /* Regmap tables generation */ #define KSZ_SPI_OP_RD 3 #define KSZ_SPI_OP_WR 2 diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 19f0035d4410..e55d962fef9f 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -24,6 +24,11 @@ #include "mt7530.h" +static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct mt753x_pcs, pcs); +} + /* String, offset, and register size in bytes if different from 4 bytes */ static const struct mt7530_mib_desc mt7530_mib[] = { MIB_DESC(1, 0x00, "TxDrop"), @@ -2389,35 +2394,30 @@ mt7531_setup(struct dsa_switch *ds) return 0; } -static bool -mt7530_phy_mode_supported(struct dsa_switch *ds, int port, - const struct phylink_link_state *state) +static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) { - struct mt7530_priv *priv = ds->priv; - switch (port) { case 0 ... 4: /* Internal phy */ - if (state->interface != PHY_INTERFACE_MODE_GMII) - return false; + __set_bit(PHY_INTERFACE_MODE_GMII, + config->supported_interfaces); break; + case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */ - if (!phy_interface_mode_is_rgmii(state->interface) && - state->interface != PHY_INTERFACE_MODE_MII && - state->interface != PHY_INTERFACE_MODE_GMII) - return false; + phy_interface_set_rgmii(config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_MII, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, + config->supported_interfaces); break; + case 6: /* 1st cpu port */ - if (state->interface != PHY_INTERFACE_MODE_RGMII && - state->interface != PHY_INTERFACE_MODE_TRGMII) - return false; + __set_bit(PHY_INTERFACE_MODE_RGMII, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_TRGMII, + config->supported_interfaces); break; - default: - dev_err(priv->dev, "%s: unsupported port: %i\n", __func__, - port); - return false; } - - return true; } static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port) @@ -2425,42 +2425,35 @@ static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port) return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII); } -static bool -mt7531_phy_mode_supported(struct dsa_switch *ds, int port, - const struct phylink_link_state *state) +static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) { struct mt7530_priv *priv = ds->priv; switch (port) { case 0 ... 
4: /* Internal phy */ - if (state->interface != PHY_INTERFACE_MODE_GMII) - return false; + __set_bit(PHY_INTERFACE_MODE_GMII, + config->supported_interfaces); break; + case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */ - if (mt7531_is_rgmii_port(priv, port)) - return phy_interface_mode_is_rgmii(state->interface); + if (mt7531_is_rgmii_port(priv, port)) { + phy_interface_set_rgmii(config->supported_interfaces); + break; + } fallthrough; + case 6: /* 1st cpu port supports sgmii/8023z only */ - if (state->interface != PHY_INTERFACE_MODE_SGMII && - !phy_interface_mode_is_8023z(state->interface)) - return false; + __set_bit(PHY_INTERFACE_MODE_SGMII, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + config->supported_interfaces); + + config->mac_capabilities |= MAC_2500FD; break; - default: - dev_err(priv->dev, "%s: unsupported port: %i\n", __func__, - port); - return false; } - - return true; -} - -static bool -mt753x_phy_mode_supported(struct dsa_switch *ds, int port, - const struct phylink_link_state *state) -{ - struct mt7530_priv *priv = ds->priv; - - return priv->info->phy_mode_supported(ds, port, state); } static int @@ -2533,30 +2526,11 @@ static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port, return 0; } -static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port, - unsigned long *supported) -{ - /* Port5 supports ethier RGMII or SGMII. - * Port6 supports SGMII only. - */ - switch (port) { - case 5: - if (mt7531_is_rgmii_port(priv, port)) - break; - fallthrough; - case 6: - phylink_set(supported, 1000baseX_Full); - phylink_set(supported, 2500baseX_Full); - phylink_set(supported, 2500baseT_Full); - } -} - -static void -mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port, - unsigned int mode, phy_interface_t interface, - int speed, int duplex) +static void mt7531_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, int speed, int duplex) { - struct mt7530_priv *priv = ds->priv; + struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; + int port = pcs_to_mt753x_pcs(pcs)->port; unsigned int val; /* For adjusting speed and duplex of SGMII force mode. */ @@ -2582,6 +2556,9 @@ mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port, /* MT7531 SGMII 1G force mode can only work in full duplex mode, * no matter MT7531_SGMII_FORCE_HALF_DUPLEX is set or not. + * + * The speed check is unnecessary as the MAC capabilities apply + * this restriction. 
--rmk */ if ((speed == SPEED_10 || speed == SPEED_100) && duplex != DUPLEX_FULL) @@ -2657,9 +2634,10 @@ static int mt7531_sgmii_setup_mode_an(struct mt7530_priv *priv, int port, return 0; } -static void mt7531_sgmii_restart_an(struct dsa_switch *ds, int port) +static void mt7531_pcs_an_restart(struct phylink_pcs *pcs) { - struct mt7530_priv *priv = ds->priv; + struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; + int port = pcs_to_mt753x_pcs(pcs)->port; u32 val; /* Only restart AN when AN is enabled */ @@ -2716,6 +2694,24 @@ mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode, return priv->info->mac_port_config(ds, port, mode, state->interface); } +static struct phylink_pcs * +mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port, + phy_interface_t interface) +{ + struct mt7530_priv *priv = ds->priv; + + switch (interface) { + case PHY_INTERFACE_MODE_TRGMII: + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + return &priv->pcs[port].pcs; + + default: + return NULL; + } +} + static void mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state) @@ -2723,9 +2719,6 @@ mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, struct mt7530_priv *priv = ds->priv; u32 mcr_cur, mcr_new; - if (!mt753x_phy_mode_supported(ds, port, state)) - goto unsupported; - switch (port) { case 0 ... 4: /* Internal phy */ if (state->interface != PHY_INTERFACE_MODE_GMII) @@ -2780,17 +2773,6 @@ unsupported: mt7530_write(priv, MT7530_PMCR_P(port), mcr_new); } -static void -mt753x_phylink_mac_an_restart(struct dsa_switch *ds, int port) -{ - struct mt7530_priv *priv = ds->priv; - - if (!priv->info->mac_pcs_an_restart) - return; - - priv->info->mac_pcs_an_restart(ds, port); -} - static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface) @@ -2800,16 +2782,13 @@ static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port, mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK); } -static void mt753x_mac_pcs_link_up(struct dsa_switch *ds, int port, - unsigned int mode, phy_interface_t interface, - int speed, int duplex) +static void mt753x_phylink_pcs_link_up(struct phylink_pcs *pcs, + unsigned int mode, + phy_interface_t interface, + int speed, int duplex) { - struct mt7530_priv *priv = ds->priv; - - if (!priv->info->mac_pcs_link_up) - return; - - priv->info->mac_pcs_link_up(ds, port, mode, interface, speed, duplex); + if (pcs->ops->pcs_link_up) + pcs->ops->pcs_link_up(pcs, mode, interface, speed, duplex); } static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, @@ -2822,8 +2801,6 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, struct mt7530_priv *priv = ds->priv; u32 mcr; - mt753x_mac_pcs_link_up(ds, port, mode, interface, speed, duplex); - mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK; /* MT753x MAC works in 1G full duplex mode for all up-clocked @@ -2903,81 +2880,51 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port) return ret; mt7530_write(priv, MT7530_PMCR_P(port), PMCR_CPU_PORT_SETTING(priv->id)); + mt753x_phylink_pcs_link_up(&priv->pcs[port].pcs, MLO_AN_FIXED, + interface, speed, DUPLEX_FULL); mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL, speed, DUPLEX_FULL, true, true); return 0; } -static void -mt7530_mac_port_validate(struct dsa_switch *ds, int port, - unsigned long *supported) +static void 
mt753x_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) { - if (port == 5) - phylink_set(supported, 1000baseX_Full); -} - -static void mt7531_mac_port_validate(struct dsa_switch *ds, int port, - unsigned long *supported) -{ - struct mt7530_priv *priv = ds->priv; - - mt7531_sgmii_validate(priv, port, supported); -} - -static void -mt753x_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state) -{ - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct mt7530_priv *priv = ds->priv; - if (state->interface != PHY_INTERFACE_MODE_NA && - !mt753x_phy_mode_supported(ds, port, state)) { - linkmode_zero(supported); - return; - } - - phylink_set_port_modes(mask); - - if (state->interface != PHY_INTERFACE_MODE_TRGMII && - !phy_interface_mode_is_8023z(state->interface)) { - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - phylink_set(mask, Autoneg); - } - - /* This switch only supports 1G full-duplex. */ - if (state->interface != PHY_INTERFACE_MODE_MII) - phylink_set(mask, 1000baseT_Full); + /* This switch only supports full-duplex at 1Gbps */ + config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000FD; - priv->info->mac_port_validate(ds, port, mask); + /* This driver does not make use of the speed, duplex, pause or the + * advertisement in its mac_config, so it is safe to mark this driver + * as non-legacy. + */ + config->legacy_pre_march2020 = false; - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); + priv->info->mac_port_get_caps(ds, port, config); +} - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); +static int mt753x_pcs_validate(struct phylink_pcs *pcs, + unsigned long *supported, + const struct phylink_link_state *state) +{ + /* Autonegotiation is not supported in TRGMII nor 802.3z modes */ + if (state->interface == PHY_INTERFACE_MODE_TRGMII || + phy_interface_mode_is_8023z(state->interface)) + phylink_clear(supported, Autoneg); - /* We can only operate at 2500BaseX or 1000BaseX. If requested - * to advertise both, only report advertising at 2500BaseX. 
- */ - phylink_helper_basex_speed(state); + return 0; } -static int -mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port, - struct phylink_link_state *state) +static void mt7530_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) { - struct mt7530_priv *priv = ds->priv; + struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; + int port = pcs_to_mt753x_pcs(pcs)->port; u32 pmsr; - if (port < 0 || port >= MT7530_NUM_PORTS) - return -EINVAL; - pmsr = mt7530_read(priv, MT7530_PMSR_P(port)); state->link = (pmsr & PMSR_LINK); @@ -3004,8 +2951,6 @@ mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port, state->pause |= MLO_PAUSE_RX; if (pmsr & PMSR_TX_FC) state->pause |= MLO_PAUSE_TX; - - return 1; } static int @@ -3047,33 +2992,59 @@ mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port, return 0; } -static int -mt7531_phylink_mac_link_state(struct dsa_switch *ds, int port, - struct phylink_link_state *state) +static void mt7531_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) { - struct mt7530_priv *priv = ds->priv; + struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; + int port = pcs_to_mt753x_pcs(pcs)->port; if (state->interface == PHY_INTERFACE_MODE_SGMII) - return mt7531_sgmii_pcs_get_state_an(priv, port, state); - - return -EOPNOTSUPP; + mt7531_sgmii_pcs_get_state_an(priv, port, state); + else + state->link = false; } -static int -mt753x_phylink_mac_link_state(struct dsa_switch *ds, int port, - struct phylink_link_state *state) +static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) { - struct mt7530_priv *priv = ds->priv; + return 0; +} - return priv->info->mac_port_get_state(ds, port, state); +static void mt7530_pcs_an_restart(struct phylink_pcs *pcs) +{ } +static const struct phylink_pcs_ops mt7530_pcs_ops = { + .pcs_validate = mt753x_pcs_validate, + .pcs_get_state = mt7530_pcs_get_state, + .pcs_config = mt753x_pcs_config, + .pcs_an_restart = mt7530_pcs_an_restart, +}; + +static const struct phylink_pcs_ops mt7531_pcs_ops = { + .pcs_validate = mt753x_pcs_validate, + .pcs_get_state = mt7531_pcs_get_state, + .pcs_config = mt753x_pcs_config, + .pcs_an_restart = mt7531_pcs_an_restart, + .pcs_link_up = mt7531_pcs_link_up, +}; + static int mt753x_setup(struct dsa_switch *ds) { struct mt7530_priv *priv = ds->priv; - int ret = priv->info->sw_setup(ds); + int i, ret; + + /* Initialise the PCS devices */ + for (i = 0; i < priv->ds->num_ports; i++) { + priv->pcs[i].pcs.ops = priv->info->pcs_ops; + priv->pcs[i].priv = priv; + priv->pcs[i].port = i; + } + ret = priv->info->sw_setup(ds); if (ret) return ret; @@ -3144,10 +3115,9 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .port_vlan_del = mt7530_port_vlan_del, .port_mirror_add = mt753x_port_mirror_add, .port_mirror_del = mt753x_port_mirror_del, - .phylink_validate = mt753x_phylink_validate, - .phylink_mac_link_state = mt753x_phylink_mac_link_state, + .phylink_get_caps = mt753x_phylink_get_caps, + .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs, .phylink_mac_config = mt753x_phylink_mac_config, - .phylink_mac_an_restart = mt753x_phylink_mac_an_restart, .phylink_mac_link_down = mt753x_phylink_mac_link_down, .phylink_mac_link_up = mt753x_phylink_mac_link_up, .get_mac_eee = mt753x_get_mac_eee, @@ -3157,39 +3127,34 @@ static const struct dsa_switch_ops mt7530_switch_ops = { static const struct mt753x_info mt753x_table[] = { [ID_MT7621] = { 
.id = ID_MT7621, + .pcs_ops = &mt7530_pcs_ops, .sw_setup = mt7530_setup, .phy_read = mt7530_phy_read, .phy_write = mt7530_phy_write, .pad_setup = mt7530_pad_clk_setup, - .phy_mode_supported = mt7530_phy_mode_supported, - .mac_port_validate = mt7530_mac_port_validate, - .mac_port_get_state = mt7530_phylink_mac_link_state, + .mac_port_get_caps = mt7530_mac_port_get_caps, .mac_port_config = mt7530_mac_config, }, [ID_MT7530] = { .id = ID_MT7530, + .pcs_ops = &mt7530_pcs_ops, .sw_setup = mt7530_setup, .phy_read = mt7530_phy_read, .phy_write = mt7530_phy_write, .pad_setup = mt7530_pad_clk_setup, - .phy_mode_supported = mt7530_phy_mode_supported, - .mac_port_validate = mt7530_mac_port_validate, - .mac_port_get_state = mt7530_phylink_mac_link_state, + .mac_port_get_caps = mt7530_mac_port_get_caps, .mac_port_config = mt7530_mac_config, }, [ID_MT7531] = { .id = ID_MT7531, + .pcs_ops = &mt7531_pcs_ops, .sw_setup = mt7531_setup, .phy_read = mt7531_ind_phy_read, .phy_write = mt7531_ind_phy_write, .pad_setup = mt7531_pad_setup, .cpu_port_config = mt7531_cpu_port_config, - .phy_mode_supported = mt7531_phy_mode_supported, - .mac_port_validate = mt7531_mac_port_validate, - .mac_port_get_state = mt7531_phylink_mac_link_state, + .mac_port_get_caps = mt7531_mac_port_get_caps, .mac_port_config = mt7531_mac_config, - .mac_pcs_an_restart = mt7531_sgmii_restart_an, - .mac_pcs_link_up = mt7531_sgmii_link_up_force, }, }; @@ -3246,9 +3211,8 @@ mt7530_probe(struct mdio_device *mdiodev) */ if (!priv->info->sw_setup || !priv->info->pad_setup || !priv->info->phy_read || !priv->info->phy_write || - !priv->info->phy_mode_supported || - !priv->info->mac_port_validate || - !priv->info->mac_port_get_state || !priv->info->mac_port_config) + !priv->info->mac_port_get_caps || + !priv->info->mac_port_config) return -EINVAL; priv->id = priv->info->id; diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 91508e2feef9..71e36b69b96d 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -741,6 +741,12 @@ static const char *p5_intf_modes(unsigned int p5_interface) struct mt7530_priv; +struct mt753x_pcs { + struct phylink_pcs pcs; + struct mt7530_priv *priv; + int port; +}; + /* struct mt753x_info - This is the main data structure for holding the specific * part for each supported device * @sw_setup: Holding the handler to a device initialization @@ -752,36 +758,27 @@ struct mt7530_priv; * port * @mac_port_validate: Holding the way to set addition validate type for a * certan MAC port - * @mac_port_get_state: Holding the way getting the MAC/PCS state for a certain - * MAC port * @mac_port_config: Holding the way setting up the PHY attribute to a * certain MAC port - * @mac_pcs_an_restart Holding the way restarting PCS autonegotiation for a - * certain MAC port - * @mac_pcs_link_up: Holding the way setting up the PHY attribute to the pcs - * of the certain MAC port */ struct mt753x_info { enum mt753x_id id; + const struct phylink_pcs_ops *pcs_ops; + int (*sw_setup)(struct dsa_switch *ds); int (*phy_read)(struct mt7530_priv *priv, int port, int regnum); int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val); int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface); int (*cpu_port_config)(struct dsa_switch *ds, int port); - bool (*phy_mode_supported)(struct dsa_switch *ds, int port, - const struct phylink_link_state *state); + void (*mac_port_get_caps)(struct dsa_switch *ds, int port, + struct phylink_config *config); void (*mac_port_validate)(struct dsa_switch *ds, int 
port, + phy_interface_t interface, unsigned long *supported); - int (*mac_port_get_state)(struct dsa_switch *ds, int port, - struct phylink_link_state *state); int (*mac_port_config)(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface); - void (*mac_pcs_an_restart)(struct dsa_switch *ds, int port); - void (*mac_pcs_link_up)(struct dsa_switch *ds, int port, - unsigned int mode, phy_interface_t interface, - int speed, int duplex); }; /* struct mt7530_priv - This is the main data structure for holding the state @@ -823,6 +820,7 @@ struct mt7530_priv { u8 mirror_tx; struct mt7530_port ports[MT7530_NUM_PORTS]; + struct mt753x_pcs pcs[MT7530_NUM_PORTS]; /* protect among processes for registers access*/ struct mutex reg_mutex; int irq; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 64f4fdd02902..53fd12e7a21c 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -6276,6 +6276,32 @@ static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip) return 0; } +static int mv88e6xxx_single_chip_detect(struct mv88e6xxx_chip *chip, + struct mdio_device *mdiodev) +{ + int err; + + /* dual_chip takes precedence over single/multi-chip modes */ + if (chip->info->dual_chip) + return -EINVAL; + + /* If the mdio addr is 16, indicating the first port address of a switch + * (e.g. mv88e6*41) in single chip addressing mode, the device may be + * configured in single chip addressing mode. Set up the smi access in + * single chip addressing mode and attempt to detect the model of the + * switch; if this fails, the device is not configured in single chip + * addressing mode. + */ + if (mdiodev->addr != 16) + return -EINVAL; + + err = mv88e6xxx_smi_init(chip, mdiodev->bus, 0); + if (err) + return err; + + return mv88e6xxx_detect(chip); +} + static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev) { struct mv88e6xxx_chip *chip; @@ -6830,11 +6856,11 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_vlan_add = mv88e6xxx_port_vlan_add, .port_vlan_del = mv88e6xxx_port_vlan_del, .vlan_msti_set = mv88e6xxx_vlan_msti_set, - .port_fdb_add = mv88e6xxx_port_fdb_add, - .port_fdb_del = mv88e6xxx_port_fdb_del, - .port_fdb_dump = mv88e6xxx_port_fdb_dump, - .port_mdb_add = mv88e6xxx_port_mdb_add, - .port_mdb_del = mv88e6xxx_port_mdb_del, + .port_fdb_add = mv88e6xxx_port_fdb_add, + .port_fdb_del = mv88e6xxx_port_fdb_del, + .port_fdb_dump = mv88e6xxx_port_fdb_dump, + .port_mdb_add = mv88e6xxx_port_mdb_add, + .port_mdb_del = mv88e6xxx_port_mdb_del, .port_mirror_add = mv88e6xxx_port_mirror_add, .port_mirror_del = mv88e6xxx_port_mirror_del, .crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join, @@ -6959,10 +6985,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) chip->info = compat_info; - err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr); - if (err) - goto out; - chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(chip->reset)) { err = PTR_ERR(chip->reset); @@ -6971,9 +6993,19 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) if (chip->reset) usleep_range(1000, 2000); - err = mv88e6xxx_detect(chip); - if (err) - goto out; + /* Detect if the device is configured in single chip addressing mode; + * otherwise continue with address-specific smi init/detection.
+ */ + err = mv88e6xxx_single_chip_detect(chip, mdiodev); + if (err) { + err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr); + if (err) + goto out; + + err = mv88e6xxx_detect(chip); + if (err) + goto out; + } if (chip->info->edsa_support == MV88E6XXX_EDSA_SUPPORTED) chip->tag_protocol = DSA_TAG_PROTO_EDSA; diff --git a/drivers/net/dsa/mv88e6xxx/port_hidden.c b/drivers/net/dsa/mv88e6xxx/port_hidden.c index b49d05f0e117..7a9f9ff6dedf 100644 --- a/drivers/net/dsa/mv88e6xxx/port_hidden.c +++ b/drivers/net/dsa/mv88e6xxx/port_hidden.c @@ -40,8 +40,9 @@ int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip) { int bit = __bf_shf(MV88E6XXX_PORT_RESERVED_1A_BUSY); - return mv88e6xxx_wait_bit(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT, - MV88E6XXX_PORT_RESERVED_1A, bit, 0); + return mv88e6xxx_port_wait_bit(chip, + MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT, + MV88E6XXX_PORT_RESERVED_1A, bit, 0); } int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port, diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 413b0006e9a2..33cb124ca912 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -670,6 +670,8 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu, struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); enum dsa_tag_protocol old_proto = felix->tag_proto; + bool cpu_port_active = false; + struct dsa_port *dp; int err; if (proto != DSA_TAG_PROTO_SEVILLE && @@ -677,6 +679,27 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu, proto != DSA_TAG_PROTO_OCELOT_8021Q) return -EPROTONOSUPPORT; + /* We don't support multiple CPU ports, yet the DT blob may have + * multiple CPU ports defined. The first CPU port is the active one, + * the others are inactive. In this case, DSA will call + * ->change_tag_protocol() multiple times, once per CPU port. + * Since we implement the tagging protocol change towards "ocelot" or + * "seville" as effectively initializing the NPI port, what we are + * doing is effectively changing who the NPI port is to the last @cpu + * argument passed, which is an unused DSA CPU port and not the one + * that should actively pass traffic. + * Suppress DSA's calls on CPU ports that are inactive. 
+ */ + dsa_switch_for_each_user_port(dp, ds) { + if (dp->cpu_dp->index == cpu) { + cpu_port_active = true; + break; + } + } + + if (!cpu_port_active) + return 0; + felix_del_tag_protocol(ds, cpu, old_proto); err = felix_set_tag_protocol(ds, cpu, proto); @@ -1174,7 +1197,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) ocelot->map = felix->info->map; ocelot->stats_layout = felix->info->stats_layout; - ocelot->num_stats = felix->info->num_stats; ocelot->num_mact_rows = felix->info->num_mact_rows; ocelot->vcap = felix->info->vcap; ocelot->vcap_pol.base = felix->info->vcap_pol_base; diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index f083b06fdfe9..39faf1027965 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -24,7 +24,6 @@ struct felix_info { const u32 *port_modes; int num_mact_rows; const struct ocelot_stat_layout *stats_layout; - unsigned int num_stats; int num_ports; int num_tx_queues; struct vcap_props *vcap; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 8d382b27e625..081871824eaf 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -638,6 +638,7 @@ static const struct ocelot_stat_layout vsc9959_stats_layout[] = { { .offset = 0x10F, .name = "drop_green_prio_5", }, { .offset = 0x110, .name = "drop_green_prio_6", }, { .offset = 0x111, .name = "drop_green_prio_7", }, + OCELOT_STAT_END }; static const struct vcap_field vsc9959_vcap_es0_keys[] = { @@ -2216,7 +2217,6 @@ static const struct felix_info felix_info_vsc9959 = { .map = vsc9959_regmap, .ops = &vsc9959_ops, .stats_layout = vsc9959_stats_layout, - .num_stats = ARRAY_SIZE(vsc9959_stats_layout), .vcap = vsc9959_vcap_props, .vcap_pol_base = VSC9959_VCAP_POLICER_BASE, .vcap_pol_max = VSC9959_VCAP_POLICER_MAX, @@ -2316,7 +2316,7 @@ static int felix_pci_probe(struct pci_dev *pdev, err = dsa_register_switch(ds); if (err) { - dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err); + dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n"); goto err_register_ds; } diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index 68ef8f111bbe..48fd43a93364 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -636,6 +636,7 @@ static const struct ocelot_stat_layout vsc9953_stats_layout[] = { { .offset = 0x8F, .name = "drop_green_prio_5", }, { .offset = 0x90, .name = "drop_green_prio_6", }, { .offset = 0x91, .name = "drop_green_prio_7", }, + OCELOT_STAT_END }; static const struct vcap_field vsc9953_vcap_es0_keys[] = { @@ -1086,7 +1087,6 @@ static const struct felix_info seville_info_vsc9953 = { .map = vsc9953_regmap, .ops = &vsc9953_ops, .stats_layout = vsc9953_stats_layout, - .num_stats = ARRAY_SIZE(vsc9953_stats_layout), .vcap = vsc9953_vcap_props, .vcap_pol_base = VSC9953_VCAP_POLICER_BASE, .vcap_pol_max = VSC9953_VCAP_POLICER_MAX, diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index d3ed0a7f8077..2727d3169c25 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -1287,87 +1287,71 @@ qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum) if (ret >= 0) return ret; - return qca8k_mdio_read(priv, phy, regnum); + ret = qca8k_mdio_read(priv, phy, regnum); + + if (ret < 0) + return 0xffff; + + return ret; } static int -qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data) 
+qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data) { - struct qca8k_priv *priv = ds->priv; - int ret; + port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; - /* Check if the legacy mapping should be used and the - * port is not correctly mapped to the right PHY in the - * devicetree - */ - if (priv->legacy_phy_port_mapping) - port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; - - /* Use mdio Ethernet when available, fallback to legacy one on error */ - ret = qca8k_phy_eth_command(priv, false, port, regnum, 0); - if (!ret) - return ret; - - return qca8k_mdio_write(priv, port, regnum, data); + return qca8k_internal_mdio_write(slave_bus, port, regnum, data); } static int -qca8k_phy_read(struct dsa_switch *ds, int port, int regnum) +qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum) { - struct qca8k_priv *priv = ds->priv; - int ret; + port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; - /* Check if the legacy mapping should be used and the - * port is not correctly mapped to the right PHY in the - * devicetree - */ - if (priv->legacy_phy_port_mapping) - port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; - - /* Use mdio Ethernet when available, fallback to legacy one on error */ - ret = qca8k_phy_eth_command(priv, true, port, regnum, 0); - if (ret >= 0) - return ret; - - ret = qca8k_mdio_read(priv, port, regnum); - - if (ret < 0) - return 0xffff; - - return ret; + return qca8k_internal_mdio_read(slave_bus, port, regnum); } static int -qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio) +qca8k_mdio_register(struct qca8k_priv *priv) { struct dsa_switch *ds = priv->ds; + struct device_node *mdio; struct mii_bus *bus; bus = devm_mdiobus_alloc(ds->dev); - if (!bus) return -ENOMEM; bus->priv = (void *)priv; - bus->name = "qca8k slave mii"; - bus->read = qca8k_internal_mdio_read; - bus->write = qca8k_internal_mdio_write; - snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d", - ds->index); - + snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d", + ds->dst->index, ds->index); bus->parent = ds->dev; bus->phy_mask = ~ds->phys_mii_mask; - ds->slave_mii_bus = bus; - return devm_of_mdiobus_register(priv->dev, bus, mdio); + /* Check if the devicetree declares the port:phy mapping */ + mdio = of_get_child_by_name(priv->dev->of_node, "mdio"); + if (of_device_is_available(mdio)) { + bus->name = "qca8k slave mii"; + bus->read = qca8k_internal_mdio_read; + bus->write = qca8k_internal_mdio_write; + return devm_of_mdiobus_register(priv->dev, bus, mdio); + } + + /* If a mapping can't be found, the legacy mapping is used, + * using the qca8k_port_to_phy function + */ + bus->name = "qca8k-legacy slave mii"; + bus->read = qca8k_legacy_mdio_read; + bus->write = qca8k_legacy_mdio_write; + return devm_mdiobus_register(priv->dev, bus); } static int qca8k_setup_mdio_bus(struct qca8k_priv *priv) { u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg; - struct device_node *ports, *port, *mdio; + struct device_node *ports, *port; phy_interface_t mode; int err; @@ -1429,24 +1413,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv) QCA8K_MDIO_MASTER_EN); } - /* Check if the devicetree declare the port:phy mapping */ - mdio = of_get_child_by_name(priv->dev->of_node, "mdio"); - if (of_device_is_available(mdio)) { - err = qca8k_mdio_register(priv, mdio); - if (err) - of_node_put(mdio); - - return err; - } - - /* If a mapping can't be found the legacy mapping is used, - * using the qca8k_port_to_phy function - */ - priv->legacy_phy_port_mapping = true; - priv->ops.phy_read =
qca8k_phy_read; - priv->ops.phy_write = qca8k_phy_write; - - return 0; + return qca8k_mdio_register(priv); } static int @@ -2346,7 +2313,7 @@ qca8k_port_enable(struct dsa_switch *ds, int port, struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; qca8k_port_set_status(priv, port, 1); - priv->port_sts[port].enabled = 1; + priv->port_enabled_map |= BIT(port); if (dsa_is_user_port(ds, port)) phy_support_asym_pause(phy); @@ -2360,23 +2327,25 @@ qca8k_port_disable(struct dsa_switch *ds, int port) struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; qca8k_port_set_status(priv, port, 0); - priv->port_sts[port].enabled = 0; + priv->port_enabled_map &= ~BIT(port); } static int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) { struct qca8k_priv *priv = ds->priv; - int i, mtu = 0; - priv->port_mtu[port] = new_mtu; - - for (i = 0; i < QCA8K_NUM_PORTS; i++) - if (priv->port_mtu[i] > mtu) - mtu = priv->port_mtu[i]; + /* We only have a general MTU setting. + * DSA always sets the CPU port's MTU to the largest MTU of the slave + * ports. + * Setting the MTU just for the CPU port is sufficient to correctly set a + * value for every port. + */ + if (!dsa_is_cpu_port(ds, port)) + return 0; /* Include L2 header / FCS length */ - return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN); + return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN); } static int @@ -3033,16 +3002,6 @@ qca8k_setup(struct dsa_switch *ds) QCA8K_PORT_HOL_CTRL1_WRED_EN, mask); } - - /* Set initial MTU for every port. - * We have only have a general MTU setting. So track - * every port and set the max across all port. - * Set per port MTU to 1500 as the MTU change function - * will add the overhead and if its set to 1518 then it - * will apply the overhead again and we will end up with - * MTU of 1536 instead of 1518 - */ - priv->port_mtu[i] = ETH_DATA_LEN; } /* Special GLOBAL_FC_THRESH values are needed for ar8327 switch */ @@ -3202,8 +3161,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev) priv->ds->dev = &mdiodev->dev; priv->ds->num_ports = QCA8K_NUM_PORTS; priv->ds->priv = priv; - priv->ops = qca8k_switch_ops; - priv->ds->ops = &priv->ops; + priv->ds->ops = &qca8k_switch_ops; mutex_init(&priv->reg_mutex); dev_set_drvdata(&mdiodev->dev, priv); @@ -3243,13 +3201,16 @@ static void qca8k_sw_shutdown(struct mdio_device *mdiodev) static void qca8k_set_pm(struct qca8k_priv *priv, int enable) { - int i; + int port; - for (i = 0; i < QCA8K_NUM_PORTS; i++) { - if (!priv->port_sts[i].enabled) + for (port = 0; port < QCA8K_NUM_PORTS; port++) { + /* Do not enable on resume if the port was + * disabled before. + */ + if (!(priv->port_enabled_map & BIT(port))) continue; - qca8k_port_set_status(priv, i, enable); + qca8k_port_set_status(priv, port, enable); } } diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index f375627174c8..04408e11402a 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -324,10 +324,6 @@ enum qca8k_mid_cmd { QCA8K_MIB_CAST = 3, }; -struct ar8xxx_port_status { - int enabled; -}; - struct qca8k_match_data { u8 id; bool reduced_package; @@ -388,17 +384,17 @@ struct qca8k_priv { u8 mirror_rx; u8 mirror_tx; u8 lag_hash_mode; - bool legacy_phy_port_mapping; + /* Each bit corresponds to a port. This switch can support a max of 7 ports. + * Bit set: port enabled. Bit clear: port disabled.
+ */ + u8 port_enabled_map; struct qca8k_ports_config ports_config; struct regmap *regmap; struct mii_bus *bus; - struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; struct dsa_switch *ds; struct mutex reg_mutex; struct device *dev; - struct dsa_switch_ops ops; struct gpio_desc *reset_gpio; - unsigned int port_mtu[QCA8K_NUM_PORTS]; struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ struct qca8k_mgmt_eth_data mgmt_eth_data; struct qca8k_mib_eth_data mib_eth_data; diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig index 1aa79735355f..060165a85fb7 100644 --- a/drivers/net/dsa/realtek/Kconfig +++ b/drivers/net/dsa/realtek/Kconfig @@ -9,34 +9,46 @@ menuconfig NET_DSA_REALTEK help Select to enable support for Realtek Ethernet switch chips. + Note that at least one interface driver must be enabled for the + subdrivers to be loaded. Moreover, an interface driver cannot achieve + anything without at least one subdriver enabled. + +if NET_DSA_REALTEK + config NET_DSA_REALTEK_MDIO - tristate "Realtek MDIO connected switch driver" - depends on NET_DSA_REALTEK + tristate "Realtek MDIO interface driver" depends on OF + depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB + depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB + depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB help Select to enable support for registering switches configured through MDIO. config NET_DSA_REALTEK_SMI - tristate "Realtek SMI connected switch driver" - depends on NET_DSA_REALTEK + tristate "Realtek SMI interface driver" depends on OF + depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB + depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB + depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB help Select to enable support for registering switches connected through SMI. config NET_DSA_REALTEK_RTL8365MB tristate "Realtek RTL8365MB switch subdriver" - depends on NET_DSA_REALTEK - depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO + imply NET_DSA_REALTEK_SMI + imply NET_DSA_REALTEK_MDIO select NET_DSA_TAG_RTL8_4 help Select to enable support for Realtek RTL8365MB-VC and RTL8367S. config NET_DSA_REALTEK_RTL8366RB tristate "Realtek RTL8366RB switch subdriver" - depends on NET_DSA_REALTEK - depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO + imply NET_DSA_REALTEK_SMI + imply NET_DSA_REALTEK_MDIO select NET_DSA_TAG_RTL4_A help - Select to enable support for Realtek RTL8366RB + Select to enable support for Realtek RTL8366RB. 
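# Illustrative note, not part of the patch: the paired "depends on" lines
# above rely on the kconfig idiom "depends on A || !A". For a tristate A this
# expression evaluates to y when A=y or A=n, but only to m when A=m, so a
# line such as
#
#   depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
#
# caps the interface driver at m whenever that subdriver is modular,
# preventing a built-in interface driver from calling into module code, while
# the subdrivers' "imply" lines turn the interface drivers on by default.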
+ +endif diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c index 31e1f100e48e..c58f49d558d2 100644 --- a/drivers/net/dsa/realtek/realtek-mdio.c +++ b/drivers/net/dsa/realtek/realtek-mdio.c @@ -267,7 +267,6 @@ static const struct of_device_id realtek_mdio_of_match[] = { #endif #if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB) { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, }, - { .compatible = "realtek,rtl8367s", .data = &rtl8365mb_variant, }, #endif { /* sentinel */ }, }; diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c index 2243d3da55b2..45992f79ec8d 100644 --- a/drivers/net/dsa/realtek/realtek-smi.c +++ b/drivers/net/dsa/realtek/realtek-smi.c @@ -546,20 +546,11 @@ static const struct of_device_id realtek_smi_of_match[] = { .data = &rtl8366rb_variant, }, #endif - { - /* FIXME: add support for RTL8366S and more */ - .compatible = "realtek,rtl8366s", - .data = NULL, - }, #if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB) { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, }, - { - .compatible = "realtek,rtl8367s", - .data = &rtl8365mb_variant, - }, #endif { /* sentinel */ }, }; diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index b33841c6507a..72b6fc1932b5 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -2252,14 +2252,13 @@ int sja1105_static_config_reload(struct sja1105_private *priv, * change it through the dynamic interface later. */ for (i = 0; i < ds->num_ports; i++) { - u32 reg_addr = mdiobus_c45_addr(MDIO_MMD_VEND2, MDIO_CTRL1); - speed_mbps[i] = sja1105_port_speed_to_ethtool(priv, mac[i].speed); mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO]; if (priv->xpcs[i]) - bmcr[i] = mdiobus_read(priv->mdio_pcs, i, reg_addr); + bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i, + MDIO_MMD_VEND2, MDIO_CTRL1); } /* No PTP operations can run right now */ diff --git a/drivers/net/eql.c b/drivers/net/eql.c index 1111d1f33865..557ca8ff9dec 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -425,14 +425,13 @@ static int eql_enslave(struct net_device *master_dev, slaving_request_t __user * if ((master_dev->flags & IFF_UP) == IFF_UP) { /* slave is not a master & not already a slave: */ if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) { - slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL); + slave_t *s = kzalloc(sizeof(*s), GFP_KERNEL); equalizer_t *eql = netdev_priv(master_dev); int ret; if (!s) return -ENOMEM; - memset(s, 0, sizeof(*s)); s->dev = slave_dev; s->priority = srq.priority; s->priority_bps = srq.priority; diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index bd4cb9d7c35d..827993022386 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -35,15 +35,6 @@ source "drivers/net/ethernet/aquantia/Kconfig" source "drivers/net/ethernet/arc/Kconfig" source "drivers/net/ethernet/asix/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" -source "drivers/net/ethernet/broadcom/Kconfig" -source "drivers/net/ethernet/brocade/Kconfig" -source "drivers/net/ethernet/cadence/Kconfig" -source "drivers/net/ethernet/calxeda/Kconfig" -source "drivers/net/ethernet/cavium/Kconfig" -source "drivers/net/ethernet/chelsio/Kconfig" -source "drivers/net/ethernet/cirrus/Kconfig" -source "drivers/net/ethernet/cisco/Kconfig" -source "drivers/net/ethernet/cortina/Kconfig" config CX_ECAT tristate "Beckhoff CX5020 EtherCAT master support" @@ -57,6 
+48,14 @@ config CX_ECAT To compile this driver as a module, choose M here. The module will be called ec_bhf. +source "drivers/net/ethernet/broadcom/Kconfig" +source "drivers/net/ethernet/cadence/Kconfig" +source "drivers/net/ethernet/calxeda/Kconfig" +source "drivers/net/ethernet/cavium/Kconfig" +source "drivers/net/ethernet/chelsio/Kconfig" +source "drivers/net/ethernet/cirrus/Kconfig" +source "drivers/net/ethernet/cisco/Kconfig" +source "drivers/net/ethernet/cortina/Kconfig" source "drivers/net/ethernet/davicom/Kconfig" config DNET @@ -85,7 +84,6 @@ source "drivers/net/ethernet/huawei/Kconfig" source "drivers/net/ethernet/i825xx/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" -source "drivers/net/ethernet/microsoft/Kconfig" source "drivers/net/ethernet/xscale/Kconfig" config JME @@ -128,8 +126,9 @@ source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" source "drivers/net/ethernet/micrel/Kconfig" source "drivers/net/ethernet/microchip/Kconfig" -source "drivers/net/ethernet/moxa/Kconfig" source "drivers/net/ethernet/mscc/Kconfig" +source "drivers/net/ethernet/microsoft/Kconfig" +source "drivers/net/ethernet/moxa/Kconfig" source "drivers/net/ethernet/myricom/Kconfig" config FEALNX @@ -141,10 +140,10 @@ config FEALNX Say Y here to support the Myson MTD-800 family of PCI-based Ethernet cards. <http://www.myson.com.tw/> +source "drivers/net/ethernet/ni/Kconfig" source "drivers/net/ethernet/natsemi/Kconfig" source "drivers/net/ethernet/neterion/Kconfig" source "drivers/net/ethernet/netronome/Kconfig" -source "drivers/net/ethernet/ni/Kconfig" source "drivers/net/ethernet/8390/Kconfig" source "drivers/net/ethernet/nvidia/Kconfig" source "drivers/net/ethernet/nxp/Kconfig" @@ -164,6 +163,7 @@ source "drivers/net/ethernet/packetengines/Kconfig" source "drivers/net/ethernet/pasemi/Kconfig" source "drivers/net/ethernet/pensando/Kconfig" source "drivers/net/ethernet/qlogic/Kconfig" +source "drivers/net/ethernet/brocade/Kconfig" source "drivers/net/ethernet/qualcomm/Kconfig" source "drivers/net/ethernet/rdc/Kconfig" source "drivers/net/ethernet/realtek/Kconfig" @@ -171,10 +171,10 @@ source "drivers/net/ethernet/renesas/Kconfig" source "drivers/net/ethernet/rocker/Kconfig" source "drivers/net/ethernet/samsung/Kconfig" source "drivers/net/ethernet/seeq/Kconfig" -source "drivers/net/ethernet/sfc/Kconfig" source "drivers/net/ethernet/sgi/Kconfig" source "drivers/net/ethernet/silan/Kconfig" source "drivers/net/ethernet/sis/Kconfig" +source "drivers/net/ethernet/sfc/Kconfig" source "drivers/net/ethernet/smsc/Kconfig" source "drivers/net/ethernet/socionext/Kconfig" source "drivers/net/ethernet/stmicro/Kconfig" diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h index 3add305d34b4..4eecbdfff3ff 100644 --- a/drivers/net/ethernet/alacritech/slic.h +++ b/drivers/net/ethernet/alacritech/slic.h @@ -265,8 +265,6 @@ #define SLIC_NUM_STAT_DESC_ARRAYS 4 #define SLIC_INVALID_STAT_DESC_IDX 0xffffffff -#define SLIC_NAPI_WEIGHT 64 - #define SLIC_UPR_LSTAT 0 #define SLIC_UPR_CONFIG 1 diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index 1fc9a1cd3ef8..ce353b0c02a3 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -1803,7 +1803,7 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto unmap; } - netif_napi_add(dev, &sdev->napi, slic_poll, SLIC_NAPI_WEIGHT); + 
netif_napi_add(dev, &sdev->napi, slic_poll, NAPI_POLL_WEIGHT); netif_carrier_off(dev); err = register_netdev(dev); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 07444aead3fd..6a356a6cee15 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -31,8 +31,6 @@ MODULE_LICENSE("GPL"); #define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus()) -#define ENA_NAPI_BUDGET 64 - #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \ NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) @@ -2270,7 +2268,7 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter, netif_napi_add(adapter->netdev, &napi->napi, ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll, - ENA_NAPI_BUDGET); + NAPI_POLL_WEIGHT); if (!ENA_IS_XDP_INDEX(adapter, i)) { napi->rx_ring = &adapter->rx_ring[i]; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 52b9833fda99..7e9c74b141ef 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -40,6 +40,7 @@ #define AQ_CFG_RX_HDR_SIZE 256U #define AQ_CFG_RX_PAGEORDER 0U +#define AQ_CFG_XDP_PAGEORDER 2U /* LRO */ #define AQ_CFG_IS_LRO_DEF 1U @@ -64,8 +65,6 @@ */ #define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2) -#define AQ_CFG_NAPI_WEIGHT 64U - /*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/ #define AQ_CFG_FC_MODE AQ_NIC_FC_FULL diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index a418238f6309..1daecd483b8d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -97,6 +97,15 @@ static const char * const aq_ethtool_queue_rx_stat_names[] = { "%sQueue[%d] AllocFails", "%sQueue[%d] SkbAllocFails", "%sQueue[%d] Polls", + "%sQueue[%d] PageFlips", + "%sQueue[%d] PageReuses", + "%sQueue[%d] PageFrees", + "%sQueue[%d] XdpAbort", + "%sQueue[%d] XdpDrop", + "%sQueue[%d] XdpPass", + "%sQueue[%d] XdpTx", + "%sQueue[%d] XdpInvalid", + "%sQueue[%d] XdpRedirect", }; static const char * const aq_ethtool_queue_tx_stat_names[] = { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index e65ce7199dac..88595863d8bc 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -14,17 +14,22 @@ #include "aq_ptp.h" #include "aq_filters.h" #include "aq_hw_utils.h" +#include "aq_vec.h" #include <linux/netdevice.h> #include <linux/module.h> #include <linux/ip.h> #include <linux/udp.h> #include <net/pkt_cls.h> +#include <linux/filter.h> MODULE_LICENSE("GPL v2"); MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR); MODULE_DESCRIPTION(AQ_CFG_DRV_DESC); +DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key); +EXPORT_SYMBOL(aq_xdp_locking_key); + static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME; static const struct net_device_ops aq_ndev_ops; @@ -126,9 +131,19 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) { + int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN; struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct bpf_prog *prog; int err; + prog = READ_ONCE(aq_nic->xdp_prog); + if (prog && !prog->aux->xdp_has_frags && + 
new_frame_size > AQ_CFG_RX_FRAME_MAX) { + netdev_err(ndev, "Illegal MTU %d for XDP prog without frags\n", + ndev->mtu); + return -EOPNOTSUPP; + } + err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN); if (err < 0) @@ -204,6 +219,25 @@ err_exit: return err; } +static netdev_features_t aq_ndev_fix_features(struct net_device *ndev, + netdev_features_t features) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct bpf_prog *prog; + + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + prog = READ_ONCE(aq_nic->xdp_prog); + if (prog && !prog->aux->xdp_has_frags && + aq_nic->xdp_prog && features & NETIF_F_LRO) { + netdev_err(ndev, "LRO is not supported with single buffer XDP, disabling\n"); + features &= ~NETIF_F_LRO; + } + + return features; +} + static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr) { struct aq_nic_s *aq_nic = netdev_priv(ndev); @@ -410,6 +444,56 @@ static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type, mqprio->qopt.prio_tc_map); } +static int aq_xdp_setup(struct net_device *ndev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + bool need_update, running = netif_running(ndev); + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct bpf_prog *old_prog; + + if (prog && !prog->aux->xdp_has_frags) { + if (ndev->mtu > AQ_CFG_RX_FRAME_MAX) { + NL_SET_ERR_MSG_MOD(extack, + "prog does not support XDP frags"); + return -EOPNOTSUPP; + } + + if (prog && ndev->features & NETIF_F_LRO) { + netdev_err(ndev, + "LRO is not supported with single buffer XDP, disabling\n"); + ndev->features &= ~NETIF_F_LRO; + } + } + + need_update = !!aq_nic->xdp_prog != !!prog; + if (running && need_update) + aq_ndev_close(ndev); + + old_prog = xchg(&aq_nic->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (!old_prog && prog) + static_branch_inc(&aq_xdp_locking_key); + else if (old_prog && !prog) + static_branch_dec(&aq_xdp_locking_key); + + if (running && need_update) + return aq_ndev_open(ndev); + + return 0; +} + +static int aq_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return aq_xdp_setup(dev, xdp->prog, xdp->extack); + default: + return -EINVAL; + } +} + static const struct net_device_ops aq_ndev_ops = { .ndo_open = aq_ndev_open, .ndo_stop = aq_ndev_close, @@ -418,10 +502,13 @@ static const struct net_device_ops aq_ndev_ops = { .ndo_change_mtu = aq_ndev_change_mtu, .ndo_set_mac_address = aq_ndev_set_mac_address, .ndo_set_features = aq_ndev_set_features, + .ndo_fix_features = aq_ndev_fix_features, .ndo_eth_ioctl = aq_ndev_ioctl, .ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid, .ndo_setup_tc = aq_ndo_setup_tc, + .ndo_bpf = aq_xdp, + .ndo_xdp_xmit = aq_xdp_xmit, }; static int __init aq_ndev_init_module(void) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h index a5a624b9ce73..99870865f66d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h @@ -12,6 +12,8 @@ #include "aq_common.h" #include "aq_nic.h" +DECLARE_STATIC_KEY_FALSE(aq_xdp_locking_key); + void aq_ndev_schedule_work(struct work_struct *work); struct net_device *aq_ndev_alloc(void); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 33f1a1377588..e11cc29d3264 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c 
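The xdp_has_frags checks in the aq_main.c hunks above key off how the attached program was loaded. As a minimal sketch, not part of the patch and assuming libbpf's "xdp.frags" section convention (which loads the program with BPF_F_XDP_HAS_FRAGS, the flag that prog->aux->xdp_has_frags reflects), a frags-aware program lets the driver accept an MTU above AQ_CFG_RX_FRAME_MAX and keep LRO enabled:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical example: the "xdp.frags" section name makes libbpf load the
 * program with BPF_F_XDP_HAS_FRAGS, so multi-buffer frames may be handed
 * to it.
 */
SEC("xdp.frags")
int xdp_mb_pass(struct xdp_md *ctx)
{
	return XDP_PASS; /* pass every frame, single- or multi-buffer */
}

char _license[] SEC("license") = "GPL";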
@@ -486,8 +486,8 @@ int aq_nic_start(struct aq_nic_s *self) if (err < 0) goto err_exit; - for (i = 0U, aq_vec = self->aq_vec[0]; - self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + for (i = 0U; self->aq_vecs > i; ++i) { + aq_vec = self->aq_vec[i]; err = aq_vec_start(aq_vec); if (err < 0) goto err_exit; @@ -517,8 +517,8 @@ int aq_nic_start(struct aq_nic_s *self) mod_timer(&self->polling_timer, jiffies + AQ_CFG_POLLING_TIMER_INTERVAL); } else { - for (i = 0U, aq_vec = self->aq_vec[0]; - self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + for (i = 0U; self->aq_vecs > i; ++i) { + aq_vec = self->aq_vec[i]; err = aq_pci_func_alloc_irq(self, i, self->ndev->name, aq_vec_isr, aq_vec, aq_vec_get_affinity_mask(aq_vec)); @@ -569,6 +569,103 @@ err_exit: return err; } +static unsigned int aq_nic_map_xdp(struct aq_nic_s *self, + struct xdp_frame *xdpf, + struct aq_ring_s *ring) +{ + struct device *dev = aq_nic_get_dev(self); + struct aq_ring_buff_s *first = NULL; + unsigned int dx = ring->sw_tail; + struct aq_ring_buff_s *dx_buff; + struct skb_shared_info *sinfo; + unsigned int frag_count = 0U; + unsigned int nr_frags = 0U; + unsigned int ret = 0U; + u16 total_len; + + dx_buff = &ring->buff_ring[dx]; + dx_buff->flags = 0U; + + sinfo = xdp_get_shared_info_from_frame(xdpf); + total_len = xdpf->len; + dx_buff->len = total_len; + if (xdp_frame_has_frags(xdpf)) { + nr_frags = sinfo->nr_frags; + total_len += sinfo->xdp_frags_size; + } + dx_buff->pa = dma_map_single(dev, xdpf->data, dx_buff->len, + DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(dev, dx_buff->pa))) + goto exit; + + first = dx_buff; + dx_buff->len_pkt = total_len; + dx_buff->is_sop = 1U; + dx_buff->is_mapped = 1U; + ++ret; + + for (; nr_frags--; ++frag_count) { + skb_frag_t *frag = &sinfo->frags[frag_count]; + unsigned int frag_len = skb_frag_size(frag); + unsigned int buff_offset = 0U; + unsigned int buff_size = 0U; + dma_addr_t frag_pa; + + while (frag_len) { + if (frag_len > AQ_CFG_TX_FRAME_MAX) + buff_size = AQ_CFG_TX_FRAME_MAX; + else + buff_size = frag_len; + + frag_pa = skb_frag_dma_map(dev, frag, buff_offset, + buff_size, DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(dev, frag_pa))) + goto mapping_error; + + dx = aq_ring_next_dx(ring, dx); + dx_buff = &ring->buff_ring[dx]; + + dx_buff->flags = 0U; + dx_buff->len = buff_size; + dx_buff->pa = frag_pa; + dx_buff->is_mapped = 1U; + dx_buff->eop_index = 0xffffU; + + frag_len -= buff_size; + buff_offset += buff_size; + + ++ret; + } + } + + first->eop_index = dx; + dx_buff->is_eop = 1U; + dx_buff->skb = NULL; + dx_buff->xdpf = xdpf; + goto exit; + +mapping_error: + for (dx = ring->sw_tail; + ret > 0; + --ret, dx = aq_ring_next_dx(ring, dx)) { + dx_buff = &ring->buff_ring[dx]; + + if (!dx_buff->pa) + continue; + if (unlikely(dx_buff->is_sop)) + dma_unmap_single(dev, dx_buff->pa, dx_buff->len, + DMA_TO_DEVICE); + else + dma_unmap_page(dev, dx_buff->pa, dx_buff->len, + DMA_TO_DEVICE); + } + +exit: + return ret; +} + unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, struct aq_ring_s *ring) { @@ -697,6 +794,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, first->eop_index = dx; dx_buff->is_eop = 1U; dx_buff->skb = skb; + dx_buff->xdpf = NULL; goto exit; mapping_error: @@ -725,6 +823,44 @@ exit: return ret; } +int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring, + struct xdp_frame *xdpf) +{ + u16 queue_index = AQ_NIC_RING2QMAP(aq_nic, tx_ring->idx); + struct net_device *ndev = aq_nic_get_ndev(aq_nic); + struct 
skb_shared_info *sinfo; + int cpu = smp_processor_id(); + int err = NETDEV_TX_BUSY; + struct netdev_queue *nq; + unsigned int frags = 1; + + if (xdp_frame_has_frags(xdpf)) { + sinfo = xdp_get_shared_info_from_frame(xdpf); + frags += sinfo->nr_frags; + } + + if (frags > AQ_CFG_SKB_FRAGS_MAX) + return err; + + nq = netdev_get_tx_queue(ndev, tx_ring->idx); + __netif_tx_lock(nq, cpu); + + aq_ring_update_queue_state(tx_ring); + + /* Above status update may stop the queue. Check this. */ + if (__netif_subqueue_stopped(aq_nic_get_ndev(aq_nic), queue_index)) + goto out; + + frags = aq_nic_map_xdp(aq_nic, xdpf, tx_ring); + if (likely(frags)) + err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw, tx_ring, + frags); +out: + __netif_tx_unlock(nq); + + return err; +} + int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) { struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 1a7148041e3d..935ba889bd9a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -11,6 +11,8 @@ #define AQ_NIC_H #include <linux/ethtool.h> +#include <net/xdp.h> +#include <linux/bpf.h> #include "aq_common.h" #include "aq_rss.h" @@ -128,6 +130,7 @@ struct aq_nic_s { struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX]; struct aq_ring_s *aq_ring_tx[AQ_HW_QUEUES_MAX]; struct aq_hw_s *aq_hw; + struct bpf_prog *xdp_prog; struct net_device *ndev; unsigned int aq_vecs; unsigned int packet_filter; @@ -177,6 +180,8 @@ void aq_nic_ndev_free(struct aq_nic_s *self); int aq_nic_start(struct aq_nic_s *self); unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, struct aq_ring_s *ring); +int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring, + struct xdp_frame *xdpf); int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb); int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p); int aq_nic_get_regs_count(struct aq_nic_s *self); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 797a95142d1f..3a529ee8c834 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -444,22 +444,22 @@ err_exit: static int aq_pm_freeze(struct device *dev) { - return aq_suspend_common(dev, false); + return aq_suspend_common(dev, true); } static int aq_pm_suspend_poweroff(struct device *dev) { - return aq_suspend_common(dev, true); + return aq_suspend_common(dev, false); } static int aq_pm_thaw(struct device *dev) { - return atl_resume_common(dev, false); + return atl_resume_common(dev, true); } static int aq_pm_resume_restore(struct device *dev) { - return atl_resume_common(dev, true); + return atl_resume_common(dev, false); } static const struct dev_pm_ops aq_pm_ops = { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index 06de19f63287..275324c9e51e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -1218,7 +1218,7 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec) atomic_set(&aq_ptp->offset_ingress, 0); netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi, - aq_ptp_poll, AQ_CFG_NAPI_WEIGHT); + aq_ptp_poll, NAPI_POLL_WEIGHT); aq_ptp->idx_vector = idx_vec; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c 
b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 77e76c9efd32..ea740210803f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -7,15 +7,37 @@ /* File aq_ring.c: Definition of functions for Rx/Tx rings. */ -#include "aq_ring.h" #include "aq_nic.h" #include "aq_hw.h" #include "aq_hw_utils.h" #include "aq_ptp.h" +#include "aq_vec.h" +#include "aq_main.h" +#include <net/xdp.h> +#include <linux/filter.h> +#include <linux/bpf_trace.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> +static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff, + struct xdp_buff *xdp) +{ + struct skb_shared_info *sinfo; + int i; + + if (xdp_buff_has_frags(xdp)) { + sinfo = xdp_get_shared_info_from_buff(xdp); + + for (i = 0; i < sinfo->nr_frags; i++) { + skb_frag_t *frag = &sinfo->frags[i]; + + page_ref_inc(skb_frag_page(frag)); + } + } + page_ref_inc(buff->rxdata.page); +} + static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev) { unsigned int len = PAGE_SIZE << rxpage->order; @@ -27,9 +49,10 @@ static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev) rxpage->page = NULL; } -static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order, - struct device *dev) +static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring) { + struct device *dev = aq_nic_get_dev(rx_ring->aq_nic); + unsigned int order = rx_ring->page_order; struct page *page; int ret = -ENOMEM; dma_addr_t daddr; @@ -47,7 +70,7 @@ static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order, rxpage->page = page; rxpage->daddr = daddr; rxpage->order = order; - rxpage->pg_off = 0; + rxpage->pg_off = rx_ring->page_offset; return 0; @@ -58,21 +81,26 @@ err_exit: return ret; } -static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf, - int order) +static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf) { + unsigned int order = self->page_order; + u16 page_offset = self->page_offset; + u16 frame_max = self->frame_max; + u16 tail_size = self->tail_size; int ret; if (rxbuf->rxdata.page) { /* One means ring is the only user and can reuse */ if (page_ref_count(rxbuf->rxdata.page) > 1) { /* Try reuse buffer */ - rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX; - if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <= - (PAGE_SIZE << order)) { + rxbuf->rxdata.pg_off += frame_max + page_offset + + tail_size; + if (rxbuf->rxdata.pg_off + frame_max + tail_size <= + (PAGE_SIZE << order)) { u64_stats_update_begin(&self->stats.rx.syncp); self->stats.rx.pg_flips++; u64_stats_update_end(&self->stats.rx.syncp); + } else { /* Buffer exhausted. 
We have other users and * should release this page and realloc @@ -84,7 +112,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf, u64_stats_update_end(&self->stats.rx.syncp); } } else { - rxbuf->rxdata.pg_off = 0; + rxbuf->rxdata.pg_off = page_offset; u64_stats_update_begin(&self->stats.rx.syncp); self->stats.rx.pg_reuses++; u64_stats_update_end(&self->stats.rx.syncp); @@ -92,8 +120,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf, } if (!rxbuf->rxdata.page) { - ret = aq_get_rxpage(&rxbuf->rxdata, order, - aq_nic_get_dev(self->aq_nic)); + ret = aq_alloc_rxpages(&rxbuf->rxdata, self); if (ret) { u64_stats_update_begin(&self->stats.rx.syncp); self->stats.rx.alloc_fails++; @@ -117,6 +144,7 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, err = -ENOMEM; goto err_exit; } + self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), self->size * self->dx_size, &self->dx_ring_pa, GFP_KERNEL); @@ -172,11 +200,22 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, self->idx = idx; self->size = aq_nic_cfg->rxds; self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size; - self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE + - (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1; - - if (aq_nic_cfg->rxpageorder > self->page_order) - self->page_order = aq_nic_cfg->rxpageorder; + self->xdp_prog = aq_nic->xdp_prog; + self->frame_max = AQ_CFG_RX_FRAME_MAX; + + /* Only order-2 is allowed if XDP is enabled */ + if (READ_ONCE(self->xdp_prog)) { + self->page_offset = AQ_XDP_HEADROOM; + self->page_order = AQ_CFG_XDP_PAGEORDER; + self->tail_size = AQ_XDP_TAILROOM; + } else { + self->page_offset = 0; + self->page_order = fls(self->frame_max / PAGE_SIZE + + (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1; + if (aq_nic_cfg->rxpageorder > self->page_order) + self->page_order = aq_nic_cfg->rxpageorder; + self->tail_size = 0; + } self = aq_ring_alloc(self, aq_nic); if (!self) { @@ -298,15 +337,26 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) } } - if (unlikely(buff->is_eop && buff->skb)) { + if (likely(!buff->is_eop)) + goto out; + + if (buff->skb) { u64_stats_update_begin(&self->stats.tx.syncp); ++self->stats.tx.packets; self->stats.tx.bytes += buff->skb->len; u64_stats_update_end(&self->stats.tx.syncp); - dev_kfree_skb_any(buff->skb); - buff->skb = NULL; + } else if (buff->xdpf) { + u64_stats_update_begin(&self->stats.tx.syncp); + ++self->stats.tx.packets; + self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf); + u64_stats_update_end(&self->stats.tx.syncp); + xdp_return_frame_rx_napi(buff->xdpf); } + +out: + buff->skb = NULL; + buff->xdpf = NULL; buff->pa = 0U; buff->eop_index = 0xffffU; self->sw_head = aq_ring_next_dx(self, self->sw_head); @@ -339,11 +389,159 @@ static void aq_rx_checksum(struct aq_ring_s *self, __skb_incr_checksum_unnecessary(skb); } -#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -int aq_ring_rx_clean(struct aq_ring_s *self, - struct napi_struct *napi, - int *work_done, - int budget) +int aq_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct aq_nic_s *aq_nic = netdev_priv(dev); + unsigned int vec, i, drop = 0; + int cpu = smp_processor_id(); + struct aq_nic_cfg_s *aq_cfg; + struct aq_ring_s *ring; + + aq_cfg = aq_nic_get_cfg(aq_nic); + vec = cpu % aq_cfg->vecs; + ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)]; + + for (i = 0; i < num_frames; i++) { + struct xdp_frame *xdpf = frames[i]; + + if (aq_nic_xmit_xdpf(aq_nic, ring, 
xdpf) == NETDEV_TX_BUSY) + drop++; + } + + return num_frames - drop; +} + +static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic, + struct xdp_buff *xdp, + struct aq_ring_s *rx_ring, + struct aq_ring_buff_s *buff) +{ + int result = NETDEV_TX_BUSY; + struct aq_ring_s *tx_ring; + struct xdp_frame *xdpf; + struct bpf_prog *prog; + u32 act = XDP_ABORTED; + struct sk_buff *skb; + + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.packets; + rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp); + u64_stats_update_end(&rx_ring->stats.rx.syncp); + + prog = READ_ONCE(rx_ring->xdp_prog); + if (!prog) + goto pass; + + prefetchw(xdp->data_hard_start); /* xdp_frame write */ + + /* single buffer XDP program, but packet is multi buffer, aborted */ + if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags) + goto out_aborted; + + act = bpf_prog_run_xdp(prog, xdp); + switch (act) { + case XDP_PASS: +pass: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) + goto out_aborted; + skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev); + if (!skb) + goto out_aborted; + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_pass; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + aq_get_rxpages_xdp(buff, xdp); + return skb; + case XDP_TX: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) + goto out_aborted; + tx_ring = aq_nic->aq_ring_tx[rx_ring->idx]; + result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf); + if (result == NETDEV_TX_BUSY) + goto out_aborted; + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_tx; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + aq_get_rxpages_xdp(buff, xdp); + break; + case XDP_REDIRECT: + if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0) + goto out_aborted; + xdp_do_flush(); + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_redirect; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + aq_get_rxpages_xdp(buff, xdp); + break; + default: + fallthrough; + case XDP_ABORTED: +out_aborted: + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_aborted; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + trace_xdp_exception(aq_nic->ndev, prog, act); + bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act); + break; + case XDP_DROP: + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_drop; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + break; + } + + return ERR_PTR(-result); +} + +static bool aq_add_rx_fragment(struct device *dev, + struct aq_ring_s *ring, + struct aq_ring_buff_s *buff, + struct xdp_buff *xdp) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + struct aq_ring_buff_s *buff_ = buff; + + memset(sinfo, 0, sizeof(*sinfo)); + do { + skb_frag_t *frag; + + if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) + return true; + + frag = &sinfo->frags[sinfo->nr_frags++]; + buff_ = &ring->buff_ring[buff_->next]; + dma_sync_single_range_for_cpu(dev, + buff_->rxdata.daddr, + buff_->rxdata.pg_off, + buff_->len, + DMA_FROM_DEVICE); + skb_frag_off_set(frag, buff_->rxdata.pg_off); + skb_frag_size_set(frag, buff_->len); + sinfo->xdp_frags_size += buff_->len; + __skb_frag_set_page(frag, buff_->rxdata.page); + + buff_->is_cleaned = 1; + + buff->is_ip_cso &= buff_->is_ip_cso; + buff->is_udp_cso &= buff_->is_udp_cso; + buff->is_tcp_cso &= buff_->is_tcp_cso; + buff->is_cso_err |= buff_->is_cso_err; + + if (page_is_pfmemalloc(buff_->rxdata.page)) + xdp_buff_set_frag_pfmemalloc(xdp); + + } while 
(!buff_->is_eop); + + xdp_buff_set_frags_flag(xdp); + + return false; +} + +static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, + int *work_done, int budget) { struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); bool is_rsc_completed = true; @@ -449,7 +647,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, skb_add_rx_frag(skb, 0, buff->rxdata.page, buff->rxdata.pg_off + hdr_len, buff->len - hdr_len, - AQ_CFG_RX_FRAME_MAX); + self->frame_max); page_ref_inc(buff->rxdata.page); } @@ -469,7 +667,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, buff_->rxdata.page, buff_->rxdata.pg_off, buff_->len, - AQ_CFG_RX_FRAME_MAX); + self->frame_max); page_ref_inc(buff_->rxdata.page); buff_->is_cleaned = 1; @@ -510,6 +708,149 @@ err_exit: return err; } +static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring, + struct napi_struct *napi, int *work_done, + int budget) +{ + int frame_sz = rx_ring->page_offset + rx_ring->frame_max + + rx_ring->tail_size; + struct aq_nic_s *aq_nic = rx_ring->aq_nic; + bool is_rsc_completed = true; + struct device *dev; + int err = 0; + + dev = aq_nic_get_dev(aq_nic); + for (; (rx_ring->sw_head != rx_ring->hw_head) && budget; + rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head), + --budget, ++(*work_done)) { + struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head]; + bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring); + struct aq_ring_buff_s *buff_ = NULL; + struct sk_buff *skb = NULL; + unsigned int next_ = 0U; + struct xdp_buff xdp; + void *hard_start; + + if (buff->is_cleaned) + continue; + + if (!buff->is_eop) { + buff_ = buff; + do { + if (buff_->next >= rx_ring->size) { + err = -EIO; + goto err_exit; + } + next_ = buff_->next; + buff_ = &rx_ring->buff_ring[next_]; + is_rsc_completed = + aq_ring_dx_in_range(rx_ring->sw_head, + next_, + rx_ring->hw_head); + + if (unlikely(!is_rsc_completed)) + break; + + buff->is_error |= buff_->is_error; + buff->is_cso_err |= buff_->is_cso_err; + } while (!buff_->is_eop); + + if (!is_rsc_completed) { + err = 0; + goto err_exit; + } + if (buff->is_error || + (buff->is_lro && buff->is_cso_err)) { + buff_ = buff; + do { + if (buff_->next >= rx_ring->size) { + err = -EIO; + goto err_exit; + } + next_ = buff_->next; + buff_ = &rx_ring->buff_ring[next_]; + + buff_->is_cleaned = true; + } while (!buff_->is_eop); + + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.errors; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + continue; + } + } + + if (buff->is_error) { + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.errors; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + continue; + } + + dma_sync_single_range_for_cpu(dev, + buff->rxdata.daddr, + buff->rxdata.pg_off, + buff->len, DMA_FROM_DEVICE); + hard_start = page_address(buff->rxdata.page) + + buff->rxdata.pg_off - rx_ring->page_offset; + + if (is_ptp_ring) + buff->len -= + aq_ptp_extract_ts(rx_ring->aq_nic, skb, + aq_buf_vaddr(&buff->rxdata), + buff->len); + + xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); + xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset, + buff->len, false); + if (!buff->is_eop) { + if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) { + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.packets; + rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp); + ++rx_ring->stats.rx.xdp_aborted; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + continue; + } + } + + skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff); + if (IS_ERR(skb) || 
!skb) + continue; + + if (buff->is_vlan) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + buff->vlan_rx_tag); + + aq_rx_checksum(rx_ring, buff, skb); + + skb_set_hash(skb, buff->rss_hash, + buff->is_hash_l4 ? PKT_HASH_TYPE_L4 : + PKT_HASH_TYPE_NONE); + /* Send all PTP traffic to 0 queue */ + skb_record_rx_queue(skb, + is_ptp_ring ? 0 + : AQ_NIC_RING2QMAP(rx_ring->aq_nic, + rx_ring->idx)); + + napi_gro_receive(napi, skb); + } + +err_exit: + return err; +} + +int aq_ring_rx_clean(struct aq_ring_s *self, + struct napi_struct *napi, + int *work_done, + int budget) +{ + if (static_branch_unlikely(&aq_xdp_locking_key)) + return __aq_ring_xdp_clean(self, napi, work_done, budget); + else + return __aq_ring_rx_clean(self, napi, work_done, budget); +} + void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic) { #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) @@ -529,7 +870,6 @@ void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic) int aq_ring_rx_fill(struct aq_ring_s *self) { - unsigned int page_order = self->page_order; struct aq_ring_buff_s *buff = NULL; int err = 0; int i = 0; @@ -543,9 +883,9 @@ int aq_ring_rx_fill(struct aq_ring_s *self) buff = &self->buff_ring[self->sw_tail]; buff->flags = 0U; - buff->len = AQ_CFG_RX_FRAME_MAX; + buff->len = self->frame_max; - err = aq_get_rxpages(self, buff, page_order); + err = aq_get_rxpages(self, buff); if (err) goto err_exit; @@ -600,6 +940,15 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) data[++count] = self->stats.rx.alloc_fails; data[++count] = self->stats.rx.skb_alloc_fails; data[++count] = self->stats.rx.polls; + data[++count] = self->stats.rx.pg_flips; + data[++count] = self->stats.rx.pg_reuses; + data[++count] = self->stats.rx.pg_losts; + data[++count] = self->stats.rx.xdp_aborted; + data[++count] = self->stats.rx.xdp_drop; + data[++count] = self->stats.rx.xdp_pass; + data[++count] = self->stats.rx.xdp_tx; + data[++count] = self->stats.rx.xdp_invalid; + data[++count] = self->stats.rx.xdp_redirect; } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start)); } else { /* This data should mimic aq_ethtool_queue_tx_stat_names structure */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 93659e58f1ce..0a6c34438c1d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -11,6 +11,10 @@ #define AQ_RING_H #include "aq_common.h" +#include "aq_vec.h" + +#define AQ_XDP_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +#define AQ_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) struct page; struct aq_nic_cfg_s; @@ -51,6 +55,7 @@ struct __packed aq_ring_buff_s { struct { dma_addr_t pa_eop; struct sk_buff *skb; + struct xdp_frame *xdpf; }; /* TxC */ struct { @@ -101,6 +106,12 @@ struct aq_ring_stats_rx_s { u64 pg_losts; u64 pg_flips; u64 pg_reuses; + u64 xdp_aborted; + u64 xdp_drop; + u64 xdp_pass; + u64 xdp_tx; + u64 xdp_invalid; + u64 xdp_redirect; }; struct aq_ring_stats_tx_s { @@ -132,10 +143,15 @@ struct aq_ring_s { unsigned int size; /* descriptors number */ unsigned int dx_size; /* TX or RX descriptor size, */ /* stored here for faster math */ - unsigned int page_order; + u16 page_order; + u16 page_offset; + u16 frame_max; + u16 tail_size; union aq_ring_stats_s stats; dma_addr_t dx_ring_pa; + struct bpf_prog *xdp_prog; enum atl_ring_type ring_type; + struct xdp_rxq_info xdp_rxq; }; struct aq_ring_param_s { @@ -175,6 +191,7 @@ struct
aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic, unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg); + int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type); void aq_ring_rx_deinit(struct aq_ring_s *self); void aq_ring_free(struct aq_ring_s *self); @@ -182,6 +199,8 @@ void aq_ring_update_queue_state(struct aq_ring_s *ring); void aq_ring_queue_wake(struct aq_ring_s *ring); void aq_ring_queue_stop(struct aq_ring_s *ring); bool aq_ring_tx_clean(struct aq_ring_s *self); +int aq_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags); int aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, int *work_done, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index f4774cf051c9..f0fdf20f01c1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -10,11 +10,6 @@ */ #include "aq_vec.h" -#include "aq_nic.h" -#include "aq_ring.h" -#include "aq_hw.h" - -#include <linux/netdevice.h> struct aq_vec_s { const struct aq_hw_ops *aq_hw_ops; @@ -43,8 +38,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) if (!self) { err = -EINVAL; } else { - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp); ring[AQ_VEC_RX_ID].stats.rx.polls++; u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp); @@ -125,7 +120,7 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, self->rx_rings = 0; netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, - aq_vec_poll, AQ_CFG_NAPI_WEIGHT); + aq_vec_poll, NAPI_POLL_WEIGHT); err_exit: return self; @@ -153,9 +148,23 @@ int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic, aq_nic_set_tx_ring(aq_nic, idx_ring, ring); + if (xdp_rxq_info_reg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq, + aq_nic->ndev, idx, + self->napi.napi_id) < 0) { + err = -ENOMEM; + goto err_exit; + } + if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL) < 0) { + xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq); + err = -ENOMEM; + goto err_exit; + } + ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic, idx_ring, aq_nic_cfg); if (!ring) { + xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq); err = -ENOMEM; goto err_exit; } @@ -182,8 +191,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops, self->aq_hw_ops = aq_hw_ops; self->aq_hw = aq_hw; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX); if (err < 0) goto err_exit; @@ -224,8 +233,8 @@ int aq_vec_start(struct aq_vec_s *self) unsigned int i = 0U; int err = 0; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw, &ring[AQ_VEC_TX_ID]); if (err < 0) @@ -248,8 +257,8 @@ void aq_vec_stop(struct aq_vec_s *self) struct aq_ring_s *ring = NULL; unsigned int i = 0U; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw, 
&ring[AQ_VEC_TX_ID]); @@ -268,8 +277,8 @@ void aq_vec_deinit(struct aq_vec_s *self) if (!self) goto err_exit; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]); } @@ -297,11 +306,13 @@ void aq_vec_ring_free(struct aq_vec_s *self) if (!self) goto err_exit; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; aq_ring_free(&ring[AQ_VEC_TX_ID]); - if (i < self->rx_rings) + if (i < self->rx_rings) { + xdp_rxq_info_unreg(&ring[AQ_VEC_RX_ID].xdp_rxq); aq_ring_free(&ring[AQ_VEC_RX_ID]); + } } self->tx_rings = 0; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h index 567f3d4b79a2..78fac609b71d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h @@ -13,7 +13,13 @@ #define AQ_VEC_H #include "aq_common.h" +#include "aq_nic.h" +#include "aq_ring.h" +#include "aq_hw.h" + #include <linux/irqreturn.h> +#include <linux/filter.h> +#include <linux/netdevice.h> struct aq_hw_s; struct aq_hw_ops; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 4625ccb79499..9dfd68f0fda9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -531,7 +531,7 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self, hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); hw_atl_rdm_rx_desc_data_buff_size_set(self, - AQ_CFG_RX_FRAME_MAX / 1024U, + aq_ring->frame_max / 1024U, aq_ring->idx); hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); @@ -706,9 +706,9 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) { buff->len = rxd_wb->pkt_len % - AQ_CFG_RX_FRAME_MAX; + ring->frame_max; buff->len = buff->len ? - buff->len : AQ_CFG_RX_FRAME_MAX; + buff->len : ring->frame_max; buff->next = 0U; buff->is_eop = 1U; } else { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index d875ce3ec759..878a53abec33 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -766,7 +766,7 @@ int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring, hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); hw_atl_rdm_rx_desc_data_buff_size_set(self, - AQ_CFG_RX_FRAME_MAX / 1024U, + aq_ring->frame_max / 1024U, aq_ring->idx); hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); @@ -969,15 +969,15 @@ int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring) rxd_wb->status); if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { buff->len = rxd_wb->pkt_len % - AQ_CFG_RX_FRAME_MAX; + ring->frame_max; buff->len = buff->len ? - buff->len : AQ_CFG_RX_FRAME_MAX; + buff->len : ring->frame_max; buff->next = 0U; buff->is_eop = 1U; } else { buff->len = - rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ? - AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len; + rxd_wb->pkt_len > ring->frame_max ? 
+ ring->frame_max : rxd_wb->pkt_len; if (buff->is_lro) { /* LRO */ diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 7b525c65bacb..2dfc1e32bbb3 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1527,7 +1527,7 @@ int bgmac_enet_probe(struct bgmac *bgmac) if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0) bgmac->int_mask &= ~BGMAC_IS_TX_MASK; - netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT); + netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, NAPI_POLL_WEIGHT); err = bgmac_phy_connect(bgmac); if (err) { diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 110088e662ea..e05ac92c0650 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -364,8 +364,6 @@ #define BGMAC_CHIPCTL_7_IF_TYPE_MII 0x00000040 #define BGMAC_CHIPCTL_7_IF_TYPE_RGMII 0x00000080 -#define BGMAC_WEIGHT 64 - #define ETHER_MAX_LEN (ETH_FRAME_LEN + ETH_FCS_LEN) /* Feature Flags */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c19b072f3a23..962253db25b8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -14153,10 +14153,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) /* Stop Tx */ bnx2x_tx_disable(bp); - /* Delete all NAPI objects */ - bnx2x_del_all_napi(bp); - if (CNIC_LOADED(bp)) - bnx2x_del_all_napi_cnic(bp); netdev_reset_tc(bp->dev); del_timer_sync(&bp->timer); @@ -14261,6 +14257,11 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) bnx2x_drain_tx_queues(bp); bnx2x_send_unload_req(bp, UNLOAD_RECOVERY); bnx2x_netif_stop(bp, 1); + bnx2x_del_all_napi(bp); + + if (CNIC_LOADED(bp)) + bnx2x_del_all_napi_cnic(bp); + bnx2x_free_irq(bp); /* Report UNLOAD_DONE to MCP */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 5caa75b41b73..4e9215bce4ad 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -2212,7 +2212,7 @@ * MAC DA 2. The reset default is set to mask out all parameters. */ #define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0 -/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set +/* [RW 14] Mask register for the rules used in detecting PTP packets. Set * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; @@ -2381,7 +2381,7 @@ * MAC DA 2. The reset default is set to mask out all parameters. */ #define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8 -/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set +/* [RW 14] Mask register for the rules used in detecting PTP packets. Set * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; @@ -2493,7 +2493,7 @@ * MAC DA 2. The reset default is set to mask out all parameters. */ #define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0 -/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set +/* [RW 14] Mask register for the rules used in detecting PTP packets. Set * each bit to 1 to mask out that particular rule. 
0-{IPv4 DA 0; UDP DP 0} . * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; @@ -2529,7 +2529,7 @@ * MAC DA 2. The reset default is set to mask out all parameters. */ #define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8 -/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set +/* [RW 14] Mask register for the rules used in detecting PTP packets. Set * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; @@ -6218,7 +6218,7 @@ #define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2) #define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12) #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28) -#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1U<<31) #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29) #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30) #define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1c28495875cf..0489c1c2e7dd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -56,6 +56,7 @@ #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <net/page_pool.h> +#include <linux/align.h> #include "bnxt_hsi.h" #include "bnxt.h" @@ -738,7 +739,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, page_pool_recycle_direct(rxr->page_pool, page); return NULL; } - *mapping += bp->rx_dma_offset; return page; } @@ -780,6 +780,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, if (!page) return -ENOMEM; + mapping += bp->rx_dma_offset; rx_buf->data = page; rx_buf->data_ptr = page_address(page) + bp->rx_offset; } else { @@ -840,33 +841,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, u16 sw_prod = rxr->rx_sw_agg_prod; unsigned int offset = 0; - if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { - page = rxr->rx_page; - if (!page) { + if (BNXT_RX_PAGE_MODE(bp)) { + page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp); + + if (!page) + return -ENOMEM; + + } else { + if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { + page = rxr->rx_page; + if (!page) { + page = alloc_page(gfp); + if (!page) + return -ENOMEM; + rxr->rx_page = page; + rxr->rx_page_offset = 0; + } + offset = rxr->rx_page_offset; + rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; + if (rxr->rx_page_offset == PAGE_SIZE) + rxr->rx_page = NULL; + else + get_page(page); + } else { page = alloc_page(gfp); if (!page) return -ENOMEM; - rxr->rx_page = page; - rxr->rx_page_offset = 0; } - offset = rxr->rx_page_offset; - rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; - if (rxr->rx_page_offset == PAGE_SIZE) - rxr->rx_page = NULL; - else - get_page(page); - } else { - page = alloc_page(gfp); - if (!page) - return -ENOMEM; - } - mapping = dma_map_page_attrs(&pdev->dev, page, offset, - BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, - DMA_ATTR_WEAK_ORDERING); - if (dma_mapping_error(&pdev->dev, mapping)) { - __free_page(page); - return -EIO; + mapping = dma_map_page_attrs(&pdev->dev, page, offset, + BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, + DMA_ATTR_WEAK_ORDERING); + if (dma_mapping_error(&pdev->dev, mapping)) { + __free_page(page); + return -EIO; + } } if (unlikely(test_bit(sw_prod, 
rxr->rx_agg_bmap))) @@ -962,6 +971,39 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, rxr->rx_sw_agg_prod = sw_prod; } +static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 cons, void *data, u8 *data_ptr, + dma_addr_t dma_addr, + unsigned int offset_and_len) +{ + unsigned int len = offset_and_len & 0xffff; + struct page *page = data; + u16 prod = rxr->rx_prod; + struct sk_buff *skb; + int err; + + err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); + if (unlikely(err)) { + bnxt_reuse_rx_data(rxr, cons, data); + return NULL; + } + dma_addr -= bp->rx_dma_offset; + dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE + + bp->rx_dma_offset); + if (!skb) { + __free_page(page); + return NULL; + } + skb_mark_for_recycle(skb); + skb_reserve(skb, bp->rx_dma_offset); + __skb_put(skb, len); + + return skb; +} + static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, void *data, u8 *data_ptr, @@ -984,7 +1026,6 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, dma_addr -= bp->rx_dma_offset; dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); - page_pool_release_page(rxr->page_pool, page); if (unlikely(!payload)) payload = eth_get_headlen(bp->dev, data_ptr, len); @@ -995,6 +1036,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, return NULL; } + skb_mark_for_recycle(skb); off = (void *)data_ptr - page_address(page); skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE); memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, @@ -1038,22 +1080,24 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, return skb; } -static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, - struct bnxt_cp_ring_info *cpr, - struct sk_buff *skb, u16 idx, - u32 agg_bufs, bool tpa) +static u32 __bnxt_rx_agg_pages(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + struct skb_shared_info *shinfo, + u16 idx, u32 agg_bufs, bool tpa, + struct xdp_buff *xdp) { struct bnxt_napi *bnapi = cpr->bnapi; struct pci_dev *pdev = bp->pdev; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; + u32 i, total_frag_len = 0; bool p5_tpa = false; - u32 i; if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) p5_tpa = true; for (i = 0; i < agg_bufs; i++) { + skb_frag_t *frag = &shinfo->frags[i]; u16 cons, frag_len; struct rx_agg_cmp *agg; struct bnxt_sw_rx_agg_bd *cons_rx_buf; @@ -1069,8 +1113,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; cons_rx_buf = &rxr->rx_agg_ring[cons]; - skb_fill_page_desc(skb, i, cons_rx_buf->page, - cons_rx_buf->offset, frag_len); + skb_frag_off_set(frag, cons_rx_buf->offset); + skb_frag_size_set(frag, frag_len); + __skb_frag_set_page(frag, cons_rx_buf->page); + shinfo->nr_frags = i + 1; __clear_bit(cons, rxr->rx_agg_bmap); /* It is possible for bnxt_alloc_rx_page() to allocate @@ -1081,16 +1127,14 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, page = cons_rx_buf->page; cons_rx_buf->page = NULL; + if (xdp && page_is_pfmemalloc(page)) + xdp_buff_set_frag_pfmemalloc(xdp); + if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { - struct skb_shared_info *shinfo; unsigned int nr_frags; - shinfo = skb_shinfo(skb); nr_frags = --shinfo->nr_frags; __skb_frag_set_page(&shinfo->frags[nr_frags], NULL); - - dev_kfree_skb(skb); - cons_rx_buf->page = page; /* Update prod 
since possibly some pages have been @@ -1098,23 +1142,62 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, */ rxr->rx_agg_prod = prod; bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); - return NULL; + return 0; } dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, - DMA_FROM_DEVICE, + bp->rx_dir, DMA_ATTR_WEAK_ORDERING); - skb->data_len += frag_len; - skb->len += frag_len; - skb->truesize += PAGE_SIZE; - + total_frag_len += frag_len; prod = NEXT_RX_AGG(prod); } rxr->rx_agg_prod = prod; + return total_frag_len; +} + +static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + struct sk_buff *skb, u16 idx, + u32 agg_bufs, bool tpa) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + u32 total_frag_len = 0; + + total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, + agg_bufs, tpa, NULL); + if (!total_frag_len) { + dev_kfree_skb(skb); + return NULL; + } + + skb->data_len += total_frag_len; + skb->len += total_frag_len; + skb->truesize += PAGE_SIZE * agg_bufs; return skb; } +static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + struct xdp_buff *xdp, u16 idx, + u32 agg_bufs, bool tpa) +{ + struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp); + u32 total_frag_len = 0; + + if (!xdp_buff_has_frags(xdp)) + shinfo->nr_frags = 0; + + total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, + idx, agg_bufs, tpa, xdp); + if (total_frag_len) { + xdp_buff_set_frags_flag(xdp); + shinfo->nr_frags = agg_bufs; + shinfo->xdp_frags_size = total_frag_len; + } + return total_frag_len; +} + static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, u8 agg_bufs, u32 *raw_cons) { @@ -1644,7 +1727,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } if (agg_bufs) { - skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true); + skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true); if (!skb) { /* Page reuse already handled by bnxt_rx_agg_pages_skb().
*/ cpr->sw_stats.rx.rx_oom_discards += 1; @@ -1729,8 +1812,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, struct bnxt_sw_rx_bd *rx_buf; unsigned int len; u8 *data_ptr, agg_bufs, cmp_type; + bool xdp_active = false; dma_addr_t dma_addr; struct sk_buff *skb; + struct xdp_buff xdp; u32 flags, misc; void *data; int rc = 0; @@ -1839,18 +1924,39 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, len = flags >> RX_CMP_LEN_SHIFT; dma_addr = rx_buf->mapping; - if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) { - rc = 1; - goto next_rx; + if (bnxt_xdp_attached(bp, rxr)) { + bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp); + if (agg_bufs) { + u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp, + cp_cons, agg_bufs, + false); + if (!frag_len) { + cpr->sw_stats.rx.rx_oom_discards += 1; + rc = -ENOMEM; + goto next_rx; + } + } + xdp_active = true; + } + + if (xdp_active) { + if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) { + rc = 1; + goto next_rx; + } } if (len <= bp->rx_copy_thresh) { skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); bnxt_reuse_rx_data(rxr, cons, data); if (!skb) { - if (agg_bufs) - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, - agg_bufs, false); + if (agg_bufs) { + if (!xdp_active) + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, + agg_bufs, false); + else + bnxt_xdp_buff_frags_free(rxr, &xdp); + } cpr->sw_stats.rx.rx_oom_discards += 1; rc = -ENOMEM; goto next_rx; @@ -1872,11 +1978,22 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } if (agg_bufs) { - skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false); - if (!skb) { - cpr->sw_stats.rx.rx_oom_discards += 1; - rc = -ENOMEM; - goto next_rx; + if (!xdp_active) { + skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false); + if (!skb) { + cpr->sw_stats.rx.rx_oom_discards += 1; + rc = -ENOMEM; + goto next_rx; + } + } else { + skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1); + if (!skb) { + /* we should be able to free the old skb here */ + bnxt_xdp_buff_frags_free(rxr, &xdp); + cpr->sw_stats.rx.rx_oom_discards += 1; + rc = -ENOMEM; + goto next_rx; + } } } @@ -2492,10 +2609,13 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; - if (bnapi->events & BNXT_AGG_EVENT) - bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); } + if (bnapi->events & BNXT_AGG_EVENT) { + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + } bnapi->events = 0; } @@ -2872,14 +2992,23 @@ skip_rx_buf_free: if (!page) continue; - dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, - BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, - DMA_ATTR_WEAK_ORDERING); + if (BNXT_RX_PAGE_MODE(bp)) { + dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, + BNXT_RX_PAGE_SIZE, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + rx_agg_buf->page = NULL; + __clear_bit(i, rxr->rx_agg_bmap); - rx_agg_buf->page = NULL; - __clear_bit(i, rxr->rx_agg_bmap); + page_pool_recycle_direct(rxr->page_pool, page); + } else { + dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, + BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, + DMA_ATTR_WEAK_ORDERING); + rx_agg_buf->page = NULL; + __clear_bit(i, rxr->rx_agg_bmap); - __free_page(page); + __free_page(page); + } } skip_rx_agg_free: @@ -3253,6 +3382,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) } qidx 
= bp->tc_to_qidx[j]; ring->queue_id = bp->q_info[qidx].queue_id; + spin_lock_init(&txr->xdp_tx_lock); if (i < bp->tx_nr_rings_xdp) continue; if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) @@ -3792,7 +3922,7 @@ void bnxt_set_ring_params(struct bnxt *bp) /* 8 for CRC and VLAN */ rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); - rx_space = rx_size + NET_SKB_PAD + + rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; @@ -3833,9 +3963,15 @@ void bnxt_set_ring_params(struct bnxt *bp) } bp->rx_agg_ring_size = agg_ring_size; bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; - rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); - rx_space = rx_size + NET_SKB_PAD + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + if (BNXT_RX_PAGE_MODE(bp)) { + rx_space = BNXT_PAGE_MODE_BUF_SIZE; + rx_size = BNXT_MAX_PAGE_MODE_MTU; + } else { + rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); + rx_space = rx_size + NET_SKB_PAD + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + } } bp->rx_buf_use_size = rx_size; @@ -3876,14 +4012,21 @@ void bnxt_set_ring_params(struct bnxt *bp) int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) { if (page_mode) { - if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) - return -EOPNOTSUPP; - bp->dev->max_mtu = - min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); bp->flags &= ~BNXT_FLAG_AGG_RINGS; - bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; + bp->flags |= BNXT_FLAG_RX_PAGE_MODE; + + if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { + bp->flags |= BNXT_FLAG_JUMBO; + bp->rx_skb_func = bnxt_rx_multi_page_skb; + bp->dev->max_mtu = + min_t(u16, bp->max_mtu, BNXT_MAX_MTU); + } else { + bp->flags |= BNXT_FLAG_NO_AGG_RINGS; + bp->rx_skb_func = bnxt_rx_page_skb; + bp->dev->max_mtu = + min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); + } bp->rx_dir = DMA_BIDIRECTIONAL; - bp->rx_skb_func = bnxt_rx_page_skb; /* Disable LRO or GRO_HW */ netdev_update_features(bp->dev); } else { @@ -5225,12 +5368,15 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) if (rc) return rc; - req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | - VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | - VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); - req->enables = - cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | - VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); + req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); + req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); + + if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) { + req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | + VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); + req->enables |= + cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); + } /* thresholds not implemented in firmware yet */ req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); @@ -10338,6 +10484,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) if (irq_re_init) udp_tunnel_nic_reset_ntf(bp->dev); + if (bp->tx_nr_rings_xdp < num_possible_cpus()) { + if (!static_key_enabled(&bnxt_xdp_locking_key)) + static_branch_enable(&bnxt_xdp_locking_key); + } else if (static_key_enabled(&bnxt_xdp_locking_key)) { + static_branch_disable(&bnxt_xdp_locking_key); + } set_bit(BNXT_STATE_OPEN, &bp->state); bnxt_enable_int(bp); /* Enable TX 
queues */ @@ -11024,6 +11176,9 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); + if (!(bp->flags & BNXT_FLAG_TPA)) + features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); + if (!(features & NETIF_F_GRO)) features &= ~NETIF_F_GRO_HW; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 61aa3e8c5952..a498ee297946 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -591,9 +591,12 @@ struct nqe_cn { #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT) #define BNXT_MAX_MTU 9500 -#define BNXT_MAX_PAGE_MODE_MTU \ +#define BNXT_PAGE_MODE_BUF_SIZE \ ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \ XDP_PACKET_HEADROOM) +#define BNXT_MAX_PAGE_MODE_MTU \ + BNXT_PAGE_MODE_BUF_SIZE - \ + SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)) #define BNXT_MIN_PKT_SIZE 52 @@ -698,13 +701,12 @@ struct bnxt_sw_tx_bd { }; DEFINE_DMA_UNMAP_ADDR(mapping); DEFINE_DMA_UNMAP_LEN(len); + struct page *page; u8 is_gso; u8 is_push; u8 action; - union { - unsigned short nr_frags; - u16 rx_prod; - }; + unsigned short nr_frags; + u16 rx_prod; }; struct bnxt_sw_rx_bd { @@ -800,6 +802,8 @@ struct bnxt_tx_ring_info { u32 dev_state; struct bnxt_ring_struct tx_ring_struct; + /* Synchronize simultaneous xdp_xmit on same ring */ + spinlock_t xdp_tx_lock; }; #define BNXT_LEGACY_COAL_CMPL_PARAMS \ @@ -1814,6 +1818,7 @@ struct bnxt { #define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \ (!((bp)->flags & BNXT_FLAG_CHIP_P5) || \ (bp)->max_tpa_v2) && !is_kdump_kernel()) +#define BNXT_RX_JUMBO_MODE(bp) ((bp)->flags & BNXT_FLAG_JUMBO) #define BNXT_CHIP_SR2(bp) \ ((bp)->chip_num == CHIP_NUM_58818) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 22e965e18fbc..b3a48d6675fe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -3491,7 +3491,7 @@ static int bnxt_run_loopback(struct bnxt *bp) dev_kfree_skb(skb); return -EIO; } - bnxt_xmit_bd(bp, txr, map, pkt_size); + bnxt_xmit_bd(bp, txr, map, pkt_size, NULL); /* Sync BD data before updating doorbell */ wmb(); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 52fad0fdeacf..f02fe906dedb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -20,38 +20,95 @@ #include "bnxt.h" #include "bnxt_xdp.h" +DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key); + struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, struct bnxt_tx_ring_info *txr, - dma_addr_t mapping, u32 len) + dma_addr_t mapping, u32 len, + struct xdp_buff *xdp) { - struct bnxt_sw_tx_bd *tx_buf; + struct skb_shared_info *sinfo; + struct bnxt_sw_tx_bd *tx_buf, *first_buf; struct tx_bd *txbd; + int num_frags = 0; u32 flags; u16 prod; + int i; + + if (xdp && xdp_buff_has_frags(xdp)) { + sinfo = xdp_get_shared_info_from_buff(xdp); + num_frags = sinfo->nr_frags; + } + /* fill up the first buffer */ prod = txr->tx_prod; tx_buf = &txr->tx_buf_ring[prod]; + first_buf = tx_buf; + tx_buf->nr_frags = num_frags; + if (xdp) + tx_buf->page = virt_to_head_page(xdp->data); txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; - flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) | - TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9]; + flags = ((len) << 
TX_BD_LEN_SHIFT) | ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT); txbd->tx_bd_len_flags_type = cpu_to_le32(flags); txbd->tx_bd_opaque = prod; txbd->tx_bd_haddr = cpu_to_le64(mapping); + /* now let us fill up the frags into the next buffers */ + for (i = 0; i < num_frags ; i++) { + skb_frag_t *frag = &sinfo->frags[i]; + struct bnxt_sw_tx_bd *frag_tx_buf; + struct pci_dev *pdev = bp->pdev; + dma_addr_t frag_mapping; + int frag_len; + + prod = NEXT_TX(prod); + txr->tx_prod = prod; + + /* first fill up the first buffer */ + frag_tx_buf = &txr->tx_buf_ring[prod]; + frag_tx_buf->page = skb_frag_page(frag); + + txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + + frag_len = skb_frag_size(frag); + frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0, + frag_len, DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping))) + return NULL; + + dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping); + + flags = frag_len << TX_BD_LEN_SHIFT; + txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + txbd->tx_bd_opaque = prod; + txbd->tx_bd_haddr = cpu_to_le64(frag_mapping); + + len = frag_len; + } + + flags &= ~TX_BD_LEN; + txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags | + TX_BD_FLAGS_PACKET_END); + /* Sync TX BD */ + wmb(); prod = NEXT_TX(prod); txr->tx_prod = prod; - return tx_buf; + + return first_buf; } static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr, - dma_addr_t mapping, u32 len, u16 rx_prod) + dma_addr_t mapping, u32 len, u16 rx_prod, + struct xdp_buff *xdp) { struct bnxt_sw_tx_bd *tx_buf; - tx_buf = bnxt_xmit_bd(bp, txr, mapping, len); + tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp); tx_buf->rx_prod = rx_prod; tx_buf->action = XDP_TX; + } static void __bnxt_xmit_xdp_redirect(struct bnxt *bp, @@ -61,7 +118,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp, { struct bnxt_sw_tx_bd *tx_buf; - tx_buf = bnxt_xmit_bd(bp, txr, mapping, len); + tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL); tx_buf->action = XDP_REDIRECT; tx_buf->xdpf = xdpf; dma_unmap_addr_set(tx_buf, mapping, mapping); @@ -76,7 +133,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) struct bnxt_sw_tx_bd *tx_buf; u16 tx_cons = txr->tx_cons; u16 last_tx_cons = tx_cons; - int i; + int i, j, frags; for (i = 0; i < nr_pkts; i++) { tx_buf = &txr->tx_buf_ring[tx_cons]; @@ -94,6 +151,13 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) } else if (tx_buf->action == XDP_TX) { rx_doorbell_needed = true; last_tx_cons = tx_cons; + + frags = tx_buf->nr_frags; + for (j = 0; j < frags; j++) { + tx_cons = NEXT_TX(tx_cons); + tx_buf = &txr->tx_buf_ring[tx_cons]; + page_pool_recycle_direct(rxr->page_pool, tx_buf->page); + } } tx_cons = NEXT_TX(tx_cons); } @@ -101,22 +165,67 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) if (rx_doorbell_needed) { tx_buf = &txr->tx_buf_ring[last_tx_cons]; bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod); + } } +bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); + + return !!xdp_prog; +} + +void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u16 cons, u8 **data_ptr, unsigned int *len, + struct xdp_buff *xdp) +{ + struct bnxt_sw_rx_bd *rx_buf; + struct pci_dev *pdev; + dma_addr_t mapping; + u32 offset; + + pdev = bp->pdev; + rx_buf = &rxr->rx_buf_ring[cons]; + offset = bp->rx_offset; + + mapping = rx_buf->mapping - bp->rx_dma_offset; + 
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir); + + xdp_init_buff(xdp, BNXT_PAGE_MODE_BUF_SIZE + offset, &rxr->xdp_rxq); + xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false); +} + +void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr, + struct xdp_buff *xdp) +{ + struct skb_shared_info *shinfo; + int i; + + if (!xdp || !xdp_buff_has_frags(xdp)) + return; + shinfo = xdp_get_shared_info_from_buff(xdp); + for (i = 0; i < shinfo->nr_frags; i++) { + struct page *page = skb_frag_page(&shinfo->frags[i]); + + page_pool_recycle_direct(rxr->page_pool, page); + } + shinfo->nr_frags = 0; +} + /* returns the following: * true - packet consumed by XDP and new buffer is allocated. * false - packet should be passed to the stack. */ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, - struct page *page, u8 **data_ptr, unsigned int *len, u8 *event) + struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event) { struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); struct bnxt_tx_ring_info *txr; struct bnxt_sw_rx_bd *rx_buf; struct pci_dev *pdev; - struct xdp_buff xdp; dma_addr_t mapping; + u32 tx_needed = 1; void *orig_data; u32 tx_avail; u32 offset; @@ -126,16 +235,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, return false; pdev = bp->pdev; - rx_buf = &rxr->rx_buf_ring[cons]; offset = bp->rx_offset; - mapping = rx_buf->mapping - bp->rx_dma_offset; - dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir); - txr = rxr->bnapi->tx_ring; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */ - xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq); - xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false); orig_data = xdp.data; act = bpf_prog_run_xdp(xdp_prog, &xdp); @@ -148,26 +251,38 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, *event &= ~BNXT_RX_EVENT; *len = xdp.data_end - xdp.data; - if (orig_data != xdp.data) { + if (orig_data != xdp.data) offset = xdp.data - xdp.data_hard_start; - *data_ptr = xdp.data_hard_start + offset; - } + switch (act) { case XDP_PASS: return false; case XDP_TX: - if (tx_avail < 1) { + rx_buf = &rxr->rx_buf_ring[cons]; + mapping = rx_buf->mapping - bp->rx_dma_offset; + *event = 0; + + if (unlikely(xdp_buff_has_frags(&xdp))) { + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp); + + tx_needed += sinfo->nr_frags; + *event = BNXT_AGG_EVENT; + } + + if (tx_avail < tx_needed) { trace_xdp_exception(bp->dev, xdp_prog, act); + bnxt_xdp_buff_frags_free(rxr, &xdp); bnxt_reuse_rx_data(rxr, cons, page); return true; } - *event = BNXT_TX_EVENT; dma_sync_single_for_device(&pdev->dev, mapping + offset, *len, bp->rx_dir); + + *event |= BNXT_TX_EVENT; __bnxt_xmit_xdp(bp, txr, mapping + offset, *len, - NEXT_RX(rxr->rx_prod)); + NEXT_RX(rxr->rx_prod), &xdp); bnxt_reuse_rx_data(rxr, cons, page); return true; case XDP_REDIRECT: @@ -175,6 +290,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, * redirect is coming from a frame received by the * bnxt_en driver. 
*/ + rx_buf = &rxr->rx_buf_ring[cons]; + mapping = rx_buf->mapping - bp->rx_dma_offset; dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); @@ -182,6 +299,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, /* if we are unable to allocate a new buffer, abort and reuse */ if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) { trace_xdp_exception(bp->dev, xdp_prog, act); + bnxt_xdp_buff_frags_free(rxr, &xdp); bnxt_reuse_rx_data(rxr, cons, page); return true; } @@ -201,6 +319,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, trace_xdp_exception(bp->dev, xdp_prog, act); fallthrough; case XDP_DROP: + bnxt_xdp_buff_frags_free(rxr, &xdp); bnxt_reuse_rx_data(rxr, cons, page); break; } @@ -227,11 +346,16 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames, ring = smp_processor_id() % bp->tx_nr_rings_xdp; txr = &bp->tx_ring[ring]; + if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING) + return -EINVAL; + + if (static_branch_unlikely(&bnxt_xdp_locking_key)) + spin_lock(&txr->xdp_tx_lock); + for (i = 0; i < num_frames; i++) { struct xdp_frame *xdp = frames[i]; - if (!txr || !bnxt_tx_avail(bp, txr) || - !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) + if (!bnxt_tx_avail(bp, txr)) break; mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len, @@ -250,6 +374,9 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames, bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); } + if (static_branch_unlikely(&bnxt_xdp_locking_key)) + spin_unlock(&txr->xdp_tx_lock); + return nxmit; } @@ -260,8 +387,9 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) int tx_xdp = 0, rc, tc; struct bpf_prog *old; - if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { - netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n", + if (prog && !prog->aux->xdp_has_frags && + bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { + netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n", bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU); return -EOPNOTSUPP; } @@ -327,3 +455,26 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp) } return rc; } + +struct sk_buff * +bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags, + struct page_pool *pool, struct xdp_buff *xdp, + struct rx_cmp_ext *rxcmp1) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + + if (!skb) + return NULL; + skb_checksum_none_assert(skb); + if (RX_CMP_L4_CS_OK(rxcmp1)) { + if (bp->dev->features & NETIF_F_RXCSUM) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = RX_CMP_ENCAP(rxcmp1); + } + } + xdp_update_skb_shared_info(skb, num_frags, + sinfo->xdp_frags_size, + PAGE_SIZE * sinfo->nr_frags, + xdp_buff_is_frag_pfmemalloc(xdp)); + return skb; +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index 0df40c3beb05..505911ae095d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -10,15 +10,29 @@ #ifndef BNXT_XDP_H #define BNXT_XDP_H +DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key); + struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, struct bnxt_tx_ring_info *txr, - dma_addr_t mapping, u32 len); + dma_addr_t mapping, u32 len, + struct xdp_buff *xdp); void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts); bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, - struct page *page, u8 **data_ptr, unsigned int *len, + struct xdp_buff 
xdp, struct page *page, unsigned int *len, u8 *event); int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp); int bnxt_xdp_xmit(struct net_device *dev, int num_frames, struct xdp_frame **frames, u32 flags); +bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr); + +void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u16 cons, u8 **data_ptr, unsigned int *len, + struct xdp_buff *xdp); +void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr, + struct xdp_buff *xdp); +struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, + u8 num_frags, struct page_pool *pool, + struct xdp_buff *xdp, + struct rx_cmp_ext *rxcmp1); #endif diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 2dd79af9411b..bf1ec8fdc2ad 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset) if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) __raw_writel(value, offset); else - writel(value, offset); + writel_relaxed(value, offset); } static inline u32 bcmgenet_readl(void __iomem *offset) @@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset) if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) return __raw_readl(offset); else - return readl(offset); + return readl_relaxed(offset); } static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, @@ -2035,6 +2035,11 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, return skb; } +static void bcmgenet_hide_tsb(struct sk_buff *skb) +{ + __skb_pull(skb, sizeof(struct status_64)); +} + static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); @@ -2141,6 +2146,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) } GENET_CB(skb)->last_cb = tx_cb_ptr; + + bcmgenet_hide_tsb(skb); skb_tx_timestamp(skb); /* Decrement total BD count and advance our write pointer */ diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index a1a38456c9a3..5d5f10180158 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2534,7 +2534,12 @@ static int sbmac_probe(struct platform_device *pldev) int err; res = platform_get_resource(pldev, IORESOURCE_MEM, 0); - BUG_ON(!res); + if (!res) { + printk(KERN_ERR "%s: failed to get resource\n", + dev_name(&pldev->dev)); + err = -EINVAL; + goto out_out; + } sbm_base = ioremap(res->start, resource_size(res)); if (!sbm_base) { printk(KERN_ERR "%s: unable to map device registers\n", diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index f1d2c4cd5da2..f6fe08df568b 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1881,7 +1881,6 @@ poll_exit: return rcvd; } -#define BNAD_NAPI_POLL_QUOTA 64 static void bnad_napi_add(struct bnad *bnad, u32 rx_id) { @@ -1892,7 +1891,7 @@ bnad_napi_add(struct bnad *bnad, u32 rx_id) for (i = 0; i < bnad->num_rxp_per_rx; i++) { rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; netif_napi_add(bnad->netdev, &rx_ctrl->napi, - bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA); + bnad_napi_poll_rx, NAPI_POLL_WEIGHT); } } diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 
800d5ced5800..6434e74c04f1 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -337,11 +337,9 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) struct macb *bp = bus->priv; int status; - status = pm_runtime_get_sync(&bp->pdev->dev); - if (status < 0) { - pm_runtime_put_noidle(&bp->pdev->dev); + status = pm_runtime_resume_and_get(&bp->pdev->dev); + if (status < 0) goto mdio_pm_exit; - } status = macb_mdio_wait_for_idle(bp); if (status < 0) @@ -391,11 +389,9 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, struct macb *bp = bus->priv; int status; - status = pm_runtime_get_sync(&bp->pdev->dev); - if (status < 0) { - pm_runtime_put_noidle(&bp->pdev->dev); + status = pm_runtime_resume_and_get(&bp->pdev->dev); + if (status < 0) goto mdio_pm_exit; - } status = macb_mdio_wait_for_idle(bp); if (status < 0) @@ -1658,6 +1654,7 @@ static void macb_tx_restart(struct macb_queue *queue) unsigned int head = queue->tx_head; unsigned int tail = queue->tx_tail; struct macb *bp = queue->bp; + unsigned int head_idx, tbqp; if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, MACB_BIT(TXUBR)); @@ -1665,6 +1662,13 @@ static void macb_tx_restart(struct macb_queue *queue) if (head == tail) return; + tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp); + tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp)); + head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head)); + + if (tbqp == head_idx) + return; + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); } @@ -2745,9 +2749,9 @@ static int macb_open(struct net_device *dev) netdev_dbg(bp->dev, "open\n"); - err = pm_runtime_get_sync(&bp->pdev->dev); + err = pm_runtime_resume_and_get(&bp->pdev->dev); if (err < 0) - goto pm_exit; + return err; /* RX buffers initialization */ macb_init_rx_buffer_size(bp, bufsz); @@ -4134,11 +4138,9 @@ static int at91ether_open(struct net_device *dev) u32 ctl; int ret; - ret = pm_runtime_get_sync(&lp->pdev->dev); - if (ret < 0) { - pm_runtime_put_noidle(&lp->pdev->dev); + ret = pm_runtime_resume_and_get(&lp->pdev->dev); + if (ret < 0) return ret; - } /* Clear internal statistics */ ctl = macb_readl(lp, NCR); @@ -4586,7 +4588,7 @@ static int zynqmp_init(struct platform_device *pdev) if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { /* Ensure PS-GTR PHY device used in SGMII mode is ready */ - bp->sgmii_phy = devm_phy_get(&pdev->dev, "sgmii-phy"); + bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL); if (IS_ERR(bp->sgmii_phy)) { ret = PTR_ERR(bp->sgmii_phy); diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 457cb7121000..1281d1565ef8 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1224,7 +1224,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit) * @budget : maximum number of packets that the current CPU can receive from * all interfaces. * Description : - * This function implements the the reception process. + * This function implements the reception process. 
* Also it runs the TX completion thread */ static int xgmac_poll(struct napi_struct *napi, int budget) diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index 59683f79959c..60b648b46f75 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -483,7 +483,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, tx_info->ip_family = AF_INET; #if IS_ENABLED(CONFIG_IPV6) } else { - if (!sk->sk_ipv6only && + if (!ipv6_only_sock(sk) && ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) { memcpy(daaddr, &sk->sk_daddr, 4); tx_info->ip_family = AF_INET; diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h index 9e2378013642..41714203ace8 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h @@ -567,7 +567,7 @@ void chtls_shutdown(struct sock *sk, int how); void chtls_destroy_sock(struct sock *sk); int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int chtls_recvmsg(struct sock *sk, struct msghdr *msg, - size_t len, int nonblock, int flags, int *addr_len); + size_t len, int flags, int *addr_len); int chtls_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); int send_tx_flowc_wr(struct sock *sk, int compl, diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c index c320cc8ca68d..539992dad8ba 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c @@ -1426,7 +1426,7 @@ static void chtls_cleanup_rbuf(struct sock *sk, int copied) } static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, int *addr_len) + int flags, int *addr_len) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); struct chtls_hws *hws = &csk->tlshws; @@ -1441,7 +1441,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, buffers_freed = 0; - timeo = sock_rcvtimeo(sk, nonblock); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND))) @@ -1616,7 +1616,7 @@ skip_copy: * Peek at data in a socket's receive buffer. 
*/ static int peekmsg(struct sock *sk, struct msghdr *msg, - size_t len, int nonblock, int flags) + size_t len, int flags) { struct tcp_sock *tp = tcp_sk(sk); u32 peek_seq, offset; @@ -1626,7 +1626,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg, long timeo; lock_sock(sk); - timeo = sock_rcvtimeo(sk, nonblock); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); peek_seq = tp->copied_seq; do { @@ -1737,7 +1737,7 @@ found_ok_skb: } int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, int *addr_len) + int flags, int *addr_len) { struct tcp_sock *tp = tcp_sk(sk); struct chtls_sock *csk; @@ -1750,25 +1750,23 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, buffers_freed = 0; if (unlikely(flags & MSG_OOB)) - return tcp_prot.recvmsg(sk, msg, len, nonblock, flags, - addr_len); + return tcp_prot.recvmsg(sk, msg, len, flags, addr_len); if (unlikely(flags & MSG_PEEK)) - return peekmsg(sk, msg, len, nonblock, flags); + return peekmsg(sk, msg, len, flags); if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) && sk->sk_state == TCP_ESTABLISHED) - sk_busy_loop(sk, nonblock); + sk_busy_loop(sk, flags & MSG_DONTWAIT); lock_sock(sk); csk = rcu_dereference_sk_user_data(sk); if (is_tls_rx(csk)) - return chtls_pt_recvmsg(sk, msg, len, nonblock, - flags, addr_len); + return chtls_pt_recvmsg(sk, msg, len, flags, addr_len); - timeo = sock_rcvtimeo(sk, nonblock); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND))) diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 8014eb33937c..9e6de2f968fa 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -68,7 +68,6 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); #define DEFAULT_GMAC_RXQ_ORDER 9 #define DEFAULT_GMAC_TXQ_ORDER 8 #define DEFAULT_RX_BUF_ORDER 11 -#define DEFAULT_NAPI_WEIGHT 64 #define TX_MAX_FRAGS 16 #define TX_QUEUE_NUM 1 /* max: 6 */ #define RX_MAX_ALLOC_ORDER 2 @@ -2472,8 +2471,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) netdev->max_mtu = 10236 - VLAN_ETH_HLEN; port->freeq_refill = 0; - netif_napi_add(netdev, &port->napi, gmac_napi_poll, - DEFAULT_NAPI_WEIGHT); + netif_napi_add(netdev, &port->napi, gmac_napi_poll, NAPI_POLL_WEIGHT); ret = of_get_mac_address(np, mac); if (!ret) { diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 86b1d23eba83..1db19463fd46 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -474,8 +474,6 @@ err_out_netdev: No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that made udelay() unreliable. - The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is - deprecated. 
*/ #define eeprom_delay(ee_addr) ioread32(ee_addr) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 8689d4a51fe5..61fe9625bed1 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -101,8 +101,7 @@ #define MAX_ROCE_EQS 5 #define MAX_MSIX_VECTORS 32 #define MIN_MSIX_VECTORS 1 -#define BE_NAPI_WEIGHT 64 -#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ +#define MAX_RX_POST NAPI_POLL_WEIGHT /* Frags posted at a time */ #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) #define MAX_NUM_POST_ERX_DB 255u diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index d0c262f2695a..5939068a8f62 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2983,7 +2983,7 @@ static int be_evt_queues_create(struct be_adapter *adapter) cpumask_set_cpu(cpumask_local_spread(i, numa_node), eqo->affinity_mask); netif_napi_add(adapter->netdev, &eqo->napi, be_poll, - BE_NAPI_WEIGHT); + NAPI_POLL_WEIGHT); } return 0; } diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 904f3304727e..49c93aa38862 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -1091,8 +1091,7 @@ static int tsnep_mdio_init(struct tsnep_adapter *adapter) retval = of_mdiobus_register(adapter->mdiobus, np); out: - if (np) - of_node_put(np); + of_node_put(np); return retval; } diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index d5356db7539a..caf48023f8ea 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1835,11 +1835,6 @@ static int ftgmac100_probe(struct platform_device *pdev) priv->rxdes0_edorr_mask = BIT(30); priv->txdes0_edotr_mask = BIT(30); priv->is_aspeed = true; - /* Disable ast2600 problematic HW arbitration */ - if (of_device_is_compatible(np, "aspeed,ast2600-mac")) { - iowrite32(FTGMAC100_TM_DEFAULT, - priv->base + FTGMAC100_OFFSET_TM); - } } else { priv->rxdes0_edorr_mask = BIT(15); priv->txdes0_edotr_mask = BIT(15); @@ -1911,6 +1906,11 @@ static int ftgmac100_probe(struct platform_device *pdev) err = ftgmac100_setup_clk(priv); if (err) goto err_phy_connect; + + /* Disable ast2600 problematic HW arbitration */ + if (of_device_is_compatible(np, "aspeed,ast2600-mac")) + iowrite32(FTGMAC100_TM_DEFAULT, + priv->base + FTGMAC100_OFFSET_TM); } /* Default ring sizes */ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 763d2c7b5fb1..5750f9a56393 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev, info->phc_index = -1; fman_node = of_get_parent(mac_node); - if (fman_node) + if (fman_node) { ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0); + of_node_put(fman_node); + } - if (ptp_node) + if (ptp_node) { ptp_dev = of_find_device_by_node(ptp_node); + of_node_put(ptp_node); + } if (ptp_dev) ptp = platform_get_drvdata(ptp_dev); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c index 5f5f8c53c4a0..c8cb541572ff 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -167,7 +167,7 
@@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) base = of_iomap(node, 0); if (!base) { err = -ENOMEM; - goto err_close; + goto err_put; } err = fsl_mc_allocate_irqs(mc_dev); @@ -210,6 +210,8 @@ err_free_mc_irq: fsl_mc_free_irqs(mc_dev); err_unmap: iounmap(base); +err_put: + of_node_put(node); err_close: dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); err_free_mcp: diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 79afb1d7289b..9182631856d5 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -297,10 +297,6 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data) if (tc < 0 || tc >= priv->num_tx_rings) return -EINVAL; - /* Do not support TXSTART and TX CSUM offload simutaniously */ - if (ndev->features & NETIF_F_CSUM_MASK) - return -EBUSY; - /* TSD and Qbv are mutually exclusive in hardware */ if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE) return -EBUSY; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 11227f51404c..9f33ec838b52 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3731,7 +3731,7 @@ static int fec_enet_init_stop_mode(struct fec_enet_private *fep, ARRAY_SIZE(out_val)); if (ret) { dev_dbg(&fep->pdev->dev, "no stop mode property\n"); - return ret; + goto out; } fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 206b7a35eaf5..f0b652a65043 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -3232,7 +3232,7 @@ static int gfar_probe(struct platform_device *ofdev) /* Register for napi ...We are registering NAPI for each grp */ for (i = 0; i < priv->num_grps; i++) { netif_napi_add(dev, &priv->gfargrp[i].napi_rx, - gfar_poll_rx_sq, GFAR_DEV_WEIGHT); + gfar_poll_rx_sq, NAPI_POLL_WEIGHT); netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, gfar_poll_tx_sq, 2); } diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index ca5e14f908fe..68b59d3202e3 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -52,9 +52,6 @@ struct ethtool_rx_list { unsigned int count; }; -/* The maximum number of packets to be handled in one call of gfar_poll */ -#define GFAR_DEV_WEIGHT 64 - /* Length for FCB */ #define GMAC_FCB_LEN 8 diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.c b/drivers/net/ethernet/fungible/funcore/fun_dev.c index 5d7aef73df61..fb5120d90f26 100644 --- a/drivers/net/ethernet/fungible/funcore/fun_dev.c +++ b/drivers/net/ethernet/fungible/funcore/fun_dev.c @@ -586,8 +586,8 @@ static int fun_get_dev_limits(struct fun_dev *fdev) /* Calculate the max QID based on SQ/CQ/doorbell counts. * SQ/CQ doorbells alternate. 
*/ - num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) / - (fdev->db_stride * 4); + num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) >> + (2 + NVME_CAP_STRIDE(fdev->cap_reg)); fdev->max_qid = min3(cq_count, sq_count, num_dbs / 2) - 1; fdev->kern_end_qid = fdev->max_qid + 1; return 0; diff --git a/drivers/net/ethernet/fungible/funeth/funeth_devlink.c b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c index a849b3c6b01f..d50c222948b4 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth_devlink.c +++ b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c @@ -6,13 +6,7 @@ static int fun_dl_info_get(struct devlink *dl, struct devlink_info_req *req, struct netlink_ext_ack *extack) { - int err; - - err = devlink_info_driver_name_put(req, KBUILD_MODNAME); - if (err) - return err; - - return 0; + return devlink_info_driver_name_put(req, KBUILD_MODNAME); } static const struct devlink_ops fun_dl_ops = { diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 7edf8569514c..928d934cb21a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -1065,19 +1065,23 @@ int hns_mac_init(struct dsaf_device *dsaf_dev) device_for_each_child_node(dsaf_dev->dev, child) { ret = fwnode_property_read_u32(child, "reg", &port_id); if (ret) { + fwnode_handle_put(child); dev_err(dsaf_dev->dev, "get reg fail, ret=%d!\n", ret); return ret; } if (port_id >= max_port_num) { + fwnode_handle_put(child); dev_err(dsaf_dev->dev, "reg(%u) out of range!\n", port_id); return -EINVAL; } mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb), GFP_KERNEL); - if (!mac_cb) + if (!mac_cb) { + fwnode_handle_put(child); return -ENOMEM; + } mac_cb->fw_port = child; mac_cb->mac_id = (u8)port_id; dsaf_dev->mac_cb[port_id] = mac_cb; diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index b668df6193be..8c7fadf2b734 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -135,10 +135,19 @@ struct hclge_vf_to_pf_msg { struct hclge_pf_to_vf_msg { u16 code; - u16 vf_mbx_msg_code; - u16 vf_mbx_msg_subcode; - u16 resp_status; - u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE]; + union { + /* used for mbx response */ + struct { + u16 vf_mbx_msg_code; + u16 vf_mbx_msg_subcode; + u16 resp_status; + u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE]; + }; + /* used for general mbx */ + struct { + u8 msg_data[HCLGE_MBX_MAX_MSG_SIZE]; + }; + }; }; struct hclge_mbx_vf_to_pf_cmd { diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 79c64f4e67d2..8a3a446219f7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -96,6 +96,7 @@ enum HNAE3_DEV_CAP_BITS { HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, + HNAE3_DEV_SUPPORT_CQ_B, }; #define hnae3_dev_fd_supported(hdev) \ @@ -155,6 +156,9 @@ enum HNAE3_DEV_CAP_BITS { #define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \ test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps) +#define hnae3_ae_dev_cq_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps) + enum HNAE3_PF_CAP_BITS { HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c index 
c15ca710dabb..c8b151d29f53 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c @@ -149,6 +149,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = { {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B}, {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B}, + {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B}, }; static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { @@ -160,6 +161,7 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B}, {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B}, {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, + {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B}, }; static void diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h index 876650eddac4..7a7d4cf9bf35 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h @@ -338,6 +338,7 @@ enum HCLGE_COMM_CAP_BITS { HCLGE_COMM_CAP_PAUSE_B = 14, HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15, HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17, + HCLGE_COMM_CAP_CQ_B = 18, }; enum HCLGE_COMM_API_CAP_BITS { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c index 0c60f41fca8a..f3c9395d8351 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c @@ -75,7 +75,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, ret = hclge_comm_cmd_send(hw, &desc, 1); if (ret) { dev_err(&hw->cmq.csq.pdev->dev, - "failed to get tqp stat, ret = %d, tx = %u.\n", + "failed to get tqp stat, ret = %d, rx = %u.\n", ret, i); return ret; } @@ -89,7 +89,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, ret = hclge_comm_cmd_send(hw, &desc, 1); if (ret) { dev_err(&hw->cmq.csq.pdev->dev, - "failed to get tqp stat, ret = %d, rx = %u.\n", + "failed to get tqp stat, ret = %d, tx = %u.\n", ret, i); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 44d9b560b337..93aeb615191d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -562,12 +562,12 @@ static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf, for (i = 0; i < ring_num; i++) { j = 0; - sprintf(result[j++], "%8u", i); - sprintf(result[j++], "%9u", ring->tx_copybreak); - sprintf(result[j++], "%3u", tx_spare->len); - sprintf(result[j++], "%3u", tx_spare->next_to_use); - sprintf(result[j++], "%3u", tx_spare->next_to_clean); - sprintf(result[j++], "%3u", tx_spare->last_to_clean); + sprintf(result[j++], "%u", i); + sprintf(result[j++], "%u", ring->tx_copybreak); + sprintf(result[j++], "%u", tx_spare->len); + sprintf(result[j++], "%u", tx_spare->next_to_use); + sprintf(result[j++], "%u", tx_spare->next_to_clean); + sprintf(result[j++], "%u", tx_spare->last_to_clean); sprintf(result[j++], "%pad", &tx_spare->dma); hns3_dbg_fill_content(content, sizeof(content), tx_spare_info_items, @@ -598,35 +598,35 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring, u32 base_add_l, base_add_h; 
u32 j = 0; - sprintf(result[j++], "%8u", index); + sprintf(result[j++], "%u", index); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_BD_NUM_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_BD_LEN_REG)); - sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG)); - sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_PKTNUM_RECORD_REG)); - sprintf(result[j++], "%9u", ring->rx_copybreak); + sprintf(result[j++], "%u", ring->rx_copybreak); - sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + HNS3_RING_EN_REG) ? "on" : "off"); if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) - sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_EN_REG) ? "on" : "off"); else - sprintf(result[j++], "%10s", "NA"); + sprintf(result[j++], "%s", "NA"); base_add_h = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_BASEADDR_H_REG); @@ -700,36 +700,36 @@ static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring, u32 base_add_l, base_add_h; u32 j = 0; - sprintf(result[j++], "%8u", index); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", index); + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_BD_NUM_REG)); - sprintf(result[j++], "%2u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG)); - sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG)); - sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_FBDNUM_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_OFFSET_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_PKTNUM_RECORD_REG)); - sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + HNS3_RING_EN_REG) ? "on" : "off"); if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) - sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_EN_REG) ? 
"on" : "off"); else - sprintf(result[j++], "%10s", "NA"); + sprintf(result[j++], "%s", "NA"); base_add_h = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_BASEADDR_H_REG); @@ -848,15 +848,15 @@ static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv, { unsigned int j = 0; - sprintf(result[j++], "%5d", idx); + sprintf(result[j++], "%d", idx); sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info)); - sprintf(result[j++], "%7u", le16_to_cpu(desc->rx.pkt_len)); - sprintf(result[j++], "%4u", le16_to_cpu(desc->rx.size)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size)); sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->rx.fd_id)); - sprintf(result[j++], "%8u", le16_to_cpu(desc->rx.vlan_tag)); - sprintf(result[j++], "%15u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb)); - sprintf(result[j++], "%11u", le16_to_cpu(desc->rx.ot_vlan_tag)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag)); sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info)); if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { u32 ol_info = le32_to_cpu(desc->rx.ol_info); @@ -930,19 +930,19 @@ static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv, { unsigned int j = 0; - sprintf(result[j++], "%6d", idx); + sprintf(result[j++], "%d", idx); sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.vlan_tag)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.send_size)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size)); sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.type_cs_vlan_tso_len)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.outer_vlan_tag)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.tv)); - sprintf(result[j++], "%10u", + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv)); + sprintf(result[j++], "%u", le32_to_cpu(desc->tx.ol_type_vlan_len_msec)); sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs)); sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.mss_hw_csum)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum)); } static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 14dc12c2155d..ae56306400b8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -5159,10 +5159,7 @@ static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv, priv->tqp_vector[i].rx_group.dim.mode = mode; } - /* only device version above V3(include V3), GL can switch CQ/EQ - * period mode. 
- */ - if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { + if (hnae3_ae_dev_cq_supported(ae_dev)) { u32 new_mode; u64 reg; @@ -5203,6 +5200,13 @@ static void hns3_state_init(struct hnae3_handle *handle) set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); } +static void hns3_state_uninit(struct hnae3_handle *handle) +{ + struct hns3_nic_priv *priv = handle->priv; + + clear_bit(HNS3_NIC_STATE_INITED, &priv->state); +} + static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; @@ -5320,7 +5324,9 @@ static int hns3_client_init(struct hnae3_handle *handle) return ret; out_reg_netdev_fail: + hns3_state_uninit(handle); hns3_dbg_uninit(handle); + hns3_client_stop(handle); out_client_start: hns3_free_rx_cpu_rmap(netdev); hns3_nic_uninit_irq(priv); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index f4da77452126..1db8a86f046d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -664,6 +664,8 @@ static void hns3_get_ringparam(struct net_device *netdev, param->tx_pending = priv->ring[0].desc_num; param->rx_pending = priv->ring[rx_queue_index].desc_num; kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size; + kernel_param->tx_push = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, + &priv->state); } static void hns3_get_pauseparam(struct net_device *netdev, @@ -1104,6 +1106,36 @@ static int hns3_check_ringparam(struct net_device *ndev, return 0; } +static bool +hns3_is_ringparam_changed(struct net_device *ndev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct hns3_ring_param *old_ringparam, + struct hns3_ring_param *new_ringparam) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + u16 queue_num = h->kinfo.num_tqps; + + new_ringparam->tx_desc_num = ALIGN(param->tx_pending, + HNS3_RING_BD_MULTIPLE); + new_ringparam->rx_desc_num = ALIGN(param->rx_pending, + HNS3_RING_BD_MULTIPLE); + old_ringparam->tx_desc_num = priv->ring[0].desc_num; + old_ringparam->rx_desc_num = priv->ring[queue_num].desc_num; + old_ringparam->rx_buf_len = priv->ring[queue_num].buf_size; + new_ringparam->rx_buf_len = kernel_param->rx_buf_len; + + if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num && + old_ringparam->rx_desc_num == new_ringparam->rx_desc_num && + old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) { + netdev_info(ndev, "ringparam not changed\n"); + return false; + } + + return true; +} + static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len) { struct hns3_nic_priv *priv = netdev_priv(ndev); @@ -1120,62 +1152,80 @@ static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len) return 0; } +static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); + + if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push) + return -EOPNOTSUPP; + + if (tx_push == old_state) + return 0; + + netdev_dbg(netdev, "Changing tx push from %s to %s\n", + old_state ? "on" : "off", tx_push ? 
"on" : "off"); + + if (tx_push) + set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); + else + clear_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); + + return 0; +} + static int hns3_set_ringparam(struct net_device *ndev, struct ethtool_ringparam *param, struct kernel_ethtool_ringparam *kernel_param, struct netlink_ext_ack *extack) { + struct hns3_ring_param old_ringparam, new_ringparam; struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_ring *tmp_rings; bool if_running = netif_running(ndev); - u32 old_tx_desc_num, new_tx_desc_num; - u32 old_rx_desc_num, new_rx_desc_num; - u16 queue_num = h->kinfo.num_tqps; - u32 old_rx_buf_len; int ret, i; ret = hns3_check_ringparam(ndev, param, kernel_param); if (ret) return ret; - /* Hardware requires that its descriptors must be multiple of eight */ - new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE); - new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE); - old_tx_desc_num = priv->ring[0].desc_num; - old_rx_desc_num = priv->ring[queue_num].desc_num; - old_rx_buf_len = priv->ring[queue_num].buf_size; - if (old_tx_desc_num == new_tx_desc_num && - old_rx_desc_num == new_rx_desc_num && - kernel_param->rx_buf_len == old_rx_buf_len) + ret = hns3_set_tx_push(ndev, kernel_param->tx_push); + if (ret) + return ret; + + if (!hns3_is_ringparam_changed(ndev, param, kernel_param, + &old_ringparam, &new_ringparam)) return 0; tmp_rings = hns3_backup_ringparam(priv); if (!tmp_rings) { - netdev_err(ndev, - "backup ring param failed by allocating memory fail\n"); + netdev_err(ndev, "backup ring param failed by allocating memory fail\n"); return -ENOMEM; } netdev_info(ndev, - "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %d to %d\n", - old_tx_desc_num, old_rx_desc_num, - new_tx_desc_num, new_rx_desc_num, - old_rx_buf_len, kernel_param->rx_buf_len); + "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %u to %u\n", + old_ringparam.tx_desc_num, old_ringparam.rx_desc_num, + new_ringparam.tx_desc_num, new_ringparam.rx_desc_num, + old_ringparam.rx_buf_len, new_ringparam.rx_buf_len); if (if_running) ndev->netdev_ops->ndo_stop(ndev); - hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num); - hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len); + hns3_change_all_ring_bd_num(priv, new_ringparam.tx_desc_num, + new_ringparam.rx_desc_num); + hns3_change_rx_buf_len(ndev, new_ringparam.rx_buf_len); ret = hns3_init_all_ring(priv); if (ret) { netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n", ret); - hns3_change_rx_buf_len(ndev, old_rx_buf_len); - hns3_change_all_ring_bd_num(priv, old_tx_desc_num, - old_rx_desc_num); + hns3_change_rx_buf_len(ndev, old_ringparam.rx_buf_len); + hns3_change_all_ring_bd_num(priv, old_ringparam.tx_desc_num, + old_ringparam.rx_desc_num); for (i = 0; i < h->kinfo.num_tqps * 2; i++) memcpy(&priv->ring[i], &tmp_rings[i], sizeof(struct hns3_enet_ring)); @@ -1385,11 +1435,33 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev, return 0; } -static int hns3_check_coalesce_para(struct net_device *netdev, - struct ethtool_coalesce *cmd) +static int +hns3_check_cqe_coalesce_param(struct net_device *netdev, + struct kernel_ethtool_coalesce *kernel_coal) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + + if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) && + 
!hnae3_ae_dev_cq_supported(ae_dev)) { + netdev_err(netdev, "coalesced cqe mode is not supported\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int +hns3_check_coalesce_para(struct net_device *netdev, + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal) { int ret; + ret = hns3_check_cqe_coalesce_param(netdev, kernel_coal); + if (ret) + return ret; + ret = hns3_check_gl_coalesce_para(netdev, cmd); if (ret) { netdev_err(netdev, @@ -1464,7 +1536,7 @@ static int hns3_set_coalesce(struct net_device *netdev, if (hns3_nic_resetting(netdev)) return -EBUSY; - ret = hns3_check_coalesce_para(netdev, cmd); + ret = hns3_check_coalesce_para(netdev, cmd, kernel_coal); if (ret) return ret; @@ -1825,23 +1897,27 @@ static int hns3_set_tunable(struct net_device *netdev, case ETHTOOL_TX_COPYBREAK_BUF_SIZE: old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size; new_tx_spare_buf_size = *(u32 *)data; + netdev_info(netdev, "request to set tx spare buf size from %u to %u\n", + old_tx_spare_buf_size, new_tx_spare_buf_size); ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size); if (ret || (!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) { int ret1; - netdev_warn(netdev, - "change tx spare buf size fail, revert to old value\n"); + netdev_warn(netdev, "change tx spare buf size fail, revert to old value\n"); ret1 = hns3_set_tx_spare_buf_size(netdev, old_tx_spare_buf_size); if (ret1) { - netdev_err(netdev, - "revert to old tx spare buf size fail\n"); + netdev_err(netdev, "revert to old tx spare buf size fail\n"); return ret1; } return ret; } + + netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n", + priv->ring->tx_spare->len); + break; default: ret = -EOPNOTSUPP; @@ -1858,7 +1934,8 @@ static int hns3_set_tunable(struct net_device *netdev, ETHTOOL_COALESCE_MAX_FRAMES | \ ETHTOOL_COALESCE_USE_CQE) -#define HNS3_ETHTOOL_RING ETHTOOL_RING_USE_RX_BUF_LEN +#define HNS3_ETHTOOL_RING (ETHTOOL_RING_USE_RX_BUF_LEN | \ + ETHTOOL_RING_USE_TX_PUSH) static int hns3_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h index 822d6fcbc73b..da207d1d9aa9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h @@ -28,4 +28,10 @@ struct hns3_ethtool_link_ext_state_mapping { u8 link_ext_substate; }; +struct hns3_ring_param { + u32 tx_desc_num; + u32 rx_desc_num; + u32 rx_buf_len; +}; + #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 42a9e73d8588..6efd768cc07c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -1977,7 +1977,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, * @num: number of extended command structures * * This function handles all the PF RAS errors in the - * hw register/s using command. + * hw registers using command. 
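The hns3_check_cqe_coalesce_param() addition above belongs to a wider move from firmware-version compares to explicit capability bits (HCLGE_COMM_CAP_CQ_B mapped to HNAE3_DEV_SUPPORT_CQ_B in the earlier hunks). A minimal sketch of that gating pattern, with hypothetical names:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

enum demo_caps { DEMO_CAP_CQ_B, DEMO_CAP_MAX };

struct demo_ae_dev {
	DECLARE_BITMAP(caps, DEMO_CAP_MAX);	/* populated from firmware */
};

/* Refuse CQE-mode coalescing unless the device advertised the bit,
 * instead of assuming every device at or above some version has it.
 */
static int demo_check_cqe_mode(struct demo_ae_dev *ae_dev, bool use_cqe)
{
	if (use_cqe && !test_bit(DEMO_CAP_CQ_B, ae_dev->caps))
		return -EOPNOTSUPP;

	return 0;
}

The benefit over a dev_version >= V3 check is that firmware lacking the feature on newer silicon simply leaves the bit clear.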
*/ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, struct hclge_desc *desc, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 8cebb180c812..a5dd2c8c244a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -10449,6 +10449,9 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) /* PF's mps must be greater then VF's mps */ for (i = 1; i < hdev->num_alloc_vport; i++) if (max_frm_size < hdev->vport[i].mps) { + dev_err(&hdev->pdev->dev, + "failed to set pf mtu for less than vport %d, mps = %u.\n", + i, hdev->vport[i].mps); mutex_unlock(&hdev->vport_lock); return -EINVAL; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 6799d16de34b..49c40744cda5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -94,6 +94,13 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, enum hclge_comm_cmd_status status; struct hclge_desc desc; + if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) { + dev_err(&hdev->pdev->dev, + "msg data length(=%u) exceeds maximum(=%u)\n", + msg_len, HCLGE_MBX_MAX_MSG_SIZE); + return -EMSGSIZE; + } + resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false); @@ -102,7 +109,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, resp_pf_to_vf->msg_len = msg_len; resp_pf_to_vf->msg.code = mbx_opcode; - memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len); + memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len); trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf); @@ -176,7 +183,7 @@ static int hclge_get_ring_chain_from_mbx( ring_num = req->msg.ring_num; if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM) - return -ENOMEM; + return -EINVAL; for (i = 0; i < ring_num; i++) { if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) { @@ -587,9 +594,9 @@ static int hclge_set_vf_mtu(struct hclge_vport *vport, return hclge_set_vport_mtu(vport, mtu); } -static void hclge_get_queue_id_in_pf(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - struct hclge_respond_to_vf_msg *resp_msg) +static int hclge_get_queue_id_in_pf(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) { struct hnae3_handle *handle = &vport->nic; struct hclge_dev *hdev = vport->back; @@ -599,17 +606,18 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport, if (queue_id >= handle->kinfo.num_tqps) { dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n", queue_id, mbx_req->mbx_src_vfid); - return; + return -EINVAL; } qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id); memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf)); resp_msg->len = sizeof(qid_in_pf); + return 0; } -static void hclge_get_rss_key(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - struct hclge_respond_to_vf_msg *resp_msg) +static int hclge_get_rss_key(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) { #define HCLGE_RSS_MBX_RESP_LEN 8 struct hclge_dev *hdev = vport->back; @@ -627,13 +635,14 @@ static void hclge_get_rss_key(struct hclge_vport *vport, dev_warn(&hdev->pdev->dev, "failed to get the rss hash key, the 
index(%u) invalid !\n", index); - return; + return -EINVAL; } memcpy(resp_msg->data, &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN], HCLGE_RSS_MBX_RESP_LEN); resp_msg->len = HCLGE_RSS_MBX_RESP_LEN; + return 0; } static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code) @@ -809,10 +818,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev) "VF fail(%d) to set mtu\n", ret); break; case HCLGE_MBX_GET_QID_IN_PF: - hclge_get_queue_id_in_pf(vport, req, &resp_msg); + ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg); break; case HCLGE_MBX_GET_RSS_KEY: - hclge_get_rss_key(vport, req, &resp_msg); + ret = hclge_get_rss_key(vport, req, &resp_msg); break; case HCLGE_MBX_GET_LINK_MODE: hclge_get_link_mode(vport, req); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 342d7cdf6285..e13d71abd9f7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2963,7 +2963,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - /* ensure vf tbl list as empty before init*/ + /* ensure vf tbl list as empty before init */ ret = hclgevf_clear_vport_list(hdev); if (ret) { dev_err(&pdev->dev, @@ -3315,7 +3315,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, for (i = 0; i < reg_um; i++) *reg++ = hclgevf_read_dev(&hdev->hw, ring_reg_addr_list[i] + - 0x200 * j); + HCLGEVF_TQP_REG_SIZE * j); for (i = 0; i < separator_num; i++) *reg++ = SEPARATOR_VALUE; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index d5e0a3f762f7..c8055d69255c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -17,7 +17,7 @@ static int hclgevf_resp_to_errno(u16 resp_code) static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) { /* this function should be called with mbx_resp.mbx_mutex held - * to prtect the received_response from race condition + * to protect the received_response from race condition */ hdev->mbx_resp.received_resp = false; hdev->mbx_resp.origin_mbx_msg = 0; @@ -32,8 +32,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) /* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox * message to PF. * @hdev: pointer to struct hclgevf_dev - * @resp_msg: pointer to store the original message type and response status - * @len: the resp_msg data array length. + * @code0: the message opcode VF send to PF. + * @code1: the message sub-opcode VF send to PF. + * @resp_data: pointer to store response data from PF to VF. + * @resp_len: the length of resp_data from PF to VF. 
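The hclge_mbx.c changes above turn the void query handlers into ones that return an error, so hclge_mbx_handler() can propagate validation failures back to the requesting VF. A condensed sketch of the pattern, with hypothetical names and a trimmed-down response struct:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_resp {
	u16 len;
	u8 data[8];
};

/* The handler now reports a bad queue id instead of silently leaving
 * the response empty; the caller folds the code into resp_status.
 */
static int demo_get_qid_in_pf(u16 queue_id, u16 num_tqps,
			      struct demo_resp *resp)
{
	u16 qid_in_pf;

	if (queue_id >= num_tqps)
		return -EINVAL;

	qid_in_pf = queue_id;	/* global-qid translation elided */
	memcpy(resp->data, &qid_in_pf, sizeof(qid_in_pf));
	resp->len = sizeof(qid_in_pf);

	return 0;
}

With the old void signatures the VF received a successful zero-length reply for an invalid index; now it sees -EINVAL.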
*/ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, u8 *resp_data, u16 resp_len) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 77683909ca3d..3699c5435396 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -257,12 +257,14 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb, int size) { struct device *dev = &adapter->vdev->dev; + u64 prev = 0; int rc; if (!reuse_ltb(ltb, size)) { dev_dbg(dev, "LTB size changed from 0x%llx to 0x%x, reallocating\n", ltb->size, size); + prev = ltb->size; free_long_term_buff(adapter, ltb); } @@ -283,8 +285,8 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, bitmap_set(adapter->map_ids, ltb->map_id, 1); dev_dbg(dev, - "Allocated new LTB [map %d, size 0x%llx]\n", - ltb->map_id, ltb->size); + "Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n", + ltb->map_id, ltb->size, prev); } /* Ensure ltb is zeroed - specially when reusing it. */ @@ -345,6 +347,208 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, ltb->map_id = 0; } +/** + * free_ltb_set - free the given set of long term buffers (LTBs) + * @adapter: The ibmvnic adapter containing this ltb set + * @ltb_set: The ltb_set to be freed + * + * Free the set of LTBs in the given set. + */ + +static void free_ltb_set(struct ibmvnic_adapter *adapter, + struct ibmvnic_ltb_set *ltb_set) +{ + int i; + + for (i = 0; i < ltb_set->num_ltbs; i++) + free_long_term_buff(adapter, &ltb_set->ltbs[i]); + + kfree(ltb_set->ltbs); + ltb_set->ltbs = NULL; + ltb_set->num_ltbs = 0; +} + +/** + * alloc_ltb_set() - Allocate a set of long term buffers (LTBs) + * + * @adapter: ibmvnic adapter associated to the LTB + * @ltb_set: container object for the set of LTBs + * @num_buffs: Number of buffers in the LTB + * @buff_size: Size of each buffer in the LTB + * + * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size + * each. We currently cap the size of each LTB at IBMVNIC_ONE_LTB_SIZE. If the + * new set of LTBs has fewer LTBs than the old set, free the excess LTBs. + * If the new set needs more than the old set, allocate the remaining ones. + * Try to reuse as many LTBs as possible and avoid reallocation. + * + * Any changes to this allocation strategy must be reflected in + * map_rxpool_buff_to_ltb() and map_txpool_buff_to_ltb().
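A condensed sketch of the sizing math the implementation below performs before touching any LTB, assuming 0 < buff_size <= one_ltb_size; the helper name is illustrative:

#include <linux/kernel.h>

/* Split num_buffs * buff_size across LTBs of at most one_ltb_size
 * bytes, rounding each LTB down to a whole number of buffers so no
 * buffer straddles two DMA regions.
 */
static int demo_count_ltbs(int num_buffs, int buff_size, int one_ltb_size)
{
	int ltb_size = rounddown(one_ltb_size, buff_size);
	int tot_size = num_buffs * buff_size;

	if (ltb_size > tot_size)
		ltb_size = tot_size;

	return DIV_ROUND_UP(tot_size, ltb_size);
}

With 4096 buffers of 9588 bytes and an 8 MB cap this yields 5 LTBs per pool, matching the budget worked out in the ibmvnic.h comment later in this patch.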
+ */ +static int alloc_ltb_set(struct ibmvnic_adapter *adapter, + struct ibmvnic_ltb_set *ltb_set, int num_buffs, + int buff_size) +{ + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_ltb_set old_set; + struct ibmvnic_ltb_set new_set; + int rem_size; + int tot_size; /* size of all ltbs */ + int ltb_size; /* size of one ltb */ + int nltbs; + int rc; + int n; + int i; + + dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs, + buff_size); + + ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size); + tot_size = num_buffs * buff_size; + + if (ltb_size > tot_size) + ltb_size = tot_size; + + nltbs = tot_size / ltb_size; + if (tot_size % ltb_size) + nltbs++; + + old_set = *ltb_set; + + if (old_set.num_ltbs == nltbs) { + new_set = old_set; + } else { + int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff); + + new_set.ltbs = kzalloc(tmp, GFP_KERNEL); + if (!new_set.ltbs) + return -ENOMEM; + + new_set.num_ltbs = nltbs; + + /* Free any excess ltbs in old set */ + for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++) + free_long_term_buff(adapter, &old_set.ltbs[i]); + + /* Copy remaining ltbs to new set. All LTBs except the + * last one are of the same size. alloc_long_term_buff() + * will realloc if the size changes. + */ + n = min(old_set.num_ltbs, new_set.num_ltbs); + for (i = 0; i < n; i++) + new_set.ltbs[i] = old_set.ltbs[i]; + + /* Any additional ltbs in new set will have NULL ltbs for + * now and will be allocated in alloc_long_term_buff(). + */ + + /* We no longer need the old_set so free it. Note that we + * may have reused some ltbs from old set and freed excess + * ltbs above. So we only need to free the container now, + * not the LTBs themselves. (i.e. don't free_ltb_set()!) + */ + kfree(old_set.ltbs); + old_set.ltbs = NULL; + old_set.num_ltbs = 0; + + /* Install the new set. If allocations fail below, we will + * retry later and know what size LTBs we need. + */ + *ltb_set = new_set; + } + + i = 0; + rem_size = tot_size; + while (rem_size) { + if (ltb_size > rem_size) + ltb_size = rem_size; + + rem_size -= ltb_size; + + rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size); + if (rc) + goto out; + i++; + } + + WARN_ON(i != new_set.num_ltbs); + + return 0; +out: + /* We may have allocated one/more LTBs before failing and we + * want to try and reuse on next reset. So don't free ltb set. + */ + return rc; +} + +/** + * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB. + * @rxpool: The receive buffer pool containing buffer + * @bufidx: Index of buffer in rxpool + * @ltbp: (Output) pointer to the long term buffer containing the buffer + * @offset: (Output) offset of buffer in the LTB from @ltbp + * + * Map the given buffer identified by [rxpool, bufidx] to an LTB in the + * pool and its corresponding offset. Assume for now that the LTBs may be + * of different sizes but this could possibly be optimized based on the + * allocation strategy in alloc_ltb_set().
+ */ +static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool, + unsigned int bufidx, + struct ibmvnic_long_term_buff **ltbp, + unsigned int *offset) +{ + struct ibmvnic_long_term_buff *ltb; + int nbufs; /* # of buffers in one ltb */ + int i; + + WARN_ON(bufidx >= rxpool->size); + + for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) { + ltb = &rxpool->ltb_set.ltbs[i]; + nbufs = ltb->size / rxpool->buff_size; + if (bufidx < nbufs) + break; + bufidx -= nbufs; + } + + *ltbp = ltb; + *offset = bufidx * rxpool->buff_size; +} + +/** + * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB. + * @txpool: The transmit buffer pool containing buffer + * @bufidx: Index of buffer in txpool + * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer + * @offset: (Output) offset of buffer in the LTB from @ltbp + * + * Map the given buffer identified by [txpool, bufidx] to an LTB in the + * pool and its corresponding offset. + */ +static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool, + unsigned int bufidx, + struct ibmvnic_long_term_buff **ltbp, + unsigned int *offset) +{ + struct ibmvnic_long_term_buff *ltb; + int nbufs; /* # of buffers in one ltb */ + int i; + + WARN_ON_ONCE(bufidx >= txpool->num_buffers); + + for (i = 0; i < txpool->ltb_set.num_ltbs; i++) { + ltb = &txpool->ltb_set.ltbs[i]; + nbufs = ltb->size / txpool->buf_size; + if (bufidx < nbufs) + break; + bufidx -= nbufs; + } + + *ltbp = ltb; + *offset = bufidx * txpool->buf_size; +} + static void deactivate_rx_pools(struct ibmvnic_adapter *adapter) { int i; @@ -361,6 +565,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, struct device *dev = &adapter->vdev->dev; struct ibmvnic_ind_xmit_queue *ind_bufp; struct ibmvnic_sub_crq_queue *rx_scrq; + struct ibmvnic_long_term_buff *ltb; union sub_crq *sub_crq; int buffers_added = 0; unsigned long lpar_rc; @@ -369,7 +574,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, dma_addr_t dma_addr; unsigned char *dst; int shift = 0; - int index; + int bufidx; int i; if (!pool->active) @@ -385,14 +590,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, * be 0. */ for (i = ind_bufp->index; i < count; ++i) { - index = pool->free_map[pool->next_free]; + bufidx = pool->free_map[pool->next_free]; /* We maybe reusing the skb from earlier resets. Allocate * only if necessary. But since the LTB may have changed * during reset (see init_rx_pools()), update LTB below * even if reusing skb. 
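For concreteness, a tiny worked example of the walk the two map functions above share (sizes hypothetical; the real code reads them from the pool):

/* With 8 MB LTBs and 9588-byte buffers, each LTB holds 874 buffers.
 * The subtraction loop, unlike a plain div/mod, also copes with a
 * smaller final LTB; with equal-sized LTBs the two are equivalent.
 */
static void demo_map(unsigned int bufidx, unsigned int buff_size,
		     unsigned int nbufs_per_ltb,
		     unsigned int *ltb_idx, unsigned int *offset)
{
	*ltb_idx = 0;
	while (bufidx >= nbufs_per_ltb) {	/* assumes equal-sized LTBs */
		bufidx -= nbufs_per_ltb;
		(*ltb_idx)++;
	}
	*offset = bufidx * buff_size;
}

demo_map(2000, 9588, 874, ...) lands on LTB 2 at offset 252 * 9588, i.e. the 253rd buffer of the third region.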
*/ - skb = pool->rx_buff[index].skb; + skb = pool->rx_buff[bufidx].skb; if (!skb) { skb = netdev_alloc_skb(adapter->netdev, pool->buff_size); @@ -407,26 +612,26 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, pool->next_free = (pool->next_free + 1) % pool->size; /* Copy the skb to the long term mapped DMA buffer */ - offset = index * pool->buff_size; - dst = pool->long_term_buff.buff + offset; + map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset); + dst = ltb->buff + offset; memset(dst, 0, pool->buff_size); - dma_addr = pool->long_term_buff.addr + offset; + dma_addr = ltb->addr + offset; /* add the skb to an rx_buff in the pool */ - pool->rx_buff[index].data = dst; - pool->rx_buff[index].dma = dma_addr; - pool->rx_buff[index].skb = skb; - pool->rx_buff[index].pool_index = pool->index; - pool->rx_buff[index].size = pool->buff_size; + pool->rx_buff[bufidx].data = dst; + pool->rx_buff[bufidx].dma = dma_addr; + pool->rx_buff[bufidx].skb = skb; + pool->rx_buff[bufidx].pool_index = pool->index; + pool->rx_buff[bufidx].size = pool->buff_size; /* queue the rx_buff for the next send_subcrq_indirect */ sub_crq = &ind_bufp->indir_arr[ind_bufp->index++]; memset(sub_crq, 0, sizeof(*sub_crq)); sub_crq->rx_add.first = IBMVNIC_CRQ_CMD; sub_crq->rx_add.correlator = - cpu_to_be64((u64)&pool->rx_buff[index]); + cpu_to_be64((u64)&pool->rx_buff[bufidx]); sub_crq->rx_add.ioba = cpu_to_be32(dma_addr); - sub_crq->rx_add.map_id = pool->long_term_buff.map_id; + sub_crq->rx_add.map_id = ltb->map_id; /* The length field of the sCRQ is defined to be 24 bits so the * buffer size needs to be left shifted by a byte before it is @@ -466,10 +671,10 @@ failure: sub_crq = &ind_bufp->indir_arr[i]; rx_buff = (struct ibmvnic_rx_buff *) be64_to_cpu(sub_crq->rx_add.correlator); - index = (int)(rx_buff - pool->rx_buff); - pool->free_map[pool->next_free] = index; - dev_kfree_skb_any(pool->rx_buff[index].skb); - pool->rx_buff[index].skb = NULL; + bufidx = (int)(rx_buff - pool->rx_buff); + pool->free_map[pool->next_free] = bufidx; + dev_kfree_skb_any(pool->rx_buff[bufidx].skb); + pool->rx_buff[bufidx].skb = NULL; } adapter->replenish_add_buff_failure += ind_bufp->index; atomic_add(buffers_added, &pool->available); @@ -579,7 +784,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter) kfree(rx_pool->free_map); - free_long_term_buff(adapter, &rx_pool->long_term_buff); + free_ltb_set(adapter, &rx_pool->ltb_set); if (!rx_pool->rx_buff) continue; @@ -724,8 +929,8 @@ update_ltb: dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n", i, rx_pool->size, rx_pool->buff_size); - rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff, - rx_pool->size * rx_pool->buff_size); + rc = alloc_ltb_set(adapter, &rx_pool->ltb_set, + rx_pool->size, rx_pool->buff_size); if (rc) goto out; @@ -782,7 +987,7 @@ static void release_one_tx_pool(struct ibmvnic_adapter *adapter, { kfree(tx_pool->tx_buff); kfree(tx_pool->free_map); - free_long_term_buff(adapter, &tx_pool->long_term_buff); + free_ltb_set(adapter, &tx_pool->ltb_set); } /** @@ -972,17 +1177,16 @@ update_ltb: for (i = 0; i < num_pools; i++) { struct ibmvnic_tx_pool *tso_pool; struct ibmvnic_tx_pool *tx_pool; - u32 ltb_size; tx_pool = &adapter->tx_pool[i]; - ltb_size = tx_pool->num_buffers * tx_pool->buf_size; - if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, - ltb_size)) - goto out; - dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n", - i, tx_pool->long_term_buff.buff, - tx_pool->num_buffers, tx_pool->buf_size); + dev_dbg(dev, "Updating LTB for tx pool %d
[%d, %d]\n", + i, tx_pool->num_buffers, tx_pool->buf_size); + + rc = alloc_ltb_set(adapter, &tx_pool->ltb_set, + tx_pool->num_buffers, tx_pool->buf_size); + if (rc) + goto out; tx_pool->consumer_index = 0; tx_pool->producer_index = 0; @@ -991,14 +1195,14 @@ update_ltb: tx_pool->free_map[j] = j; tso_pool = &adapter->tso_pool[i]; - ltb_size = tso_pool->num_buffers * tso_pool->buf_size; - if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff, - ltb_size)) - goto out; - dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n", - i, tso_pool->long_term_buff.buff, - tso_pool->num_buffers, tso_pool->buf_size); + dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n", + i, tso_pool->num_buffers, tso_pool->buf_size); + + rc = alloc_ltb_set(adapter, &tso_pool->ltb_set, + tso_pool->num_buffers, tso_pool->buf_size); + if (rc) + goto out; tso_pool->consumer_index = 0; tso_pool->producer_index = 0; @@ -1911,6 +2115,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) struct ibmvnic_ind_xmit_queue *ind_bufp; struct ibmvnic_tx_buff *tx_buff = NULL; struct ibmvnic_sub_crq_queue *tx_scrq; + struct ibmvnic_long_term_buff *ltb; struct ibmvnic_tx_pool *tx_pool; unsigned int tx_send_failed = 0; netdev_tx_t ret = NETDEV_TX_OK; @@ -1926,7 +2131,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) unsigned int offset; int num_entries = 1; unsigned char *dst; - int index = 0; + int bufidx = 0; u8 proto = 0; /* If a reset is in progress, drop the packet since @@ -1960,9 +2165,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) else tx_pool = &adapter->tx_pool[queue_num]; - index = tx_pool->free_map[tx_pool->consumer_index]; + bufidx = tx_pool->free_map[tx_pool->consumer_index]; - if (index == IBMVNIC_INVALID_MAP) { + if (bufidx == IBMVNIC_INVALID_MAP) { dev_kfree_skb_any(skb); tx_send_failed++; tx_dropped++; @@ -1973,10 +2178,11 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP; - offset = index * tx_pool->buf_size; - dst = tx_pool->long_term_buff.buff + offset; + map_txpool_buf_to_ltb(tx_pool, bufidx, &ltb, &offset); + + dst = ltb->buff + offset; memset(dst, 0, tx_pool->buf_size); - data_dma_addr = tx_pool->long_term_buff.addr + offset; + data_dma_addr = ltb->addr + offset; if (skb_shinfo(skb)->nr_frags) { int cur, i; @@ -2003,9 +2209,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_pool->consumer_index = (tx_pool->consumer_index + 1) % tx_pool->num_buffers; - tx_buff = &tx_pool->tx_buff[index]; + tx_buff = &tx_pool->tx_buff[bufidx]; tx_buff->skb = skb; - tx_buff->index = index; + tx_buff->index = bufidx; tx_buff->pool_index = queue_num; memset(&tx_crq, 0, sizeof(tx_crq)); @@ -2017,10 +2223,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) if (skb_is_gso(skb)) tx_crq.v1.correlator = - cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK); + cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK); else - tx_crq.v1.correlator = cpu_to_be32(index); - tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); + tx_crq.v1.correlator = cpu_to_be32(bufidx); + tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id); tx_crq.v1.sge_len = cpu_to_be32(skb->len); tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); @@ -3210,13 +3416,8 @@ static void ibmvnic_get_ringparam(struct net_device *netdev, { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - if (adapter->priv_flags &
IBMVNIC_USE_SERVER_MAXES) { - ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; - ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; - } else { - ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ; - ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ; - } + ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; + ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; @@ -3231,23 +3432,21 @@ static int ibmvnic_set_ringparam(struct net_device *netdev, struct netlink_ext_ack *extack) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - int ret; - ret = 0; + if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq || + ring->tx_pending > adapter->max_tx_entries_per_subcrq) { + netdev_err(netdev, "Invalid request.\n"); + netdev_err(netdev, "Max tx buffers = %llu\n", + adapter->max_rx_add_entries_per_subcrq); + netdev_err(netdev, "Max rx buffers = %llu\n", + adapter->max_tx_entries_per_subcrq); + return -EINVAL; + } + adapter->desired.rx_entries = ring->rx_pending; adapter->desired.tx_entries = ring->tx_pending; - ret = wait_for_reset(adapter); - - if (!ret && - (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending || - adapter->req_tx_entries_per_subcrq != ring->tx_pending)) - netdev_info(netdev, - "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", - ring->rx_pending, ring->tx_pending, - adapter->req_rx_add_entries_per_subcrq, - adapter->req_tx_entries_per_subcrq); - return ret; + return wait_for_reset(adapter); } static void ibmvnic_get_channels(struct net_device *netdev, @@ -3255,14 +3454,8 @@ static void ibmvnic_get_channels(struct net_device *netdev, { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { - channels->max_rx = adapter->max_rx_queues; - channels->max_tx = adapter->max_tx_queues; - } else { - channels->max_rx = IBMVNIC_MAX_QUEUES; - channels->max_tx = IBMVNIC_MAX_QUEUES; - } - + channels->max_rx = adapter->max_rx_queues; + channels->max_tx = adapter->max_tx_queues; channels->max_other = 0; channels->max_combined = 0; channels->rx_count = adapter->req_rx_queues; @@ -3275,22 +3468,11 @@ static int ibmvnic_set_channels(struct net_device *netdev, struct ethtool_channels *channels) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - int ret; - ret = 0; adapter->desired.rx_queues = channels->rx_count; adapter->desired.tx_queues = channels->tx_count; - ret = wait_for_reset(adapter); - - if (!ret && - (adapter->req_rx_queues != channels->rx_count || - adapter->req_tx_queues != channels->tx_count)) - netdev_info(netdev, - "Could not match full channels request. 
Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", - channels->rx_count, channels->tx_count, - adapter->req_rx_queues, adapter->req_tx_queues); - return ret; + return wait_for_reset(adapter); } static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -3298,43 +3480,32 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) struct ibmvnic_adapter *adapter = netdev_priv(dev); int i; - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); - i++, data += ETH_GSTRING_LEN) - memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); + if (stringset != ETH_SS_STATS) + return; - for (i = 0; i < adapter->req_tx_queues; i++) { - snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); - data += ETH_GSTRING_LEN; + for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) + memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); - snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); - data += ETH_GSTRING_LEN; + for (i = 0; i < adapter->req_tx_queues; i++) { + snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); + data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, - "tx%d_dropped_packets", i); - data += ETH_GSTRING_LEN; - } + snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); + data += ETH_GSTRING_LEN; - for (i = 0; i < adapter->req_rx_queues; i++) { - snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); - data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i); + data += ETH_GSTRING_LEN; + } - snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); - data += ETH_GSTRING_LEN; + for (i = 0; i < adapter->req_rx_queues; i++) { + snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); + data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); - data += ETH_GSTRING_LEN; - } - break; + snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); + data += ETH_GSTRING_LEN; - case ETH_SS_PRIV_FLAGS: - for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++) - strcpy(data + i * ETH_GSTRING_LEN, - ibmvnic_priv_flags[i]); - break; - default: - return; + snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); + data += ETH_GSTRING_LEN; } } @@ -3347,8 +3518,6 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset) return ARRAY_SIZE(ibmvnic_stats) + adapter->req_tx_queues * NUM_TX_STATS + adapter->req_rx_queues * NUM_RX_STATS; - case ETH_SS_PRIV_FLAGS: - return ARRAY_SIZE(ibmvnic_priv_flags); default: return -EOPNOTSUPP; } @@ -3401,26 +3570,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, } } -static u32 ibmvnic_get_priv_flags(struct net_device *netdev) -{ - struct ibmvnic_adapter *adapter = netdev_priv(netdev); - - return adapter->priv_flags; -} - -static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags) -{ - struct ibmvnic_adapter *adapter = netdev_priv(netdev); - bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES); - - if (which_maxes) - adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES; - else - adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES; - - return 0; -} - static const struct ethtool_ops ibmvnic_ethtool_ops = { .get_drvinfo = ibmvnic_get_drvinfo, .get_msglevel = ibmvnic_get_msglevel, @@ -3434,8 +3583,6 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = { .get_sset_count = ibmvnic_get_sset_count, .get_ethtool_stats = ibmvnic_get_ethtool_stats, .get_link_ksettings = ibmvnic_get_link_ksettings, - .get_priv_flags = ibmvnic_get_priv_flags, - .set_priv_flags = ibmvnic_set_priv_flags, }; /* Routines for managing CRQs/sCRQs */ @@ 
-4031,16 +4178,16 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) adapter->desired.rx_entries = adapter->max_rx_add_entries_per_subcrq; - max_entries = IBMVNIC_MAX_LTB_SIZE / + max_entries = IBMVNIC_LTB_SET_SIZE / (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * - adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) { + adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) { adapter->desired.tx_entries = max_entries; } if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * - adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) { + adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) { adapter->desired.rx_entries = max_entries; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 8f5cefb932dd..e5c6ff3d0c47 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -36,16 +36,52 @@ #define IBMVNIC_TSO_BUFS 64 #define IBMVNIC_TSO_POOL_MASK 0x80000000 -#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE) -#define IBMVNIC_BUFFER_HLEN 500 +/* A VNIC adapter has a set of Rx and Tx pools (aka queues). Each Rx/Tx pool + * has a set of buffers. The size of each buffer is determined by the MTU. + * + * Each Rx/Tx pool is also associated with a DMA region that is shared + * with the "hardware" (VIOS) and used to send/receive packets. The DMA + * region is also referred to as a Long Term Buffer or LTB. + * + * The size of the DMA region required for an Rx/Tx pool depends on the + * number and size (MTU) of the buffers in the pool. At the max levels + * of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus + * some padding. + * + * But the size of a single DMA region is limited by MAX_ORDER in the + * kernel (about 16MB currently). To support, say, 4K jumbo frames, we + * use a set of LTBs (struct ltb_set) per pool. + * + * IBMVNIC_ONE_LTB_MAX - max size of each LTB supported by kernel + * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set + * (must be <= IBMVNIC_ONE_LTB_MAX) + * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set + * + * Each VNIC can have up to 16 Rx, 16 Tx and 16 TSO pools. The TSO pools + * are of fixed length (IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS) of 4MB. + * + * The Rx and Tx pools can have up to 4096 buffers. The max size of these + * buffers is about 9588 (for jumbo frames, including IBMVNIC_BUFFER_HLEN). + * So we set IBMVNIC_LTB_SET_SIZE for a pool to 4096 * 9588 ~= 38MB. + * + * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. If it is large, + * the allocation of the LTB can fail when the system is low on memory. If + * it's too small, we would need several mappings for each of the Rx/ + * Tx/TSO pools but there is a limit of 255 mappings per VNIC in the + * VNIC protocol. + * + * So we set IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set + * to 38MB, we will need 5 LTBs per Rx and Tx pool and 1 LTB per TSO + * pool for the 4MB. Thus the 16 Rx and Tx queues require 32 * 5 = 160 + * plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC.
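The 176-mapping budget in the comment above is easy to sanity-check mechanically; a sketch with the constants inlined (names hypothetical):

#include <linux/kernel.h>

#define DEMO_LTB_SET_SIZE	(38 << 20)	/* per Rx/Tx pool */
#define DEMO_ONE_LTB_SIZE	(8 << 20)	/* per DMA region */

/* 38 MB split into 8 MB regions rounds up to 5 LTBs per pool; 16 Rx
 * plus 16 Tx pools use 32 * 5 = 160 mappings, and the 16 fixed 4 MB
 * TSO pools add one each, for 176 of the 255 map ids the protocol
 * allows.
 */
static int demo_total_ltb_mappings(void)
{
	int per_pool = DIV_ROUND_UP(DEMO_LTB_SET_SIZE, DEMO_ONE_LTB_SIZE);

	return (16 + 16) * per_pool + 16;	/* == 176 */
}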
+ */ +#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << (MAX_ORDER - 1)) * PAGE_SIZE)) +#define IBMVNIC_ONE_LTB_SIZE min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX) +#define IBMVNIC_LTB_SET_SIZE (38 << 20) +#define IBMVNIC_BUFFER_HLEN 500 #define IBMVNIC_RESET_DELAY 100 -static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = { -#define IBMVNIC_USE_SERVER_MAXES 0x1 - "use-server-maxes" -}; - struct ibmvnic_login_buffer { __be32 len; __be32 version; @@ -798,6 +834,11 @@ struct ibmvnic_long_term_buff { u8 map_id; }; +struct ibmvnic_ltb_set { + int num_ltbs; + struct ibmvnic_long_term_buff *ltbs; +}; + struct ibmvnic_tx_buff { struct sk_buff *skb; int index; @@ -810,7 +851,7 @@ struct ibmvnic_tx_pool { int *free_map; int consumer_index; int producer_index; - struct ibmvnic_long_term_buff long_term_buff; + struct ibmvnic_ltb_set ltb_set; int num_buffers; int buf_size; } ____cacheline_aligned; @@ -833,7 +874,7 @@ struct ibmvnic_rx_pool { int next_free; int next_alloc; int active; - struct ibmvnic_long_term_buff long_term_buff; + struct ibmvnic_ltb_set ltb_set; } ____cacheline_aligned; struct ibmvnic_vpd { @@ -883,7 +924,6 @@ struct ibmvnic_adapter { struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl; dma_addr_t ip_offload_ctrl_tok; u32 msg_enable; - u32 priv_flags; /* Vital Product Data (VPD) */ struct ibmvnic_vpd *vpd; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index d60e2016d03c..e6c8e6d5234f 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1009,8 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) { u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; - u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */ - u16 lat_enc_d = 0; /* latency decoded */ + u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */ + u32 lat_enc_d = 0; /* latency decoded */ u16 lat_enc = 0; /* latency encoded */ if (link) { diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 55c6bce5da61..18558a019353 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -852,6 +852,7 @@ struct i40e_vsi { u64 tx_busy; u64 tx_linearize; u64 tx_force_wb; + u64 tx_stopped; u64 rx_buf_failed; u64 rx_page_failed; u64 rx_page_reuse; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 6aefffd83615..2819e261a126 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -47,6 +47,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_SFP_I_X722: + case I40E_DEV_ID_SFP_X722_A: hw->mac.type = I40E_MAC_X722; break; default: diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index be7c6f34d45c..c9dcd6d92c83 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -309,10 +309,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) tx_ring->stats.bytes, tx_ring->tx_stats.restart_queue); dev_info(&pf->pdev->dev, - " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n", + " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n", i, tx_ring->tx_stats.tx_busy, - 
tx_ring->tx_stats.tx_done_old); + tx_ring->tx_stats.tx_done_old, + tx_ring->tx_stats.tx_stopped); dev_info(&pf->pdev->dev, " tx_rings[%i]: size = %i\n", i, tx_ring->size); diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index 1bcb0ec0f0c0..2610338002fe 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -33,6 +33,7 @@ #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 +#define I40E_DEV_ID_SFP_X722_A 0x0DDA #endif /* _I40E_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index e48499624d22..610f00cbaff9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -293,12 +293,14 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("tx_linearize", tx_linearize), I40E_VSI_STAT("tx_force_wb", tx_force_wb), I40E_VSI_STAT("tx_busy", tx_busy), + I40E_VSI_STAT("tx_stopped", tx_stopped), I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed), I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse), I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc), I40E_VSI_STAT("rx_cache_waive", rx_page_waive), I40E_VSI_STAT("rx_cache_busy", rx_page_busy), + I40E_VSI_STAT("tx_restart", tx_restart), }; /* These PF_STATs might look like duplicates of some NETDEV_STATs, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6778df2177a1..358c2edc118d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -77,6 +77,7 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0}, @@ -785,6 +786,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) unsigned int start; u64 tx_linearize; u64 tx_force_wb; + u64 tx_stopped; u64 rx_p, rx_b; u64 tx_p, tx_b; u16 q; @@ -804,6 +806,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) rx_b = rx_p = 0; tx_b = tx_p = 0; tx_restart = tx_busy = tx_linearize = tx_force_wb = 0; + tx_stopped = 0; rx_page = 0; rx_buf = 0; rx_reuse = 0; @@ -828,6 +831,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) tx_busy += p->tx_stats.tx_busy; tx_linearize += p->tx_stats.tx_linearize; tx_force_wb += p->tx_stats.tx_force_wb; + tx_stopped += p->tx_stats.tx_stopped; /* locate Rx ring */ p = READ_ONCE(vsi->rx_rings[q]); @@ -872,6 +876,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) vsi->tx_busy = tx_busy; vsi->tx_linearize = tx_linearize; vsi->tx_force_wb = tx_force_wb; + vsi->tx_stopped = tx_stopped; vsi->rx_page_failed = rx_page; vsi->rx_buf_failed = rx_buf; vsi->rx_page_reuse = rx_reuse; @@ -13436,8 +13441,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) np->vsi = vsi; hw_enc_features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | + NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_SOFT_FEATURES | NETIF_F_TSO | @@ -13468,6 +13472,23 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) /* record features VLANs can make use of */ 
netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; +#define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | + I40E_GSO_PARTIAL_FEATURES; + + netdev->mpls_features |= NETIF_F_SG; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->mpls_features |= NETIF_F_TSO; + netdev->mpls_features |= NETIF_F_TSO6; + netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES; + /* enable macvlan offloads */ netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 0eae5858f2fe..7bc1174edf6b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3,6 +3,7 @@ #include <linux/prefetch.h> #include <linux/bpf_trace.h> +#include <net/mpls.h> #include <net/xdp.h> #include "i40e.h" #include "i40e_trace.h" @@ -3015,6 +3016,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, { struct sk_buff *skb = first->skb; u64 cd_cmd, cd_tso_len, cd_mss; + __be16 protocol; union { struct iphdr *v4; struct ipv6hdr *v6; @@ -3026,7 +3028,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, unsigned char *hdr; } l4; u32 paylen, l4_offset; - u16 gso_segs, gso_size; + u16 gso_size; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -3039,15 +3041,23 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, if (err < 0) return err; - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); + protocol = vlan_get_protocol(skb); + + if (eth_p_mpls(protocol)) + ip.hdr = skb_inner_network_header(skb); + else + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); /* initialize outer IP header fields */ if (ip.v4->version == 4) { ip.v4->tot_len = 0; ip.v4->check = 0; + + first->tx_flags |= I40E_TX_FLAGS_TSO; } else { ip.v6->payload_len = 0; + first->tx_flags |= I40E_TX_FLAGS_TSO; } if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | @@ -3100,10 +3110,9 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, /* pull values out of skb_shinfo */ gso_size = skb_shinfo(skb)->gso_size; - gso_segs = skb_shinfo(skb)->gso_segs; /* update GSO size and bytecount with header size */ - first->gso_segs = gso_segs; + first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; /* find the field values */ @@ -3187,13 +3196,27 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, unsigned char *exthdr; u32 offset, cmd = 0; __be16 frag_off; + __be16 protocol; u8 l4_proto = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); + protocol = vlan_get_protocol(skb); + + if (eth_p_mpls(protocol)) + ip.hdr = skb_inner_network_header(skb); + else + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* set the tx_flags to indicate the IP protocol type. this is + * required so that checksum header computation below is accurate. 
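
The i40e_tso() rework above keys the header pointers off the Ethernet protocol: for MPLS-encapsulated traffic the stack's network header points at the MPLS label stack, so the driver must use the inner network header and let skb_checksum_start() locate L4. A minimal sketch of that selection (helper name illustrative):

static unsigned char *example_l3_header(const struct sk_buff *skb)
{
        __be16 protocol = vlan_get_protocol(skb);

        /* Under MPLS the IP header the TSO/checksum engine must parse
         * is the inner one; everything else keeps the outer header.
         */
        if (eth_p_mpls(protocol))
                return skb_inner_network_header(skb);
        return skb_network_header(skb);
}
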
+ */ + if (ip.v4->version == 4) + *tx_flags |= I40E_TX_FLAGS_IPV4; + else + *tx_flags |= I40E_TX_FLAGS_IPV6; /* compute outer L2 header size */ offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; @@ -3373,6 +3396,8 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) /* Memory barrier before checking head and tail */ smp_mb(); + ++tx_ring->tx_stats.tx_stopped; + /* Check again in a case another CPU has just made room available. */ if (likely(I40E_DESC_UNUSED(tx_ring) < size)) return -EBUSY; @@ -3749,7 +3774,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, struct i40e_tx_buffer *first; u32 td_offset = 0; u32 tx_flags = 0; - __be16 protocol; u32 td_cmd = 0; u8 hdr_len = 0; int tso, count; @@ -3791,15 +3815,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) goto out_drop; - /* obtain protocol of skb */ - protocol = vlan_get_protocol(skb); - - /* setup IPv4/IPv6 offloads */ - if (protocol == htons(ETH_P_IP)) - tx_flags |= I40E_TX_FLAGS_IPV4; - else if (protocol == htons(ETH_P_IPV6)) - tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index c471c2da313c..41f86e9535a0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -290,6 +290,7 @@ struct i40e_tx_queue_stats { u64 tx_done_old; u64 tx_linearize; u64 tx_force_wb; + u64 tx_stopped; int prev_pkt_ctr; }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h index 19da3b22160f..8c5118c8baaf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h @@ -20,6 +20,7 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val); #define I40E_XDP_CONSUMED BIT(0) #define I40E_XDP_TX BIT(1) #define I40E_XDP_REDIR BIT(2) +#define I40E_XDP_EXIT BIT(3) /* * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index c1d25b0b0ca2..af3e7e6afc85 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -161,9 +161,13 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) if (likely(act == XDP_REDIRECT)) { err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - if (err) - goto out_failure; - return I40E_XDP_REDIR; + if (!err) + return I40E_XDP_REDIR; + if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) + result = I40E_XDP_EXIT; + else + result = I40E_XDP_CONSUMED; + goto out_failure; } switch (act) { @@ -175,16 +179,16 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) if (result == I40E_XDP_CONSUMED) goto out_failure; break; + case XDP_DROP: + result = I40E_XDP_CONSUMED; + break; default: bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: + result = I40E_XDP_CONSUMED; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - fallthrough; /* handle aborts by dropping packet */ - case XDP_DROP: - result = I40E_XDP_CONSUMED; - break; } return result; } @@ -271,7 +275,8 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring, unsigned int *rx_packets, unsigned int *rx_bytes, unsigned int size, - unsigned int xdp_res) + 
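
For context around the tx_stopped counter added above: __i40e_maybe_stop_tx() is the slow path of the usual stop/recheck dance, and the new statistic counts how often the queue was actually stopped. A paraphrased sketch of the whole pattern (not the verbatim driver code):

static int example_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
        smp_mb();       /* order the stop against the head/tail re-read */
        ++tx_ring->tx_stats.tx_stopped;

        /* re-check: another CPU may have freed descriptors meanwhile */
        if (likely(I40E_DESC_UNUSED(tx_ring) < size))
                return -EBUSY;

        /* a reprieve - restart without scheduling the queue */
        netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
        ++tx_ring->tx_stats.restart_queue;
        return 0;
}
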
unsigned int xdp_res, + bool *failure) { struct sk_buff *skb; @@ -281,11 +286,15 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring, if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX) return; + if (xdp_res == I40E_XDP_EXIT) { + *failure = true; + return; + } + if (xdp_res == I40E_XDP_CONSUMED) { xsk_buff_free(xdp_buff); return; } - if (xdp_res == I40E_XDP_PASS) { /* NB! We are not checking for errors using * i40e_test_staterr with @@ -371,7 +380,9 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) xdp_res = i40e_run_xdp_zc(rx_ring, bi); i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets, - &rx_bytes, size, xdp_res); + &rx_bytes, size, xdp_res, &failure); + if (failure) + break; total_rx_packets += rx_packets; total_rx_bytes += rx_bytes; xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR); @@ -382,7 +393,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask; if (cleaned_count >= I40E_RX_BUFFER_WRITE) - failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count); + failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count); i40e_finalize_xdp_rx(rx_ring, xdp_xmit); i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); @@ -594,13 +605,13 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) return -ENETDOWN; if (!i40e_enabled_xdp_vsi(vsi)) - return -ENXIO; + return -EINVAL; if (queue_id >= vsi->num_queue_pairs) - return -ENXIO; + return -EINVAL; if (!vsi->xdp_rings[queue_id]->xsk_pool) - return -ENXIO; + return -EINVAL; ring = vsi->xdp_rings[queue_id]; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 190590d32faf..7dfcf78b57fb 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -2871,7 +2871,6 @@ continue_reset: running = adapter->state == __IAVF_RUNNING; if (running) { - netdev->flags &= ~IFF_UP; netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; @@ -2988,7 +2987,7 @@ continue_reset: * to __IAVF_RUNNING */ iavf_up_complete(adapter); - netdev->flags |= IFF_UP; + iavf_irq_enable(adapter, true); } else { iavf_change_state(adapter, __IAVF_DOWN); @@ -3004,10 +3003,8 @@ continue_reset: reset_err: mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); - if (running) { + if (running) iavf_change_state(adapter, __IAVF_RUNNING); - netdev->flags |= IFF_UP; - } dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); iavf_close(netdev); } diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index d4f1874df7d0..8ed3c9ab7ff7 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -301,7 +301,6 @@ enum ice_vsi_state { ICE_VSI_NETDEV_REGISTERED, ICE_VSI_UMAC_FLTR_CHANGED, ICE_VSI_MMAC_FLTR_CHANGED, - ICE_VSI_VLAN_FLTR_CHANGED, ICE_VSI_PROMISC_CHANGED, ICE_VSI_STATE_NBITS /* must be last */ }; @@ -672,7 +671,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev) static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi) { - return !!vsi->xdp_prog; + return !!READ_ONCE(vsi->xdp_prog); } static inline void ice_set_ring_xdp(struct ice_tx_ring *ring) diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index 5daade32ea62..fba178e07600 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ 
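
The READ_ONCE() added to ice_is_xdp_ena_vsi() above matters because the helper runs on the data path without any configuration lock; annotating the load keeps the compiler from tearing or caching it. A minimal sketch of the intended pairing, with the writer side shown with WRITE_ONCE() purely for illustration (the actual attach/detach path runs under rtnl):

/* writer: XDP attach/detach (config path) */
WRITE_ONCE(vsi->xdp_prog, prog);

/* reader: hot path, lockless */
static inline bool example_xdp_enabled(struct ice_vsi *vsi)
{
        return !!READ_ONCE(vsi->xdp_prog);
}
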
b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -577,7 +577,7 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { struct net_device *netdev; - if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list) + if (!vsi || vsi->type != ICE_VSI_PF) return; netdev = vsi->netdev; @@ -599,7 +599,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) int base_idx, i; if (!vsi || vsi->type != ICE_VSI_PF) - return -EINVAL; + return 0; pf = vsi->back; netdev = vsi->netdev; @@ -636,7 +636,6 @@ void ice_remove_arfs(struct ice_pf *pf) if (!pf_vsi) return; - ice_free_cpu_rx_rmap(pf_vsi); ice_clear_arfs(pf_vsi); } @@ -653,9 +652,5 @@ void ice_rebuild_arfs(struct ice_pf *pf) return; ice_remove_arfs(pf); - if (ice_set_cpu_rx_rmap(pf_vsi)) { - dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n"); - return; - } ice_init_arfs(pf_vsi); } diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index 9a84d746a6c4..6a463b242c7d 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -361,7 +361,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) np = netdev_priv(netdev); vsi = np->vsi; - if (ice_is_reset_in_progress(vsi->back->state)) + if (ice_is_reset_in_progress(vsi->back->state) || + test_bit(ICE_VF_DIS, vsi->back->state)) return NETDEV_TX_BUSY; repr = ice_netdev_to_repr(netdev); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h index bd58d9d2e565..6a413331572b 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -52,7 +52,7 @@ static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { } static inline int ice_eswitch_configure(struct ice_pf *pf) { - return -EOPNOTSUPP; + return 0; } static inline int ice_eswitch_rebuild(struct ice_pf *pf) diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c index af57eb114966..85a94483c2ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.c +++ b/drivers/net/ethernet/intel/ice/ice_fltr.c @@ -58,7 +58,16 @@ int ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc_mask) { - return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false); + struct ice_pf *pf = hw->back; + int result; + + result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false); + if (result) + dev_err(ice_pf_to_dev(pf), + "Error setting promisc mode on VSI %i (rc=%d)\n", + vsi->vsi_num, result); + + return result; } /** @@ -73,7 +82,16 @@ int ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc_mask) { - return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true); + struct ice_pf *pf = hw->back; + int result; + + result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true); + if (result) + dev_err(ice_pf_to_dev(pf), + "Error clearing promisc mode on VSI %i (rc=%d)\n", + vsi->vsi_num, result); + + return result; } /** @@ -87,7 +105,16 @@ int ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) { - return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid); + struct ice_pf *pf = hw->back; + int result; + + result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid); + if (result) + dev_err(ice_pf_to_dev(pf), + "Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n", + ice_get_hw_vsi_num(hw, vsi_handle), vid, result); + + return result; } /** @@ -101,7 +128,16 @@ int 
ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) { - return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid); + struct ice_pf *pf = hw->back; + int result; + + result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid); + if (result) + dev_err(ice_pf_to_dev(pf), + "Error setting promisc mode on VSI %i for VID %u (rc=%d)\n", + ice_get_hw_vsi_num(hw, vsi_handle), vid, result); + + return result; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index b897926f817d..6d19c58ccacd 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1480,6 +1480,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->tx_tstamps = &pf->ptp.port.tx; ring->dev = dev; ring->count = vsi->num_tx_desc; + ring->txq_teid = ICE_INVAL_TEID; if (dvm_ena) ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2; else @@ -2688,6 +2689,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) return; vsi->irqs_ready = false; + ice_free_cpu_rx_rmap(vsi); + ice_for_each_q_vector(vsi, i) { u16 vector = i + base; int irq_num; @@ -2701,7 +2704,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) continue; /* clear the affinity notifier in the IRQ descriptor */ - irq_set_affinity_notifier(irq_num, NULL); + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) + irq_set_affinity_notifier(irq_num, NULL); /* clear the affinity_mask in the IRQ descriptor */ irq_set_affinity_hint(irq_num, NULL); @@ -2983,6 +2987,8 @@ int ice_vsi_release(struct ice_vsi *vsi) } } + if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) + ice_clear_dflt_vsi(pf->first_sw); ice_fltr_remove_all(vsi); ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index b588d7995631..6d8beb84d852 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -243,8 +243,7 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) { return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) || - test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) || - test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); + test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); } /** @@ -260,10 +259,15 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m) if (vsi->type != ICE_VSI_PF) return 0; - if (ice_vsi_has_non_zero_vlans(vsi)) - status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m); - else - status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0); + if (ice_vsi_has_non_zero_vlans(vsi)) { + promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX); + status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, + promisc_m); + } else { + status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, + promisc_m, 0); + } + return status; } @@ -280,10 +284,15 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m) if (vsi->type != ICE_VSI_PF) return 0; - if (ice_vsi_has_non_zero_vlans(vsi)) - status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m); - else - status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0); + if (ice_vsi_has_non_zero_vlans(vsi)) { + promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX); + status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, + promisc_m); + } else { + status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, 
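
The four wrappers above now log promiscuous-mode failures centrally, letting callers (see the ice_vsi_sync_fltr hunk below) drop their duplicated netdev_err() calls. Relatedly, when only a bare errno is at hand, the %pe printk specifier used elsewhere in this series prints the symbolic error name. A sketch, assuming pf and the arguments as in the wrappers above (message text illustrative):

result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
if (result)
        /* %pe + ERR_PTR() prints e.g. "-EIO" rather than "-5" */
        dev_err(ice_pf_to_dev(pf), "promisc update failed: %pe\n",
                ERR_PTR(result));
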
vsi->idx, + promisc_m, 0); + } + return status; } @@ -302,7 +311,6 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; u32 changed_flags = 0; - u8 promisc_m; int err; if (!vsi->netdev) @@ -320,7 +328,6 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) if (ice_vsi_fltr_changed(vsi)) { clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); - clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); /* grab the netdev's addr_list_lock */ netif_addr_lock_bh(netdev); @@ -369,29 +376,15 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { if (vsi->current_netdev_flags & IFF_ALLMULTI) { - if (ice_vsi_has_non_zero_vlans(vsi)) - promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; - else - promisc_m = ICE_MCAST_PROMISC_BITS; - - err = ice_set_promisc(vsi, promisc_m); + err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS); if (err) { - netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n", - vsi->vsi_num); vsi->current_netdev_flags &= ~IFF_ALLMULTI; goto out_promisc; } } else { /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */ - if (ice_vsi_has_non_zero_vlans(vsi)) - promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; - else - promisc_m = ICE_MCAST_PROMISC_BITS; - - err = ice_clear_promisc(vsi, promisc_m); + err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS); if (err) { - netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n", - vsi->vsi_num); vsi->current_netdev_flags |= IFF_ALLMULTI; goto out_promisc; } @@ -2517,6 +2510,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); } + err = ice_set_cpu_rx_rmap(vsi); + if (err) { + netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n", + vsi->vsi_num, ERR_PTR(err)); + goto free_q_irqs; + } + vsi->irqs_ready = true; return 0; @@ -2569,7 +2569,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) spin_lock_init(&xdp_ring->tx_lock); for (j = 0; j < xdp_ring->count; j++) { tx_desc = ICE_TX_DESC(xdp_ring, j); - tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE); + tx_desc->cmd_type_offset_bsz = 0; } } @@ -2765,8 +2765,10 @@ free_qmap: ice_for_each_xdp_txq(vsi, i) if (vsi->xdp_rings[i]) { - if (vsi->xdp_rings[i]->desc) + if (vsi->xdp_rings[i]->desc) { + synchronize_rcu(); ice_free_tx_ring(vsi->xdp_rings[i]); + } kfree_rcu(vsi->xdp_rings[i], rcu); vsi->xdp_rings[i] = NULL; } @@ -3334,7 +3336,9 @@ static void ice_set_netdev_features(struct net_device *netdev) vlano_features | tso_features; /* add support for HW_CSUM on packets with MPLS header */ - netdev->mpls_features = NETIF_F_HW_CSUM; + netdev->mpls_features = NETIF_F_HW_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6; /* enable features */ netdev->features |= netdev->hw_features; @@ -3488,6 +3492,20 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) if (!vid) return 0; + while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) + usleep_range(1000, 2000); + + /* Add multicast promisc rule for the VLAN ID to be added if + * all-multicast is currently enabled. 
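
The VLAN add path above (and the kill path below) now serialize against other reconfiguration with a busy-wait on a VSI state bit. Because these handlers may sleep, the wait uses usleep_range() instead of spinning. The general shape of this bit-lock idiom, as used in the surrounding hunks:

/* acquire: loop until this caller is the one that set the bit */
while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
        usleep_range(1000, 2000);       /* process context only */

/* ... mutate VLAN filters / promiscuous rules ... */

clear_bit(ICE_CFG_BUSY, vsi->state);    /* release */
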
+ */ + if (vsi->current_netdev_flags & IFF_ALLMULTI) { + ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_VLAN_PROMISC_BITS, + vid); + if (ret) + goto finish; + } + vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged @@ -3495,8 +3513,23 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) */ vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0); ret = vlan_ops->add_vlan(vsi, &vlan); - if (!ret) - set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); + if (ret) + goto finish; + + /* If all-multicast is currently enabled and this VLAN ID is only one + * besides VLAN-0 we have to update look-up type of multicast promisc + * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN. + */ + if ((vsi->current_netdev_flags & IFF_ALLMULTI) && + ice_vsi_num_non_zero_vlans(vsi) == 1) { + ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_PROMISC_BITS, 0); + ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_VLAN_PROMISC_BITS, 0); + } + +finish: + clear_bit(ICE_CFG_BUSY, vsi->state); return ret; } @@ -3522,6 +3555,9 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) if (!vid) return 0; + while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) + usleep_range(1000, 2000); + vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); /* Make sure VLAN delete is successful before updating VLAN @@ -3530,10 +3566,33 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0); ret = vlan_ops->del_vlan(vsi, &vlan); if (ret) - return ret; + goto finish; - set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); - return 0; + /* Remove multicast promisc rule for the removed VLAN ID if + * all-multicast is enabled. + */ + if (vsi->current_netdev_flags & IFF_ALLMULTI) + ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_VLAN_PROMISC_BITS, vid); + + if (!ice_vsi_has_non_zero_vlans(vsi)) { + /* Update look-up type of multicast promisc rule for VLAN 0 + * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when + * all-multicast is enabled and VLAN 0 is the only VLAN rule. + */ + if (vsi->current_netdev_flags & IFF_ALLMULTI) { + ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_VLAN_PROMISC_BITS, + 0); + ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_PROMISC_BITS, 0); + } + } + +finish: + clear_bit(ICE_CFG_BUSY, vsi->state); + + return ret; } /** @@ -3642,20 +3701,12 @@ static int ice_setup_pf_sw(struct ice_pf *pf) */ ice_napi_add(vsi); - status = ice_set_cpu_rx_rmap(vsi); - if (status) { - dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n", - vsi->vsi_num, status); - goto unroll_napi_add; - } status = ice_init_mac_fltr(pf); if (status) - goto free_cpu_rx_map; + goto unroll_napi_add; return 0; -free_cpu_rx_map: - ice_free_cpu_rx_rmap(vsi); unroll_napi_add: ice_tc_indir_block_unregister(vsi); unroll_cfg_netdev: @@ -5117,7 +5168,6 @@ static int __maybe_unused ice_suspend(struct device *dev) continue; ice_vsi_free_q_vectors(pf->vsi[v]); } - ice_free_cpu_rx_rmap(ice_get_main_vsi(pf)); ice_clear_interrupt_scheme(pf); pci_save_state(pdev); @@ -5475,16 +5525,19 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) /* Add filter for new MAC. If filter exists, return success */ err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); - if (err == -EEXIST) + if (err == -EEXIST) { /* Although this MAC filter is already present in hardware it's * possible in some cases (e.g. 
bonding) that dev_addr was * modified outside of the driver and needs to be restored back * to this value. */ netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); - else if (err) + + return 0; + } else if (err) { /* error if the new filter addition failed */ err = -EADDRNOTAVAIL; + } err_update_filters: if (err) { @@ -6878,12 +6931,15 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); +#define ICE_EMP_RESET_SLEEP_MS 5000 if (reset_type == ICE_RESET_EMPR) { /* If an EMP reset has occurred, any previously pending flash * update will have completed. We no longer know whether or * not the NVM update EMP reset is restricted. */ pf->fw_emp_reset_disabled = false; + + msleep(ICE_EMP_RESET_SLEEP_MS); } err = ice_init_all_ctrlq(hw); diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 4eb0599714f4..13cdb5ea594d 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -641,6 +641,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0, orom_data, hw->flash.banks.orom_size); if (status) { + vfree(orom_data); ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n"); return status; } diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 8915a9d39e36..0c438219f7a3 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -1046,8 +1046,8 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!num_vfs) { if (!pci_vfs_assigned(pdev)) { - ice_mbx_deinit_snapshot(&pf->hw); ice_free_vfs(pf); + ice_mbx_deinit_snapshot(&pf->hw); if (pf->lag) ice_enable_lag(pf->lag); return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 25b8f6f726eb..496250f9f8fc 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -30,12 +30,46 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0, 0x2, 0, 0, 0, 0, 0, 0x81, 0, 0, 0}; +enum { + ICE_PKT_VLAN = BIT(0), + ICE_PKT_OUTER_IPV6 = BIT(1), + ICE_PKT_TUN_GTPC = BIT(2), + ICE_PKT_TUN_GTPU = BIT(3), + ICE_PKT_TUN_NVGRE = BIT(4), + ICE_PKT_TUN_UDP = BIT(5), + ICE_PKT_INNER_IPV6 = BIT(6), + ICE_PKT_INNER_TCP = BIT(7), + ICE_PKT_INNER_UDP = BIT(8), + ICE_PKT_GTP_NOPAY = BIT(9), +}; + struct ice_dummy_pkt_offsets { enum ice_protocol_type type; u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */ }; -static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = { +struct ice_dummy_pkt_profile { + const struct ice_dummy_pkt_offsets *offsets; + const u8 *pkt; + u32 match; + u16 pkt_len; +}; + +#define ICE_DECLARE_PKT_OFFSETS(type) \ + static const struct ice_dummy_pkt_offsets \ + ice_dummy_##type##_packet_offsets[] + +#define ICE_DECLARE_PKT_TEMPLATE(type) \ + static const u8 ice_dummy_##type##_packet[] + +#define ICE_PKT_PROFILE(type, m) { \ + .match = (m), \ + .pkt = ice_dummy_##type##_packet, \ + .pkt_len = sizeof(ice_dummy_##type##_packet), \ + .offsets = ice_dummy_##type##_packet_offsets, \ +} + +ICE_DECLARE_PKT_OFFSETS(gre_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -47,7 +81,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const 
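
The three helper macros introduced above generate consistent names for each dummy-packet template and collapse a table entry to one line. For reference, an entry such as ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP) expands to:

{
        .match   = ICE_PKT_INNER_UDP,
        .pkt     = ice_dummy_udp_packet,
        .pkt_len = sizeof(ice_dummy_udp_packet),
        .offsets = ice_dummy_udp_packet_offsets,
}
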
u8 dummy_gre_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -82,7 +116,7 @@ static const u8 dummy_gre_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00 }; -static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(gre_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -94,7 +128,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_gre_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(gre_udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -126,7 +160,7 @@ static const u8 dummy_gre_udp_packet[] = { 0x00, 0x08, 0x00, 0x00, }; -static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -141,7 +175,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_udp_tun_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -179,7 +213,7 @@ static const u8 dummy_udp_tun_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00 }; -static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -194,7 +228,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_udp_tun_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -229,8 +263,7 @@ static const u8 dummy_udp_tun_udp_packet[] = { 0x00, 0x08, 0x00, 0x00, }; -static const struct ice_dummy_pkt_offsets -dummy_gre_ipv6_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -242,7 +275,7 @@ dummy_gre_ipv6_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_gre_ipv6_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -282,8 +315,7 @@ static const u8 dummy_gre_ipv6_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00 }; -static const struct ice_dummy_pkt_offsets -dummy_gre_ipv6_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -295,7 +327,7 @@ dummy_gre_ipv6_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_gre_ipv6_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -332,8 +364,7 @@ static const u8 dummy_gre_ipv6_udp_packet[] = { 0x00, 0x08, 0x00, 0x00, }; -static const struct ice_dummy_pkt_offsets -dummy_udp_tun_ipv6_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -348,7 +379,7 @@ dummy_udp_tun_ipv6_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_udp_tun_ipv6_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = { 0x00, 0x00, 
0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -391,8 +422,7 @@ static const u8 dummy_udp_tun_ipv6_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00 }; -static const struct ice_dummy_pkt_offsets -dummy_udp_tun_ipv6_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -407,7 +437,7 @@ dummy_udp_tun_ipv6_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_udp_tun_ipv6_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -448,7 +478,7 @@ static const u8 dummy_udp_tun_ipv6_udp_packet[] = { }; /* offset info for MAC + IPv4 + UDP dummy packet */ -static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -457,7 +487,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = { }; /* Dummy packet for MAC + IPv4 + UDP */ -static const u8 dummy_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -477,7 +507,7 @@ static const u8 dummy_udp_packet[] = { }; /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */ -static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(vlan_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_VLAN_OFOS, 12 }, { ICE_ETYPE_OL, 16 }, @@ -487,7 +517,7 @@ static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = { }; /* C-tag (801.1Q), IPv4:UDP dummy packet */ -static const u8 dummy_vlan_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(vlan_udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -509,7 +539,7 @@ static const u8 dummy_vlan_udp_packet[] = { }; /* offset info for MAC + IPv4 + TCP dummy packet */ -static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -518,7 +548,7 @@ static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = { }; /* Dummy packet for MAC + IPv4 + TCP */ -static const u8 dummy_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -541,7 +571,7 @@ static const u8 dummy_tcp_packet[] = { }; /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */ -static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(vlan_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_VLAN_OFOS, 12 }, { ICE_ETYPE_OL, 16 }, @@ -551,7 +581,7 @@ static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = { }; /* C-tag (801.1Q), IPv4:TCP dummy packet */ -static const u8 dummy_vlan_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(vlan_tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -575,7 +605,7 @@ static const u8 dummy_vlan_tcp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV6_OFOS, 14 }, @@ -583,7 +613,7 @@ static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = { { 
ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_tcp_ipv6_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -611,8 +641,7 @@ static const u8 dummy_tcp_ipv6_packet[] = { }; /* C-tag (802.1Q): IPv6 + TCP */ -static const struct ice_dummy_pkt_offsets -dummy_vlan_tcp_ipv6_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(vlan_tcp_ipv6) = { { ICE_MAC_OFOS, 0 }, { ICE_VLAN_OFOS, 12 }, { ICE_ETYPE_OL, 16 }, @@ -622,7 +651,7 @@ dummy_vlan_tcp_ipv6_packet_offsets[] = { }; /* C-tag (802.1Q), IPv6 + TCP dummy packet */ -static const u8 dummy_vlan_tcp_ipv6_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(vlan_tcp_ipv6) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -652,7 +681,7 @@ static const u8 dummy_vlan_tcp_ipv6_packet[] = { }; /* IPv6 + UDP */ -static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV6_OFOS, 14 }, @@ -661,7 +690,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = { }; /* IPv6 + UDP dummy packet */ -static const u8 dummy_udp_ipv6_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -689,8 +718,7 @@ static const u8 dummy_udp_ipv6_packet[] = { }; /* C-tag (802.1Q): IPv6 + UDP */ -static const struct ice_dummy_pkt_offsets -dummy_vlan_udp_ipv6_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(vlan_udp_ipv6) = { { ICE_MAC_OFOS, 0 }, { ICE_VLAN_OFOS, 12 }, { ICE_ETYPE_OL, 16 }, @@ -700,7 +728,7 @@ dummy_vlan_udp_ipv6_packet_offsets[] = { }; /* C-tag (802.1Q), IPv6 + UDP dummy packet */ -static const u8 dummy_vlan_udp_ipv6_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(vlan_udp_ipv6) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -727,8 +755,7 @@ static const u8 dummy_vlan_udp_ipv6_packet[] = { }; /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */ -static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV4_OFOS, 14 }, { ICE_UDP_OF, 34 }, @@ -738,7 +765,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -776,8 +803,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = { }; /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */ -static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV4_OFOS, 14 }, { ICE_UDP_OF, 34 }, @@ -787,7 +813,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -822,8 +848,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = { }; /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */ -static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = { 
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV4_OFOS, 14 }, { ICE_UDP_OF, 34 }, @@ -833,7 +858,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -875,8 +900,7 @@ static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV4_OFOS, 14 }, { ICE_UDP_OF, 34 }, @@ -886,7 +910,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -925,8 +949,7 @@ static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -936,7 +959,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -978,8 +1001,7 @@ static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -989,7 +1011,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1028,8 +1050,7 @@ static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -1039,7 +1060,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1086,8 +1107,7 @@ static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -1097,7 +1117,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = { 
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1141,7 +1161,15 @@ static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const u8 dummy_ipv4_gtpu_ipv4_packet[] = { +ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = { + { ICE_MAC_OFOS, 0 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_OF, 34 }, + { ICE_GTP_NO_PAY, 42 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1171,17 +1199,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_packet[] = { 0x00, 0x00, }; -static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = { - { ICE_MAC_OFOS, 0 }, - { ICE_IPV4_OFOS, 14 }, - { ICE_UDP_OF, 34 }, - { ICE_GTP_NO_PAY, 42 }, - { ICE_PROTOCOL_LAST, 0 }, -}; - -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -1189,7 +1207,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv6_gtp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1215,6 +1233,55 @@ static const u8 dummy_ipv6_gtp_packet[] = { 0x00, 0x00, }; +static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = { + ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 | + ICE_PKT_GTP_NOPAY), + ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU | + ICE_PKT_OUTER_IPV6 | + ICE_PKT_INNER_IPV6 | + ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU | + ICE_PKT_OUTER_IPV6 | + ICE_PKT_INNER_IPV6), + ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU | + ICE_PKT_OUTER_IPV6 | + ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU | + ICE_PKT_OUTER_IPV6), + ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY), + ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU | + ICE_PKT_INNER_IPV6 | + ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU | + ICE_PKT_INNER_IPV6), + ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU | + ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU), + ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6), + ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC), + ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 | + ICE_PKT_INNER_TCP), + ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP), + ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6), + ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE), + ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP | + ICE_PKT_INNER_IPV6 | + ICE_PKT_INNER_TCP), + ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP), + ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP | + ICE_PKT_INNER_IPV6), + ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP), + ICE_PKT_PROFILE(vlan_udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP | + ICE_PKT_VLAN), + ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(vlan_udp, ICE_PKT_INNER_UDP | ICE_PKT_VLAN), + ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(vlan_tcp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_VLAN), + ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6), + 
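
The profile table above is ordered most-specific first and terminated by an entry whose match mask is 0 (plain tcp), so the lookup always stops. A worked example, assuming a VXLAN rule that also matches an inner UDP header:

u32 match = ICE_PKT_TUN_UDP | ICE_PKT_INNER_UDP;
const struct ice_dummy_pkt_profile *p = ice_dummy_pkt_profiles;

/* Skip every entry that requires a bit we do not have (GTPC/GTPU,
 * NVGRE, INNER_IPV6, INNER_TCP, ...); the first entry whose required
 * bits are all present is udp_tun_udp, the UDP-tunnel template with
 * an inner UDP header.
 */
while (p->match && (match & p->match) != p->match)
        p++;
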
ICE_PKT_PROFILE(vlan_tcp, ICE_PKT_VLAN), + ICE_PKT_PROFILE(tcp, 0), +}; + #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \ (DUMMY_ETH_HDR_LEN * \ @@ -5501,212 +5568,66 @@ err_free_lkup_exts: * structure per protocol header * @lkups_cnt: number of protocols * @tun_type: tunnel type - * @pkt: dummy packet to fill according to filter match criteria - * @pkt_len: packet length of dummy packet - * @offsets: pointer to receive the pointer to the offsets for the packet + * + * Returns the &ice_dummy_pkt_profile corresponding to these lookup params. */ -static void +static const struct ice_dummy_pkt_profile * ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, - enum ice_sw_tunnel_type tun_type, - const u8 **pkt, u16 *pkt_len, - const struct ice_dummy_pkt_offsets **offsets) + enum ice_sw_tunnel_type tun_type) { - bool inner_tcp = false, inner_udp = false, outer_ipv6 = false; - bool vlan = false, inner_ipv6 = false, gtp_no_pay = false; + const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles; + u32 match = 0; u16 i; + switch (tun_type) { + case ICE_SW_TUN_GTPC: + match |= ICE_PKT_TUN_GTPC; + break; + case ICE_SW_TUN_GTPU: + match |= ICE_PKT_TUN_GTPU; + break; + case ICE_SW_TUN_NVGRE: + match |= ICE_PKT_TUN_NVGRE; + break; + case ICE_SW_TUN_GENEVE: + case ICE_SW_TUN_VXLAN: + match |= ICE_PKT_TUN_UDP; + break; + default: + break; + } + for (i = 0; i < lkups_cnt; i++) { if (lkups[i].type == ICE_UDP_ILOS) - inner_udp = true; + match |= ICE_PKT_INNER_UDP; else if (lkups[i].type == ICE_TCP_IL) - inner_tcp = true; + match |= ICE_PKT_INNER_TCP; else if (lkups[i].type == ICE_IPV6_OFOS) - outer_ipv6 = true; + match |= ICE_PKT_OUTER_IPV6; else if (lkups[i].type == ICE_VLAN_OFOS) - vlan = true; + match |= ICE_PKT_VLAN; else if (lkups[i].type == ICE_ETYPE_OL && lkups[i].h_u.ethertype.ethtype_id == cpu_to_be16(ICE_IPV6_ETHER_ID) && lkups[i].m_u.ethertype.ethtype_id == cpu_to_be16(0xFFFF)) - outer_ipv6 = true; + match |= ICE_PKT_OUTER_IPV6; else if (lkups[i].type == ICE_ETYPE_IL && lkups[i].h_u.ethertype.ethtype_id == cpu_to_be16(ICE_IPV6_ETHER_ID) && lkups[i].m_u.ethertype.ethtype_id == cpu_to_be16(0xFFFF)) - inner_ipv6 = true; + match |= ICE_PKT_INNER_IPV6; else if (lkups[i].type == ICE_IPV6_IL) - inner_ipv6 = true; + match |= ICE_PKT_INNER_IPV6; else if (lkups[i].type == ICE_GTP_NO_PAY) - gtp_no_pay = true; + match |= ICE_PKT_GTP_NOPAY; } - if (tun_type == ICE_SW_TUN_GTPU) { - if (outer_ipv6) { - if (gtp_no_pay) { - *pkt = dummy_ipv6_gtp_packet; - *pkt_len = sizeof(dummy_ipv6_gtp_packet); - *offsets = dummy_ipv6_gtp_no_pay_packet_offsets; - } else if (inner_ipv6) { - if (inner_udp) { - *pkt = dummy_ipv6_gtpu_ipv6_udp_packet; - *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet); - *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets; - } else { - *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet; - *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet); - *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets; - } - } else { - if (inner_udp) { - *pkt = dummy_ipv6_gtpu_ipv4_udp_packet; - *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet); - *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets; - } else { - *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet; - *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet); - *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets; - } - } - } else { - if (gtp_no_pay) { - *pkt = dummy_ipv4_gtpu_ipv4_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet); - *offsets = dummy_ipv4_gtp_no_pay_packet_offsets; - } else if (inner_ipv6) 
{ - if (inner_udp) { - *pkt = dummy_ipv4_gtpu_ipv6_udp_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet); - *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets; - } else { - *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet); - *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets; - } - } else { - if (inner_udp) { - *pkt = dummy_ipv4_gtpu_ipv4_udp_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet); - *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets; - } else { - *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet); - *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets; - } - } - } - return; - } - - if (tun_type == ICE_SW_TUN_GTPC) { - if (outer_ipv6) { - *pkt = dummy_ipv6_gtp_packet; - *pkt_len = sizeof(dummy_ipv6_gtp_packet); - *offsets = dummy_ipv6_gtp_no_pay_packet_offsets; - } else { - *pkt = dummy_ipv4_gtpu_ipv4_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet); - *offsets = dummy_ipv4_gtp_no_pay_packet_offsets; - } - return; - } - - if (tun_type == ICE_SW_TUN_NVGRE) { - if (inner_tcp && inner_ipv6) { - *pkt = dummy_gre_ipv6_tcp_packet; - *pkt_len = sizeof(dummy_gre_ipv6_tcp_packet); - *offsets = dummy_gre_ipv6_tcp_packet_offsets; - return; - } - if (inner_tcp) { - *pkt = dummy_gre_tcp_packet; - *pkt_len = sizeof(dummy_gre_tcp_packet); - *offsets = dummy_gre_tcp_packet_offsets; - return; - } - if (inner_ipv6) { - *pkt = dummy_gre_ipv6_udp_packet; - *pkt_len = sizeof(dummy_gre_ipv6_udp_packet); - *offsets = dummy_gre_ipv6_udp_packet_offsets; - return; - } - *pkt = dummy_gre_udp_packet; - *pkt_len = sizeof(dummy_gre_udp_packet); - *offsets = dummy_gre_udp_packet_offsets; - return; - } - - if (tun_type == ICE_SW_TUN_VXLAN || - tun_type == ICE_SW_TUN_GENEVE) { - if (inner_tcp && inner_ipv6) { - *pkt = dummy_udp_tun_ipv6_tcp_packet; - *pkt_len = sizeof(dummy_udp_tun_ipv6_tcp_packet); - *offsets = dummy_udp_tun_ipv6_tcp_packet_offsets; - return; - } - if (inner_tcp) { - *pkt = dummy_udp_tun_tcp_packet; - *pkt_len = sizeof(dummy_udp_tun_tcp_packet); - *offsets = dummy_udp_tun_tcp_packet_offsets; - return; - } - if (inner_ipv6) { - *pkt = dummy_udp_tun_ipv6_udp_packet; - *pkt_len = sizeof(dummy_udp_tun_ipv6_udp_packet); - *offsets = dummy_udp_tun_ipv6_udp_packet_offsets; - return; - } - *pkt = dummy_udp_tun_udp_packet; - *pkt_len = sizeof(dummy_udp_tun_udp_packet); - *offsets = dummy_udp_tun_udp_packet_offsets; - return; - } + while (ret->match && (match & ret->match) != ret->match) + ret++; - if (inner_udp && !outer_ipv6) { - if (vlan) { - *pkt = dummy_vlan_udp_packet; - *pkt_len = sizeof(dummy_vlan_udp_packet); - *offsets = dummy_vlan_udp_packet_offsets; - return; - } - *pkt = dummy_udp_packet; - *pkt_len = sizeof(dummy_udp_packet); - *offsets = dummy_udp_packet_offsets; - return; - } else if (inner_udp && outer_ipv6) { - if (vlan) { - *pkt = dummy_vlan_udp_ipv6_packet; - *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet); - *offsets = dummy_vlan_udp_ipv6_packet_offsets; - return; - } - *pkt = dummy_udp_ipv6_packet; - *pkt_len = sizeof(dummy_udp_ipv6_packet); - *offsets = dummy_udp_ipv6_packet_offsets; - return; - } else if ((inner_tcp && outer_ipv6) || outer_ipv6) { - if (vlan) { - *pkt = dummy_vlan_tcp_ipv6_packet; - *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet); - *offsets = dummy_vlan_tcp_ipv6_packet_offsets; - return; - } - *pkt = dummy_tcp_ipv6_packet; - *pkt_len = sizeof(dummy_tcp_ipv6_packet); - *offsets = dummy_tcp_ipv6_packet_offsets; - return; - } - - if (vlan) { - *pkt = 
dummy_vlan_tcp_packet; - *pkt_len = sizeof(dummy_vlan_tcp_packet); - *offsets = dummy_vlan_tcp_packet_offsets; - } else { - *pkt = dummy_tcp_packet; - *pkt_len = sizeof(dummy_tcp_packet); - *offsets = dummy_tcp_packet_offsets; - } + return ret; } /** @@ -5716,15 +5637,12 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * structure per protocol header * @lkups_cnt: number of protocols * @s_rule: stores rule information from the match criteria - * @dummy_pkt: dummy packet to fill according to filter match criteria - * @pkt_len: packet length of dummy packet - * @offsets: offset info for the dummy packet + * @profile: dummy packet profile (the template, its size and header offsets) */ static int ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_aqc_sw_rules_elem *s_rule, - const u8 *dummy_pkt, u16 pkt_len, - const struct ice_dummy_pkt_offsets *offsets) + const struct ice_dummy_pkt_profile *profile) { u8 *pkt; u16 i; @@ -5734,9 +5652,10 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, */ pkt = s_rule->pdata.lkup_tx_rx.hdr; - memcpy(pkt, dummy_pkt, pkt_len); + memcpy(pkt, profile->pkt, profile->pkt_len); for (i = 0; i < lkups_cnt; i++) { + const struct ice_dummy_pkt_offsets *offsets = profile->offsets; enum ice_protocol_type type; u16 offset = 0, len = 0, j; bool found = false; @@ -5810,16 +5729,18 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * indicated by the mask to make sure we don't improperly write * over any significant packet data. */ - for (j = 0; j < len / sizeof(u16); j++) - if (((u16 *)&lkups[i].m_u)[j]) - ((u16 *)(pkt + offset))[j] = - (((u16 *)(pkt + offset))[j] & - ~((u16 *)&lkups[i].m_u)[j]) | - (((u16 *)&lkups[i].h_u)[j] & - ((u16 *)&lkups[i].m_u)[j]); + for (j = 0; j < len / sizeof(u16); j++) { + u16 *ptr = (u16 *)(pkt + offset); + u16 mask = lkups[i].m_raw[j]; + + if (!mask) + continue; + + ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask); + } } - s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len); + s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(profile->pkt_len); return 0; } @@ -6042,12 +5963,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, struct ice_rule_query_data *added_entry) { struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL; - u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle; - const struct ice_dummy_pkt_offsets *pkt_offsets; struct ice_aqc_sw_rules_elem *s_rule = NULL; + const struct ice_dummy_pkt_profile *profile; + u16 rid = 0, i, rule_buf_sz, vsi_handle; struct list_head *rule_head; struct ice_switch_info *sw; - const u8 *pkt = NULL; u16 word_cnt; u32 act = 0; int status; @@ -6065,24 +5985,18 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, /* get # of words we need to match */ word_cnt = 0; for (i = 0; i < lkups_cnt; i++) { - u16 j, *ptr; + u16 j; - ptr = (u16 *)&lkups[i].m_u; - for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++) - if (ptr[j] != 0) + for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++) + if (lkups[i].m_raw[j]) word_cnt++; } if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS) return -EINVAL; - /* make sure that we can locate a dummy packet */ - ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len, - &pkt_offsets); - if (!pkt) { - status = -EINVAL; - goto err_ice_add_adv_rule; - } + /* locate a dummy packet */ + profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type); if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI || 
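
The rewritten fill loop above merges the caller's match values into the dummy packet one 16-bit word at a time, touching only the bits covered by the mask. A worked example of the merge expression:

u16 tmpl = 0x0045;      /* word already present in the template */
u16 want = 0x1234;      /* header value from the lookup element */
u16 mask = 0xFF00;      /* caller matches only the high byte */

tmpl = (tmpl & ~mask) | (want & mask);  /* 0x0045 -> 0x1245 */
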
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || @@ -6123,7 +6037,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } return status; } - rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len; + rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + profile->pkt_len; s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); if (!s_rule) return -ENOMEM; @@ -6183,8 +6097,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid); s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); - status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, - pkt_len, pkt_offsets); + status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile); if (status) goto err_ice_add_adv_rule; @@ -6192,7 +6105,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) { status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, s_rule->pdata.lkup_tx_rx.hdr, - pkt_offsets); + profile->offsets); if (status) goto err_ice_add_adv_rule; } diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index ed3d1d03befa..ecac75e71395 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -138,8 +138,16 @@ struct ice_update_recipe_lkup_idx_params { struct ice_adv_lkup_elem { enum ice_protocol_type type; - union ice_prot_hdr h_u; /* Header values */ - union ice_prot_hdr m_u; /* Mask of header values to match */ + union { + union ice_prot_hdr h_u; /* Header values */ + /* Used to iterate over the headers */ + u16 h_raw[sizeof(union ice_prot_hdr) / sizeof(u16)]; + }; + union { + union ice_prot_hdr m_u; /* Mask of header values to match */ + /* Used to iterate over header mask */ + u16 m_raw[sizeof(union ice_prot_hdr) / sizeof(u16)]; + }; }; struct ice_sw_act_ctrl { diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index f9bf008471c9..3f8b7274ed2f 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -8,6 +8,7 @@ #include <linux/prefetch.h> #include <linux/bpf_trace.h> #include <net/dsfield.h> +#include <net/mpls.h> #include <net/xdp.h> #include "ice_txrx_lib.h" #include "ice_lib.h" @@ -1748,18 +1749,24 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); + protocol = vlan_get_protocol(skb); + + if (eth_p_mpls(protocol)) + ip.hdr = skb_inner_network_header(skb); + else + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); /* compute outer L2 header size */ l2_len = ip.hdr - skb->data; offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; - protocol = vlan_get_protocol(skb); - - if (protocol == htons(ETH_P_IP)) + /* set the tx_flags to indicate the IP protocol type. this is + * required so that checksum header computation below is accurate. 
+ */ + if (ip.v4->version == 4) first->tx_flags |= ICE_TX_FLAGS_IPV4; - else if (protocol == htons(ETH_P_IPV6)) + else if (ip.v6->version == 6) first->tx_flags |= ICE_TX_FLAGS_IPV6; if (skb->encapsulation) { @@ -1957,6 +1964,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) unsigned char *hdr; } l4; u64 cd_mss, cd_tso_len; + __be16 protocol; u32 paylen; u8 l4_start; int err; @@ -1972,8 +1980,13 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) return err; /* cppcheck-suppress unreadVariable */ - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); + protocol = vlan_get_protocol(skb); + + if (eth_p_mpls(protocol)) + ip.hdr = skb_inner_network_header(skb); + else + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); /* initialize outer IP header fields */ if (ip.v4->version == 4) { diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index cead3eb149bd..f5a906c03669 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -133,6 +133,7 @@ static inline int ice_skb_pad(void) #define ICE_XDP_CONSUMED BIT(0) #define ICE_XDP_TX BIT(1) #define ICE_XDP_REDIR BIT(2) +#define ICE_XDP_EXIT BIT(3) #define ICE_RX_DMA_ATTR \ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 3f1a63815bac..b72606c9e6d0 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -1358,9 +1358,9 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - /* Skip queue if not enabled */ if (!test_bit(vf_q_id, vf->txq_ena)) - continue; + dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n", + vf_q_id, vsi->vsi_num); ice_fill_txq_meta(vsi, ring, &txq_meta); @@ -3625,6 +3625,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) return; } + mutex_lock(&vf->cfg_lock); + /* Check if VF is disabled. 
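The hunk above moves mutex_lock(&vf->cfg_lock) ahead of all validation, and the following hunks route every early exit through a single finish: label that unlocks and releases the VF. A standalone sketch of that single-exit pattern, using a pthread mutex as a stand-in for the kernel mutex and hypothetical checks:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

/* Take the lock once up front, then funnel every error path through
 * one unlock point so no early return can leak the lock.
 */
static int process_msg(int opcode, int disabled)
{
	int err = 0;

	pthread_mutex_lock(&cfg_lock);

	if (disabled) {
		err = -EPERM;	/* peer disabled: reject under the lock */
		goto finish;
	}
	if (opcode < 0) {
		err = -EINVAL;	/* malformed message */
		goto finish;
	}
	/* ... dispatch and handle the message ... */
finish:
	pthread_mutex_unlock(&cfg_lock);
	return err;
}

int main(void)
{
	printf("%d %d\n", process_msg(1, 0), process_msg(-1, 0)); /* 0 -22 */
	return 0;
}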
*/ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) { err = -EPERM; @@ -3642,32 +3644,20 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) err = -EINVAL; } - if (!ice_vc_is_opcode_allowed(vf, v_opcode)) { - ice_vc_send_msg_to_vf(vf, v_opcode, - VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, - 0); - ice_put_vf(vf); - return; - } - error_handler: if (err) { ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM, NULL, 0); dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n", vf_id, v_opcode, msglen, err); - ice_put_vf(vf); - return; + goto finish; } - /* VF is being configured in another context that triggers a VFR, so no - * need to process this message - */ - if (!mutex_trylock(&vf->cfg_lock)) { - dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n", - vf->vf_id); - ice_put_vf(vf); - return; + if (!ice_vc_is_opcode_allowed(vf, v_opcode)) { + ice_vc_send_msg_to_vf(vf, v_opcode, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, + 0); + goto finish; } switch (v_opcode) { @@ -3780,6 +3770,7 @@ error_handler: vf_id, v_opcode, err); } +finish: mutex_unlock(&vf->cfg_lock); ice_put_vf(vf); } diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index dfbcaf08520e..49ba8bfdbf04 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -41,8 +41,10 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx) static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx) { ice_clean_tx_ring(vsi->tx_rings[q_idx]); - if (ice_is_xdp_ena_vsi(vsi)) + if (ice_is_xdp_ena_vsi(vsi)) { + synchronize_rcu(); ice_clean_tx_ring(vsi->xdp_rings[q_idx]); + } ice_clean_rx_ring(vsi->rx_rings[q_idx]); } @@ -413,8 +415,8 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp, */ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) { + u32 nb_buffs_extra = 0, nb_buffs = 0; union ice_32b_rx_flex_desc *rx_desc; - u32 nb_buffs_extra = 0, nb_buffs; u16 ntu = rx_ring->next_to_use; u16 total_count = count; struct xdp_buff **xdp; @@ -426,6 +428,10 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, rx_ring->count - ntu); + if (nb_buffs_extra != rx_ring->count - ntu) { + ntu += nb_buffs_extra; + goto exit; + } rx_desc = ICE_RX_DESC(rx_ring, 0); xdp = ice_xdp_buf(rx_ring, 0); ntu = 0; @@ -439,6 +445,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) if (ntu == rx_ring->count) ntu = 0; +exit: if (rx_ring->next_to_use != ntu) ice_release_rx_desc(rx_ring, ntu); @@ -538,9 +545,13 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, if (likely(act == XDP_REDIRECT)) { err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - if (err) - goto out_failure; - return ICE_XDP_REDIR; + if (!err) + return ICE_XDP_REDIR; + if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) + result = ICE_XDP_EXIT; + else + result = ICE_XDP_CONSUMED; + goto out_failure; } switch (act) { @@ -551,15 +562,16 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, if (result == ICE_XDP_CONSUMED) goto out_failure; break; + case XDP_DROP: + result = ICE_XDP_CONSUMED; + break; default: bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: + result = ICE_XDP_CONSUMED; out_failure: 
trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - fallthrough; - case XDP_DROP: - result = ICE_XDP_CONSUMED; break; } @@ -580,6 +592,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) unsigned int xdp_xmit = 0; struct bpf_prog *xdp_prog; bool failure = false; + int entries_to_alloc; /* ZC path is enabled only when XDP program is set, * so here it cannot be NULL */ @@ -627,18 +640,23 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool); xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring); - if (xdp_res) { - if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) - xdp_xmit |= xdp_res; - else - xsk_buff_free(xdp); + if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) { + xdp_xmit |= xdp_res; + } else if (xdp_res == ICE_XDP_EXIT) { + failure = true; + break; + } else if (xdp_res == ICE_XDP_CONSUMED) { + xsk_buff_free(xdp); + } else if (xdp_res == ICE_XDP_PASS) { + goto construct_skb; + } - total_rx_bytes += size; - total_rx_packets++; + total_rx_bytes += size; + total_rx_packets++; + + ice_bump_ntc(rx_ring); + continue; - ice_bump_ntc(rx_ring); - continue; - } construct_skb: /* XDP_PASS path */ skb = ice_construct_skb_zc(rx_ring, xdp); @@ -666,7 +684,9 @@ construct_skb: ice_receive_skb(rx_ring, skb, vlan_tag); } - failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring)); + entries_to_alloc = ICE_DESC_UNUSED(rx_ring); + if (entries_to_alloc > ICE_RING_QUARTER(rx_ring)) + failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc); ice_finalize_xdp_rx(xdp_ring, xdp_xmit); ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); @@ -918,17 +938,17 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, struct ice_vsi *vsi = np->vsi; struct ice_tx_ring *ring; - if (test_bit(ICE_DOWN, vsi->state)) + if (test_bit(ICE_VSI_DOWN, vsi->state)) return -ENETDOWN; if (!ice_is_xdp_ena_vsi(vsi)) - return -ENXIO; + return -EINVAL; if (queue_id >= vsi->num_txq) - return -ENXIO; + return -EINVAL; if (!vsi->xdp_rings[queue_id]->xsk_pool) - return -ENXIO; + return -EINVAL; ring = vsi->xdp_rings[queue_id]; diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index 66ea566488d1..59d5c467ea6e 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask) { u32 swfw_sync; - while (igc_get_hw_semaphore_i225(hw)) - ; /* Empty */ + /* Releasing the resource requires first getting the HW semaphore. + * If we fail to get the semaphore, there is nothing we can do, + * except log an error and quit. We are not allowed to hang here + * indefinitely, as it may cause denial of service or system crash.
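The rework below makes one bounded attempt to take the hardware semaphore and gives up with a diagnostic instead of spinning forever. A standalone sketch of the pattern; try_acquire() is a hypothetical stand-in for igc_get_hw_semaphore_i225(), which returns non-zero on failure:

#include <stdio.h>

/* Hypothetical stand-in for the hardware semaphore acquire;
 * returns non-zero when the semaphore cannot be taken.
 */
static int try_acquire(void)
{
	return 0;
}

static void release_resource(void)
{
	/* Single bounded attempt: on failure, log and return rather
	 * than looping indefinitely in a path that must not hang.
	 */
	if (try_acquire()) {
		fprintf(stderr, "failed to take semaphore, skipping release\n");
		return;
	}
	/* ... clear the ownership bits and put the semaphore back ... */
	puts("released");
}

int main(void)
{
	release_resource();
	return 0;
}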
+ */ + if (igc_get_hw_semaphore_i225(hw)) { + hw_dbg("Failed to release SW_FW_SYNC.\n"); + return; + } swfw_sync = rd32(IGC_SW_FW_SYNC); swfw_sync &= ~mask; diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 40dbf4b43234..6961f65d36b9 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -581,7 +581,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data) * the lower time out */ for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { - usleep_range(500, 1000); + udelay(50); mdic = rd32(IGC_MDIC); if (mdic & IGC_MDIC_READY) break; @@ -638,7 +638,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data) * the lower time out */ for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { - usleep_range(500, 1000); + udelay(50); mdic = rd32(IGC_MDIC); if (mdic & IGC_MDIC_READY) break; diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 0d6e3215e98f..653e9f1e35b5 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -992,6 +992,17 @@ static void igc_ptp_time_restore(struct igc_adapter *adapter) igc_ptp_write_i225(adapter, &ts); } +static void igc_ptm_stop(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_EN; + + wr32(IGC_PTM_CTRL, ctrl); +} + /** * igc_ptp_suspend - Disable PTP work items and prepare for suspend * @adapter: Board private structure @@ -1009,8 +1020,10 @@ void igc_ptp_suspend(struct igc_adapter *adapter) adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); - if (pci_device_is_present(adapter->pdev)) + if (pci_device_is_present(adapter->pdev)) { igc_ptp_time_save(adapter); + igc_ptm_stop(adapter); + } } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index e596e1a9fc75..69d11ff7677d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -903,7 +903,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) /* Tx IPsec offload doesn't seem to work on this * device, so block these requests for now. 
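The check below first strips the one tolerated flag (XFRM_OFFLOAD_IPV6) and then requires exact equality with XFRM_OFFLOAD_INBOUND, so any unexpected flag bit is rejected rather than silently accepted (the old "& INBOUND" test let extra bits through). A standalone sketch with stand-in flag values (the real XFRM_OFFLOAD_* values may differ):

#include <stdint.h>
#include <stdio.h>

#define OFFLOAD_INBOUND	(1u << 0)	/* stand-ins for XFRM_OFFLOAD_* */
#define OFFLOAD_IPV6	(1u << 1)

/* Exact-match flag test: mask off the tolerated bit, then demand the
 * remainder be exactly INBOUND so unknown flags cannot sneak through.
 */
static int sa_flags_supported(uint32_t flags)
{
	flags &= ~OFFLOAD_IPV6;
	return flags == OFFLOAD_INBOUND;
}

int main(void)
{
	printf("%d\n", sa_flags_supported(OFFLOAD_INBOUND));			/* 1 */
	printf("%d\n", sa_flags_supported(OFFLOAD_INBOUND | OFFLOAD_IPV6));	/* 1 */
	printf("%d\n", sa_flags_supported(OFFLOAD_INBOUND | (1u << 2)));	/* 0 */
	return 0;
}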
*/ - if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) { + sam->flags = sam->flags & ~XFRM_OFFLOAD_IPV6; + if (sam->flags != XFRM_OFFLOAD_INBOUND) { err = -EOPNOTSUPP; goto err_out; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h index bba3feaf3318..f1f69ce67420 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -8,6 +8,7 @@ #define IXGBE_XDP_CONSUMED BIT(0) #define IXGBE_XDP_TX BIT(1) #define IXGBE_XDP_REDIR BIT(2) +#define IXGBE_XDP_EXIT BIT(3) #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ IXGBE_TXD_CMD_RS) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index dd7ff66d422f..1703c640a434 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -109,9 +109,13 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, if (likely(act == XDP_REDIRECT)) { err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - if (err) - goto out_failure; - return IXGBE_XDP_REDIR; + if (!err) + return IXGBE_XDP_REDIR; + if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) + result = IXGBE_XDP_EXIT; + else + result = IXGBE_XDP_CONSUMED; + goto out_failure; } switch (act) { @@ -130,16 +134,16 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, if (result == IXGBE_XDP_CONSUMED) goto out_failure; break; + case XDP_DROP: + result = IXGBE_XDP_CONSUMED; + break; default: bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: + result = IXGBE_XDP_CONSUMED; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - fallthrough; /* handle aborts by dropping packet */ - case XDP_DROP: - result = IXGBE_XDP_CONSUMED; - break; } return result; } @@ -303,21 +307,26 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool); xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp); - if (xdp_res) { - if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) - xdp_xmit |= xdp_res; - else - xsk_buff_free(bi->xdp); + if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) { + xdp_xmit |= xdp_res; + } else if (xdp_res == IXGBE_XDP_EXIT) { + failure = true; + break; + } else if (xdp_res == IXGBE_XDP_CONSUMED) { + xsk_buff_free(bi->xdp); + } else if (xdp_res == IXGBE_XDP_PASS) { + goto construct_skb; + } - bi->xdp = NULL; - total_rx_packets++; - total_rx_bytes += size; + bi->xdp = NULL; + total_rx_packets++; + total_rx_bytes += size; - cleaned_count++; - ixgbe_inc_ntc(rx_ring); - continue; - } + cleaned_count++; + ixgbe_inc_ntc(rx_ring); + continue; +construct_skb: /* XDP_PASS path */ skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp); if (!skb) { @@ -516,10 +525,10 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) return -ENETDOWN; if (!READ_ONCE(adapter->xdp_prog)) - return -ENXIO; + return -EINVAL; if (qid >= adapter->num_xdp_queues) - return -ENXIO; + return -EINVAL; ring = adapter->xdp_ring[qid]; @@ -527,7 +536,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) return -ENETDOWN; if (!ring->xsk_pool) - return -ENXIO; + return -EINVAL; if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) { u64 eics = BIT_ULL(ring->q_vector->v_idx); diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index fe0989c0fc25..f58a1c0144ba 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ 
b/drivers/net/ethernet/marvell/Kconfig @@ -62,6 +62,7 @@ config MVNETA select MVMDIO select PHYLINK select PAGE_POOL + select PAGE_POOL_STATS help This driver supports the network interface units in the Marvell ARMADA XP, ARMADA 370, ARMADA 38x and @@ -177,6 +178,7 @@ config SKY2_DEBUG source "drivers/net/ethernet/marvell/octeontx2/Kconfig" +source "drivers/net/ethernet/marvell/octeon_ep/Kconfig" source "drivers/net/ethernet/marvell/prestera/Kconfig" endif # NET_VENDOR_MARVELL diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile index 9f88fe822555..ceba4aa4f026 100644 --- a/drivers/net/ethernet/marvell/Makefile +++ b/drivers/net/ethernet/marvell/Makefile @@ -11,5 +11,6 @@ obj-$(CONFIG_MVPP2) += mvpp2/ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o obj-$(CONFIG_SKGE) += skge.o obj-$(CONFIG_SKY2) += sky2.o +obj-y += octeon_ep/ obj-y += octeontx2/ obj-y += prestera/ diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 5f9ab1842d49..c18801490649 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2751,7 +2751,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, } ret = of_get_mac_address(pnp, ppd.mac_addr); - if (ret) + if (ret == -EPROBE_DEFER) return ret; mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 934f6dd90992..f6a54c7f0c69 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4735,6 +4735,9 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) memcpy(data + i * ETH_GSTRING_LEN, mvneta_statistics[i].name, ETH_GSTRING_LEN); + + data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics); + page_pool_ethtool_stats_get_strings(data); } } @@ -4847,6 +4850,17 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp) } } +static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data) +{ + struct page_pool_stats stats = {}; + int i; + + for (i = 0; i < rxq_number; i++) + page_pool_get_stats(pp->rxqs[i].page_pool, &stats); + + page_pool_ethtool_stats_get(data, &stats); +} + static void mvneta_ethtool_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -4857,12 +4871,16 @@ static void mvneta_ethtool_get_stats(struct net_device *dev, for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) *data++ = pp->ethtool_stats[i]; + + mvneta_ethtool_pp_stats(pp, data); } static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) { if (sset == ETH_SS_STATS) - return ARRAY_SIZE(mvneta_statistics); + return ARRAY_SIZE(mvneta_statistics) + + page_pool_ethtool_stats_get_count(); + return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/marvell/octeon_ep/Kconfig b/drivers/net/ethernet/marvell/octeon_ep/Kconfig new file mode 100644 index 000000000000..0d7db815340e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Marvell's Octeon PCI Endpoint NIC Driver Configuration +# + +config OCTEON_EP + tristate "Marvell Octeon PCI Endpoint NIC Driver" + depends on 64BIT + depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL + help + This driver supports networking functionality of Marvell's + Octeon PCI Endpoint NIC. 
+ + For the list of devices supported by this driver, refer to the + documentation in + <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst>. + + To compile this driver as a module, choose M here. The name of the + module is octeon_ep. diff --git a/drivers/net/ethernet/marvell/octeon_ep/Makefile b/drivers/net/ethernet/marvell/octeon_ep/Makefile new file mode 100644 index 000000000000..2026c8118158 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Network driver for Marvell's Octeon PCI Endpoint NIC +# + +obj-$(CONFIG_OCTEON_EP) += octeon_ep.o + +octeon_ep-y := octep_main.o octep_cn9k_pf.o octep_tx.o octep_rx.o \ + octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c new file mode 100644 index 000000000000..6ad88d0fe43f --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c @@ -0,0 +1,737 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +#include "octep_config.h" +#include "octep_main.h" +#include "octep_regs_cn9k_pf.h" + +/* Names of Hardware non-queue generic interrupts */ +static char *cn93_non_ioq_msix_names[] = { + "epf_ire_rint", + "epf_ore_rint", + "epf_vfire_rint0", + "epf_vfire_rint1", + "epf_vfore_rint0", + "epf_vfore_rint1", + "epf_mbox_rint0", + "epf_mbox_rint1", + "epf_oei_rint", + "epf_dma_rint", + "epf_dma_vf_rint0", + "epf_dma_vf_rint1", + "epf_pp_vf_rint0", + "epf_pp_vf_rint1", + "epf_misc_rint", + "epf_rsvd", +}; + +/* Dump useful hardware CSRs for debug purposes */ +static void cn93_dump_regs(struct octep_device *oct, int qno) +{ + struct device *dev = &oct->pdev->dev; + + dev_info(dev, "IQ-%d register dump\n", qno); + dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_INSTR_DBELL(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno))); + dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_CONTROL(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno))); + dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_ENABLE(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno))); + dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_INSTR_BADDR(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno))); + dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_INSTR_RSIZE(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(qno))); + dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_CNTS(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_CNTS(qno))); + dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_INT_LEVELS(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(qno))); + dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_PKT_CNT(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_PKT_CNT(qno))); + dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_BYTE_CNT(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(qno))); + + dev_info(dev, "OQ-%d register dump\n", qno); + dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_SLIST_DBELL(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(qno))); + dev_info(dev,
"R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_CONTROL(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(qno))); + dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_ENABLE(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(qno))); + dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_SLIST_BADDR(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(qno))); + dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_SLIST_RSIZE(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(qno))); + dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_CNTS(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_CNTS(qno))); + dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_INT_LEVELS(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(qno))); + dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_PKT_CNT(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(qno))); + dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_BYTE_CNT(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_BYTE_CNT(qno))); + dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_ERR_TYPE(qno), + octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(qno))); +} + +/* Reset Hardware Tx queue */ +static int cn93_reset_iq(struct octep_device *oct, int q_no) +{ + struct octep_config *conf = oct->conf; + u64 val = 0ULL; + + dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no); + + /* Get absolute queue number */ + q_no += conf->pf_ring_cfg.srn; + + /* Disable the Tx/Instruction Ring */ + octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(q_no), val); + + /* clear the Instruction Ring packet/byte counts and doorbell CSRs */ + octep_write_csr64(oct, CN93_SDP_R_IN_CNTS(q_no), val); + octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(q_no), val); + octep_write_csr64(oct, CN93_SDP_R_IN_PKT_CNT(q_no), val); + octep_write_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(q_no), val); + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(q_no), val); + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(q_no), val); + + val = 0xFFFFFFFF; + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(q_no), val); + + return 0; +} + +/* Reset Hardware Rx queue */ +static void cn93_reset_oq(struct octep_device *oct, int q_no) +{ + u64 val = 0ULL; + + q_no += CFG_GET_PORTS_PF_SRN(oct->conf); + + /* Disable Output (Rx) Ring */ + octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(q_no), val); + + /* Clear count CSRs */ + val = octep_read_csr(oct, CN93_SDP_R_OUT_CNTS(q_no)); + octep_write_csr(oct, CN93_SDP_R_OUT_CNTS(q_no), val); + + octep_write_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL); + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF); +} + +/* Reset all hardware Tx/Rx queues */ +static void octep_reset_io_queues_cn93_pf(struct octep_device *oct) +{ + struct pci_dev *pdev = oct->pdev; + int q; + + dev_dbg(&pdev->dev, "Reset OCTEP_CN93 PF IO Queues\n"); + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { + cn93_reset_iq(oct, q); + cn93_reset_oq(oct, q); + } +} + +/* Initialize windowed addresses to access some hardware registers */ +static void octep_setup_pci_window_regs_cn93_pf(struct octep_device *oct) +{ + u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr; + + oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_ADDR64); + oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_ADDR64); + 
oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_DATA64); + oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_DATA64); +} + +/* Configure Hardware mapping: inform hardware which rings belong to PF. */ +static void octep_configure_ring_mapping_cn93_pf(struct octep_device *oct) +{ + struct octep_config *conf = oct->conf; + struct pci_dev *pdev = oct->pdev; + u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf); + int q; + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) { + u64 regval = 0; + + if (oct->pcie_port) + regval = 8 << CN93_SDP_FUNC_SEL_EPF_BIT_POS; + + octep_write_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q), regval); + + regval = octep_read_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q)); + dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n", + CN93_SDP_EPVF_RING(pf_srn + q), regval); + } +} + +/* Initialize configuration limits and initial active config 93xx PF. */ +static void octep_init_config_cn93_pf(struct octep_device *oct) +{ + struct octep_config *conf = oct->conf; + struct pci_dev *pdev = oct->pdev; + u64 val; + + /* Read ring configuration: + * PF ring count, number of VFs and rings per VF supported + */ + val = octep_read_csr64(oct, CN93_SDP_EPF_RINFO); + conf->sriov_cfg.max_rings_per_vf = CN93_SDP_EPF_RINFO_RPVF(val); + conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf; + conf->sriov_cfg.max_vfs = CN93_SDP_EPF_RINFO_NVFS(val); + conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs; + conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val); + + val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port)); + conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val); + conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val); + conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings; + dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n", + conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf, + conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings); + + conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS; + conf->iq.instr_type = OCTEP_64BYTE_INSTR; + conf->iq.pkind = 0; + conf->iq.db_min = OCTEP_DB_MIN; + conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD; + + conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS; + conf->oq.buf_size = OCTEP_OQ_BUF_SIZE; + conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD; + conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD; + conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD; + + conf->msix_cfg.non_ioq_msix = CN93_NUM_NON_IOQ_INTR; + conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings; + conf->msix_cfg.non_ioq_msix_names = cn93_non_ioq_msix_names; + + conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr + (0x400000ull * 7); +} + +/* Setup registers for a hardware Tx Queue */ +static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no) +{ + struct octep_iq *iq = oct->iq[iq_no]; + u32 reset_instr_cnt; + u64 reg_val; + + iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no)); + + /* wait for IDLE to set to 1 */ + if (!(reg_val & CN93_R_IN_CTL_IDLE)) { + do { + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no)); + } while (!(reg_val & CN93_R_IN_CTL_IDLE)); + } + + reg_val |= CN93_R_IN_CTL_RDSIZE; + reg_val |= CN93_R_IN_CTL_IS_64B; + reg_val |= CN93_R_IN_CTL_ESR; + octep_write_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no), reg_val); + + /* Write the start of the input queue's ring and its size */ + 
octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(iq_no), + iq->desc_ring_dma); + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(iq_no), + iq->max_count); + + /* Remember the doorbell & instruction count register addr + * for this queue + */ + iq->doorbell_reg = oct->mmio[0].hw_addr + + CN93_SDP_R_IN_INSTR_DBELL(iq_no); + iq->inst_cnt_reg = oct->mmio[0].hw_addr + + CN93_SDP_R_IN_CNTS(iq_no); + iq->intr_lvl_reg = oct->mmio[0].hw_addr + + CN93_SDP_R_IN_INT_LEVELS(iq_no); + + /* Store the current instruction counter (used in flush_iq calculation) */ + reset_instr_cnt = readl(iq->inst_cnt_reg); + writel(reset_instr_cnt, iq->inst_cnt_reg); + + /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */ + reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff; + octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val); +} + +/* Setup registers for a hardware Rx Queue */ +static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no) +{ + u64 reg_val; + u64 oq_ctl = 0ULL; + u32 time_threshold = 0; + struct octep_oq *oq = oct->oq[oq_no]; + + oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); + + /* wait for IDLE to set to 1 */ + if (!(reg_val & CN93_R_OUT_CTL_IDLE)) { + do { + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); + } while (!(reg_val & CN93_R_OUT_CTL_IDLE)); + } + + reg_val &= ~(CN93_R_OUT_CTL_IMODE); + reg_val &= ~(CN93_R_OUT_CTL_ROR_P); + reg_val &= ~(CN93_R_OUT_CTL_NSR_P); + reg_val &= ~(CN93_R_OUT_CTL_ROR_I); + reg_val &= ~(CN93_R_OUT_CTL_NSR_I); + reg_val &= ~(CN93_R_OUT_CTL_ES_I); + reg_val &= ~(CN93_R_OUT_CTL_ROR_D); + reg_val &= ~(CN93_R_OUT_CTL_NSR_D); + reg_val &= ~(CN93_R_OUT_CTL_ES_D); + reg_val |= (CN93_R_OUT_CTL_ES_P); + + octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), reg_val); + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(oq_no), + oq->desc_ring_dma); + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(oq_no), + oq->max_count); + + oq_ctl = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); + oq_ctl &= ~0x7fffffULL; /* clear the ISIZE and BSIZE (22-0) */ + oq_ctl |= (oq->buffer_size & 0xffff); /* populate the BSIZE (15-0) */ + octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), oq_ctl); + + /* Get the mapped address of the pkt_sent and pkts_credit regs */ + oq->pkts_sent_reg = oct->mmio[0].hw_addr + CN93_SDP_R_OUT_CNTS(oq_no); + oq->pkts_credit_reg = oct->mmio[0].hw_addr + + CN93_SDP_R_OUT_SLIST_DBELL(oq_no); + + time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf); + reg_val = ((u64)time_threshold << 32) | + CFG_GET_OQ_INTR_PKT(oct->conf); + octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); +} + +/* Setup registers for a PF mailbox */ +static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no) +{ + struct octep_mbox *mbox = oct->mbox[q_no]; + + mbox->q_no = q_no; + + /* PF mbox interrupt reg */ + mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0); + + /* PF to VF DATA reg. PF writes into this reg */ + mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no); + + /* VF to PF DATA reg.
PF reads from this reg */ + mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no); +} + +/* Mailbox Interrupt handler */ +static void cn93_handle_pf_mbox_intr(struct octep_device *oct) +{ + u64 mbox_int_val = 0ULL, val = 0ULL, qno = 0ULL; + + mbox_int_val = readq(oct->mbox[0]->mbox_int_reg); + for (qno = 0; qno < OCTEP_MAX_VF; qno++) { + val = readq(oct->mbox[qno]->mbox_read_reg); + dev_dbg(&oct->pdev->dev, + "PF MBOX READ: val:%llx from VF:%llx\n", val, qno); + } + + writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg); +} + +/* Interrupt handler for all non-queue generic interrupts. */ +static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; + u64 reg_val = 0; + int i = 0; + + /* Check for IRERR INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_IRERR_RINT); + if (reg_val) { + dev_info(&pdev->dev, + "Received IRERR_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT, reg_val); + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + reg_val = octep_read_csr64(oct, + CN93_SDP_R_ERR_TYPE(i)); + if (reg_val) { + dev_info(&pdev->dev, + "Received err type on IQ-%d: 0x%llx\n", + i, reg_val); + octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i), + reg_val); + } + } + goto irq_handled; + } + + /* Check for ORERR INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT); + if (reg_val) { + dev_info(&pdev->dev, + "Received ORERR_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT, reg_val); + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + reg_val = octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(i)); + if (reg_val) { + dev_info(&pdev->dev, + "Received err type on OQ-%d: 0x%llx\n", + i, reg_val); + octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i), + reg_val); + } + } + + goto irq_handled; + } + + /* Check for VFIRE INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0)); + if (reg_val) { + dev_info(&pdev->dev, + "Received VFIRE_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val); + goto irq_handled; + } + + /* Check for VFORE INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0)); + if (reg_val) { + dev_info(&pdev->dev, + "Received VFORE_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val); + goto irq_handled; + } + + /* Check for MBOX INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MBOX_RINT(0)); + if (reg_val) { + dev_info(&pdev->dev, + "Received MBOX_RINT intr: 0x%llx\n", reg_val); + cn93_handle_pf_mbox_intr(oct); + goto irq_handled; + } + + /* Check for OEI INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT); + if (reg_val) { + dev_info(&pdev->dev, + "Received OEI_EINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg_val); + queue_work(octep_wq, &oct->ctrl_mbox_task); + goto irq_handled; + } + + /* Check for DMA INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT); + if (reg_val) { + octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val); + goto irq_handled; + } + + /* Check for DMA VF INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0)); + if (reg_val) { + dev_info(&pdev->dev, + "Received DMA_VF_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val); + goto irq_handled; + } + + /* Check for PPVF INTR */ + reg_val = octep_read_csr64(oct,
CN93_SDP_EPF_PP_VF_RINT(0)); + if (reg_val) { + dev_info(&pdev->dev, + "Received PP_VF_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val); + goto irq_handled; + } + + /* Check for MISC INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT); + if (reg_val) { + dev_info(&pdev->dev, + "Received MISC_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val); + goto irq_handled; + } + + dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n"); +irq_handled: + return IRQ_HANDLED; +} + +/* Tx/Rx queue interrupt handler */ +static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data) +{ + struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data; + struct octep_oq *oq = vector->oq; + + napi_schedule_irqoff(oq->napi); + return IRQ_HANDLED; +} + +/* soft reset of 93xx */ +static int octep_soft_reset_cn93_pf(struct octep_device *oct) +{ + dev_info(&oct->pdev->dev, "CN93XX: Doing soft reset\n"); + + octep_write_csr64(oct, CN93_SDP_WIN_WR_MASK_REG, 0xFF); + + /* Set core domain reset bit */ + OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1S, 1); + /* Wait for 100ms as Octeon resets. */ + mdelay(100); + /* clear core domain reset bit */ + OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1C, 1); + + return 0; +} + +/* Re-initialize Octeon hardware registers */ +static void octep_reinit_regs_cn93_pf(struct octep_device *oct) +{ + u32 i; + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) + oct->hw_ops.setup_iq_regs(oct, i); + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) + oct->hw_ops.setup_oq_regs(oct, i); + + oct->hw_ops.enable_interrupts(oct); + oct->hw_ops.enable_io_queues(oct); + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) + writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); +} + +/* Enable all interrupts */ +static void octep_enable_interrupts_cn93_pf(struct octep_device *oct) +{ + u64 intr_mask = 0ULL; + int srn, num_rings, i; + + srn = CFG_GET_PORTS_PF_SRN(oct->conf); + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + + for (i = 0; i < num_rings; i++) + intr_mask |= (0x1ULL << (srn + i)); + + octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask); +} + +/* Disable all interrupts */ +static void octep_disable_interrupts_cn93_pf(struct octep_device *oct) +{ + u64 intr_mask = 0ULL; + int srn, num_rings, i; + + srn = CFG_GET_PORTS_PF_SRN(oct->conf); + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + + for (i = 0; i < num_rings; i++) + intr_mask |= (0x1ULL << (srn + i)); + + octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask); +} + +/* Get new Octeon Read Index: index of descriptor that Octeon reads next. 
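octep_enable_interrupts_cn93_pf() and octep_disable_interrupts_cn93_pf() above build the same per-ring mask one bit at a time, starting at the PF's first ring number (srn). A standalone sketch, with the equivalent closed form noted (example values only):

#include <stdint.h>
#include <stdio.h>

/* One bit per active ring, starting at bit srn. */
static uint64_t ring_intr_mask(unsigned int srn, unsigned int num_rings)
{
	uint64_t mask = 0;
	unsigned int i;

	for (i = 0; i < num_rings; i++)
		mask |= 1ULL << (srn + i);

	/* closed form for 0 < num_rings < 64:
	 * mask == ((1ULL << num_rings) - 1) << srn
	 */
	return mask;
}

int main(void)
{
	/* rings 4, 5, 6 -> bits 4..6 */
	printf("0x%llx\n", (unsigned long long)ring_intr_mask(4, 3)); /* 0x70 */
	return 0;
}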
*/ +static u32 octep_update_iq_read_index_cn93_pf(struct octep_iq *iq) +{ + u32 pkt_in_done = readl(iq->inst_cnt_reg); + u32 last_done, new_idx; + + last_done = pkt_in_done - iq->pkt_in_done; + iq->pkt_in_done = pkt_in_done; + + new_idx = (iq->octep_read_index + last_done) % iq->max_count; + + return new_idx; +} + +/* Enable a hardware Tx Queue */ +static void octep_enable_iq_cn93_pf(struct octep_device *oct, int iq_no) +{ + u64 loop = HZ; + u64 reg_val; + + iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF); + + while (octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no)) && + loop--) { + schedule_timeout_interruptible(1); + } + + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no)); + reg_val |= (0x1ULL << 62); + octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val); + + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no)); + reg_val |= 0x1ULL; + octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val); +} + +/* Enable a hardware Rx Queue */ +static void octep_enable_oq_cn93_pf(struct octep_device *oct, int oq_no) +{ + u64 reg_val = 0ULL; + + oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no)); + reg_val |= (0x1ULL << 62); + octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); + + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF); + + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no)); + reg_val |= 0x1ULL; + octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val); +} + +/* Enable all hardware Tx/Rx Queues assigned to PF */ +static void octep_enable_io_queues_cn93_pf(struct octep_device *oct) +{ + u8 q; + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { + octep_enable_iq_cn93_pf(oct, q); + octep_enable_oq_cn93_pf(oct, q); + } +} + +/* Disable a hardware Tx Queue assigned to PF */ +static void octep_disable_iq_cn93_pf(struct octep_device *oct, int iq_no) +{ + u64 reg_val = 0ULL; + + iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no)); + reg_val &= ~0x1ULL; + octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val); +} + +/* Disable a hardware Rx Queue assigned to PF */ +static void octep_disable_oq_cn93_pf(struct octep_device *oct, int oq_no) +{ + u64 reg_val = 0ULL; + + oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no)); + reg_val &= ~0x1ULL; + octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val); +} + +/* Disable all hardware Tx/Rx Queues assigned to PF */ +static void octep_disable_io_queues_cn93_pf(struct octep_device *oct) +{ + int q = 0; + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { + octep_disable_iq_cn93_pf(oct, q); + octep_disable_oq_cn93_pf(oct, q); + } +} + +/* Dump hardware registers (including Tx/Rx queues) for debugging. */ +static void octep_dump_registers_cn93_pf(struct octep_device *oct) +{ + u8 srn, num_rings, q; + + srn = CFG_GET_PORTS_PF_SRN(oct->conf); + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + + for (q = srn; q < srn + num_rings; q++) + cn93_dump_regs(oct, q); +} + +/** + * octep_device_setup_cn93_pf() - Setup Octeon device. + * + * @oct: Octeon device private data structure. + * + * - initialize hardware operations. + * - get target side pcie port number for the device. + * - setup window access to hardware registers. + * - set initial configuration and max limits.
+ * - setup hardware mapping of rings to the PF device. + */ +void octep_device_setup_cn93_pf(struct octep_device *oct) +{ + oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cn93_pf; + oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf; + oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf; + + oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf; + oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf; + oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf; + oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf; + + oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf; + oct->hw_ops.disable_interrupts = octep_disable_interrupts_cn93_pf; + + oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cn93_pf; + + oct->hw_ops.enable_iq = octep_enable_iq_cn93_pf; + oct->hw_ops.enable_oq = octep_enable_oq_cn93_pf; + oct->hw_ops.enable_io_queues = octep_enable_io_queues_cn93_pf; + + oct->hw_ops.disable_iq = octep_disable_iq_cn93_pf; + oct->hw_ops.disable_oq = octep_disable_oq_cn93_pf; + oct->hw_ops.disable_io_queues = octep_disable_io_queues_cn93_pf; + oct->hw_ops.reset_io_queues = octep_reset_io_queues_cn93_pf; + + oct->hw_ops.dump_registers = octep_dump_registers_cn93_pf; + + octep_setup_pci_window_regs_cn93_pf(oct); + + oct->pcie_port = octep_read_csr64(oct, CN93_SDP_MAC_NUMBER) & 0xff; + dev_info(&oct->pdev->dev, + "Octeon device using PCIE Port %d\n", oct->pcie_port); + + octep_init_config_cn93_pf(oct); + octep_configure_ring_mapping_cn93_pf(oct); +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h new file mode 100644 index 000000000000..f208f3f9a447 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h @@ -0,0 +1,204 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#ifndef _OCTEP_CONFIG_H_ +#define _OCTEP_CONFIG_H_ + +/* Tx instruction types by length */ +#define OCTEP_32BYTE_INSTR 32 +#define OCTEP_64BYTE_INSTR 64 + +/* Tx Queue: maximum descriptors per ring */ +#define OCTEP_IQ_MAX_DESCRIPTORS 1024 +/* Minimum input (Tx) requests to be enqueued to ring doorbell */ +#define OCTEP_DB_MIN 1 +/* Packet threshold for Tx queue interrupt */ +#define OCTEP_IQ_INTR_THRESHOLD 0x0 + +/* Rx Queue: maximum descriptors per ring */ +#define OCTEP_OQ_MAX_DESCRIPTORS 1024 + +/* Rx buffer size: Use page size buffers. + * Build skb from allocated page buffer once the packet is received. + * When a gathered packet is received, make head page as skb head and + * page buffers in consecutive Rx descriptors as fragments. + */ +#define OCTEP_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE)) +#define OCTEP_OQ_PKTS_PER_INTR 128 +#define OCTEP_OQ_REFILL_THRESHOLD (OCTEP_OQ_MAX_DESCRIPTORS / 4) + +#define OCTEP_OQ_INTR_PKT_THRESHOLD 1 +#define OCTEP_OQ_INTR_TIME_THRESHOLD 10 + +#define OCTEP_MSIX_NAME_SIZE (IFNAMSIZ + 32) + +/* Tx Queue wake threshold + * wake up a stopped Tx queue if at least 2 descriptors are available. + * Even an skb with fragments consumes only one Tx queue descriptor entry.
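octep_update_iq_read_index_cn93_pf() earlier in this file leans on unsigned 32-bit arithmetic: even when the hardware count register wraps past 2^32, subtracting the previously seen value still yields the number of newly completed commands, which is then folded into the ring with a modulo. A standalone sketch with made-up numbers:

#include <stdint.h>
#include <stdio.h>

struct iq_state {
	uint32_t pkt_in_done;	/* last value read from the count CSR */
	uint32_t read_index;	/* software read index into the ring */
	uint32_t max_count;	/* ring size in descriptors */
};

static uint32_t update_read_index(struct iq_state *iq, uint32_t hw_count)
{
	/* wrap-safe: unsigned subtraction gives the delta even
	 * after the counter rolls over
	 */
	uint32_t last_done = hw_count - iq->pkt_in_done;

	iq->pkt_in_done = hw_count;
	iq->read_index = (iq->read_index + last_done) % iq->max_count;
	return iq->read_index;
}

int main(void)
{
	struct iq_state iq = { .pkt_in_done = 0xFFFFFFF0u,
			       .read_index = 1000, .max_count = 1024 };

	/* counter wrapped: 0x10 - 0xFFFFFFF0 == 0x20 == 32 new commands */
	printf("%u\n", update_read_index(&iq, 0x10)); /* (1000+32)%1024 = 8 */
	return 0;
}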
+ */ +#define OCTEP_WAKE_QUEUE_THRESHOLD 2 + +/* Minimum MTU supported by Octeon network interface */ +#define OCTEP_MIN_MTU ETH_MIN_MTU +/* Maximum MTU supported by Octeon interface */ +#define OCTEP_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN)) +/* Default MTU */ +#define OCTEP_DEFAULT_MTU 1500 + +/* Macros to get Octeon config params */ +#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq) +#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs) +#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type) +#define CFG_GET_IQ_PKIND(cfg) ((cfg)->iq.pkind) +#define CFG_GET_IQ_INSTR_SIZE(cfg) (64) +#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min) +#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold) + +#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs) +#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size) +#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold) +#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt) +#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time) + +#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.max_io_rings) +#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.active_io_rings) +#define CFG_GET_PORTS_PF_SRN(cfg) ((cfg)->pf_ring_cfg.srn) + +#define CFG_GET_DPI_PKIND(cfg) ((cfg)->core_cfg.dpi_pkind) +#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us) +#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us) + +#define CFG_GET_MAX_VFS(cfg) ((cfg)->sriov_cfg.max_vfs) +#define CFG_GET_ACTIVE_VFS(cfg) ((cfg)->sriov_cfg.active_vfs) +#define CFG_GET_MAX_RPVF(cfg) ((cfg)->sriov_cfg.max_rings_per_vf) +#define CFG_GET_ACTIVE_RPVF(cfg) ((cfg)->sriov_cfg.active_rings_per_vf) +#define CFG_GET_VF_SRN(cfg) ((cfg)->sriov_cfg.vf_srn) + +#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix) +#define CFG_GET_NON_IOQ_MSIX(cfg) ((cfg)->msix_cfg.non_ioq_msix) +#define CFG_GET_NON_IOQ_MSIX_NAMES(cfg) ((cfg)->msix_cfg.non_ioq_msix_names) + +#define CFG_GET_CTRL_MBOX_MEM_ADDR(cfg) ((cfg)->ctrl_mbox_cfg.barmem_addr) + +/* Hardware Tx Queue configuration. */ +struct octep_iq_config { + /* Size of the Input queue (number of commands) */ + u16 num_descs; + + /* Command size - 32 or 64 bytes */ + u16 instr_type; + + /* pkind for packets sent to Octeon */ + u16 pkind; + + /* Minimum number of commands pending to be posted to Octeon before driver + * hits the Input queue doorbell. + */ + u16 db_min; + + /* Trigger the IQ interrupt when processed cmd count reaches + * this level. + */ + u32 intr_threshold; +}; + +/* Hardware Rx Queue configuration. */ +struct octep_oq_config { + /* Size of Output queue (number of descriptors) */ + u16 num_descs; + + /* Size of buffer in this Output queue. */ + u16 buf_size; + + /* The number of buffers that were consumed during packet processing + * by the driver on this Output queue before the driver attempts to + * replenish the descriptor ring with new buffers. + */ + u16 refill_threshold; + + /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host + * only if it sent as many packets as specified by this field. + * The driver usually does not use packet count interrupt coalescing. + */ + u32 oq_intr_pkt; + + /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host + * if at least one packet was sent in the time interval specified by + * this field. The driver uses time interval interrupt coalescing by + * default. The time is specified in microseconds.
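octep_setup_oq_regs_cn93_pf() earlier in this patch programs these two coalescing thresholds into a single 64-bit CSR (SDP_R_OUT_INT_LEVELS): the time threshold in the upper 32 bits and the packet threshold in the lower 32. A standalone sketch of the packing, using the default values above:

#include <stdint.h>
#include <stdio.h>

/* Pack the time threshold into bits 63..32 and the packet threshold
 * into bits 31..0, as the CN93 OQ setup code does.
 */
static uint64_t pack_int_levels(uint32_t time_thresh, uint32_t pkt_thresh)
{
	return ((uint64_t)time_thresh << 32) | pkt_thresh;
}

int main(void)
{
	/* defaults above: 10 usec, 1 packet */
	printf("0x%016llx\n",
	       (unsigned long long)pack_int_levels(10, 1)); /* 0x0000000a00000001 */
	return 0;
}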
+ */ + u32 oq_intr_time; +}; + +/* Tx/Rx configuration */ +struct octep_pf_ring_config { + /* Max number of IOQs */ + u16 max_io_rings; + + /* Number of active IOQs */ + u16 active_io_rings; + + /* Starting IOQ number: this changes based on which PEM is used */ + u16 srn; +}; + +/* Octeon Hardware SRIOV config */ +struct octep_sriov_config { + /* Max number of VF devices supported */ + u16 max_vfs; + + /* Number of VF devices enabled */ + u16 active_vfs; + + /* Max number of rings assigned to VF */ + u8 max_rings_per_vf; + + /* Number of rings enabled per VF */ + u8 active_rings_per_vf; + + /* starting ring number of VF's: ring-0 of VF-0 of the PF */ + u16 vf_srn; +}; + +/* Octeon MSI-x config. */ +struct octep_msix_config { + /* Number of IOQ interrupts */ + u16 ioq_msix; + + /* Number of Non IOQ interrupts */ + u16 non_ioq_msix; + + /* Names of Non IOQ interrupts */ + char **non_ioq_msix_names; +}; + +struct octep_ctrl_mbox_config { + /* Barmem address for control mbox */ + void __iomem *barmem_addr; +}; + +/* Data Structure to hold configuration limits and active config */ +struct octep_config { + /* Input Queue attributes. */ + struct octep_iq_config iq; + + /* Output Queue attributes. */ + struct octep_oq_config oq; + + /* NIC Port Configuration */ + struct octep_pf_ring_config pf_ring_cfg; + + /* SRIOV configuration of the PF */ + struct octep_sriov_config sriov_cfg; + + /* MSI-X interrupt config */ + struct octep_msix_config msix_cfg; + + /* ctrl mbox config */ + struct octep_ctrl_mbox_config ctrl_mbox_cfg; +}; +#endif /* _OCTEP_CONFIG_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c new file mode 100644 index 000000000000..39322e4dd100 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. 
+ * + */ +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/mutex.h> +#include <linux/jiffies.h> +#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/io.h> +#include <linux/pci.h> +#include <linux/etherdevice.h> + +#include "octep_ctrl_mbox.h" +#include "octep_config.h" +#include "octep_main.h" + +/* Timeout in msecs for message response */ +#define OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS 100 +/* Time in msecs to wait for message response */ +#define OCTEP_CTRL_MBOX_MSG_WAIT_MS 10 + +#define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(m) (m) +#define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(m) ((m) + 8) +#define OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(m) ((m) + 24) +#define OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(m) ((m) + 144) + +#define OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ) +#define OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) +#define OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 4) +#define OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 8) +#define OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 12) + +#define OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m) ((m) + \ + OCTEP_CTRL_MBOX_INFO_SZ + \ + OCTEP_CTRL_MBOX_H2FQ_INFO_SZ) +#define OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) +#define OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 4) +#define OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 8) +#define OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 12) + +#define OCTEP_CTRL_MBOX_Q_OFFSET(m, i) ((m) + \ + (sizeof(struct octep_ctrl_mbox_msg) * (i))) + +static u32 octep_ctrl_mbox_circq_inc(u32 index, u32 mask) +{ + return (index + 1) & mask; +} + +static u32 octep_ctrl_mbox_circq_space(u32 pi, u32 ci, u32 mask) +{ + return mask - ((pi - ci) & mask); +} + +static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 mask) +{ + return ((pi - ci) & mask); +} + +int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox) +{ + u64 magic_num, status; + + if (!mbox) + return -EINVAL; + + if (!mbox->barmem) { + pr_info("octep_ctrl_mbox : Invalid barmem %p\n", mbox->barmem); + return -EINVAL; + } + + magic_num = readq(OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(mbox->barmem)); + if (magic_num != OCTEP_CTRL_MBOX_MAGIC_NUMBER) { + pr_info("octep_ctrl_mbox : Invalid magic number %llx\n", magic_num); + return -EINVAL; + } + + status = readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(mbox->barmem)); + if (status != OCTEP_CTRL_MBOX_STATUS_READY) { + pr_info("octep_ctrl_mbox : Firmware is not ready.\n"); + return -EINVAL; + } + + mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(mbox->barmem)); + + writeq(OCTEP_CTRL_MBOX_STATUS_INIT, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem)); + + mbox->h2fq.elem_cnt = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(mbox->barmem)); + mbox->h2fq.elem_sz = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(mbox->barmem)); + mbox->h2fq.mask = (mbox->h2fq.elem_cnt - 1); + mutex_init(&mbox->h2fq_lock); + + mbox->f2hq.elem_cnt = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(mbox->barmem)); + mbox->f2hq.elem_sz = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(mbox->barmem)); + mbox->f2hq.mask = (mbox->f2hq.elem_cnt - 1); + mutex_init(&mbox->f2hq_lock); + + mbox->h2fq.hw_prod = OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(mbox->barmem); + mbox->h2fq.hw_cons = 
OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(mbox->barmem); + mbox->h2fq.hw_q = mbox->barmem + + OCTEP_CTRL_MBOX_INFO_SZ + + OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + + OCTEP_CTRL_MBOX_F2HQ_INFO_SZ; + + mbox->f2hq.hw_prod = OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(mbox->barmem); + mbox->f2hq.hw_cons = OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(mbox->barmem); + mbox->f2hq.hw_q = mbox->h2fq.hw_q + + ((mbox->h2fq.elem_sz + sizeof(union octep_ctrl_mbox_msg_hdr)) * + mbox->h2fq.elem_cnt); + + /* ensure ready state is seen after everything is initialized */ + wmb(); + writeq(OCTEP_CTRL_MBOX_STATUS_READY, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem)); + + pr_info("Octep ctrl mbox : Init successful.\n"); + + return 0; +} + +int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg) +{ + unsigned long timeout = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS); + unsigned long period = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_WAIT_MS); + struct octep_ctrl_mbox_q *q; + unsigned long expire; + u64 *mbuf, *word0; + u8 __iomem *qidx; + u16 pi, ci; + int i; + + if (!mbox || !msg) + return -EINVAL; + + q = &mbox->h2fq; + pi = readl(q->hw_prod); + ci = readl(q->hw_cons); + + if (!octep_ctrl_mbox_circq_space(pi, ci, q->mask)) + return -ENOMEM; + + qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, pi); + mbuf = (u64 *)msg->msg; + word0 = &msg->hdr.word0; + + mutex_lock(&mbox->h2fq_lock); + for (i = 1; i <= msg->hdr.sizew; i++) + writeq(*mbuf++, (qidx + (i * 8))); + + writeq(*word0, qidx); + + pi = octep_ctrl_mbox_circq_inc(pi, q->mask); + writel(pi, q->hw_prod); + mutex_unlock(&mbox->h2fq_lock); + + /* don't check for notification response */ + if (msg->hdr.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY) + return 0; + + expire = jiffies + timeout; + while (true) { + *word0 = readq(qidx); + if (msg->hdr.flags == OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP) + break; + schedule_timeout_interruptible(period); + if (signal_pending(current) || time_after(jiffies, expire)) { + pr_info("octep_ctrl_mbox: Timed out\n"); + return -EBUSY; + } + } + mbuf = (u64 *)msg->msg; + for (i = 1; i <= msg->hdr.sizew; i++) + *mbuf++ = readq(qidx + (i * 8)); + + return 0; +} + +int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg) +{ + struct octep_ctrl_mbox_q *q; + u32 count, pi, ci; + u8 __iomem *qidx; + u64 *mbuf; + int i; + + if (!mbox || !msg) + return -EINVAL; + + q = &mbox->f2hq; + pi = readl(q->hw_prod); + ci = readl(q->hw_cons); + count = octep_ctrl_mbox_circq_depth(pi, ci, q->mask); + if (!count) + return -EAGAIN; + + qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, ci); + mbuf = (u64 *)msg->msg; + + mutex_lock(&mbox->f2hq_lock); + + msg->hdr.word0 = readq(qidx); + for (i = 1; i <= msg->hdr.sizew; i++) + *mbuf++ = readq(qidx + (i * 8)); + + ci = octep_ctrl_mbox_circq_inc(ci, q->mask); + writel(ci, q->hw_cons); + + mutex_unlock(&mbox->f2hq_lock); + + if (msg->hdr.flags != OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ || !mbox->process_req) + return 0; + + mbox->process_req(mbox->user_ctx, msg); + mbuf = (u64 *)msg->msg; + for (i = 1; i <= msg->hdr.sizew; i++) + writeq(*mbuf++, (qidx + (i * 8))); + + writeq(msg->hdr.word0, qidx); + + return 0; +} + +int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox) +{ + if (!mbox) + return -EINVAL; + + writeq(OCTEP_CTRL_MBOX_STATUS_UNINIT, + OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem)); + /* ensure uninit state is written before uninitialization */ + wmb(); + + mutex_destroy(&mbox->h2fq_lock); + mutex_destroy(&mbox->f2hq_lock); + + writeq(OCTEP_CTRL_MBOX_STATUS_INVALID, + 
OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem)); + + pr_info("Octep ctrl mbox : Uninit successful.\n"); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h new file mode 100644 index 000000000000..2dc5753cfec6 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + #ifndef __OCTEP_CTRL_MBOX_H__ +#define __OCTEP_CTRL_MBOX_H__ + +/* barmem structure + * |===========================================| + * |Info (16 + 120 + 120 = 256 bytes) | + * |-------------------------------------------| + * |magic number (8 bytes) | + * |bar memory size (4 bytes) | + * |reserved (4 bytes) | + * |-------------------------------------------| + * |host version (8 bytes) | + * |host status (8 bytes) | + * |host reserved (104 bytes) | + * |-------------------------------------------| + * |fw version (8 bytes) | + * |fw status (8 bytes) | + * |fw reserved (104 bytes) | + * |===========================================| + * |Host to Fw Queue info (16 bytes) | + * |-------------------------------------------| + * |producer index (4 bytes) | + * |consumer index (4 bytes) | + * |element size (4 bytes) | + * |element count (4 bytes) | + * |===========================================| + * |Fw to Host Queue info (16 bytes) | + * |-------------------------------------------| + * |producer index (4 bytes) | + * |consumer index (4 bytes) | + * |element size (4 bytes) | + * |element count (4 bytes) | + * |===========================================| + * |Host to Fw Queue | + * |-------------------------------------------| + * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes| + * |===========================================| + * |===========================================| + * |Fw to Host Queue | + * |-------------------------------------------| + * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes| + * |===========================================| + */ + +#define OCTEP_CTRL_MBOX_MAGIC_NUMBER 0xdeaddeadbeefbeefull + +/* Size of mbox info in bytes */ +#define OCTEP_CTRL_MBOX_INFO_SZ 256 +/* Size of mbox host to target queue info in bytes */ +#define OCTEP_CTRL_MBOX_H2FQ_INFO_SZ 16 +/* Size of mbox target to host queue info in bytes */ +#define OCTEP_CTRL_MBOX_F2HQ_INFO_SZ 16 +/* Size of mbox queue in bytes */ +#define OCTEP_CTRL_MBOX_Q_SZ(sz, cnt) (((sz) + 8) * (cnt)) +/* Size of mbox in bytes */ +#define OCTEP_CTRL_MBOX_SZ(hsz, hcnt, fsz, fcnt) (OCTEP_CTRL_MBOX_INFO_SZ + \ + OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + \ + OCTEP_CTRL_MBOX_F2HQ_INFO_SZ + \ + OCTEP_CTRL_MBOX_Q_SZ(hsz, hcnt) + \ + OCTEP_CTRL_MBOX_Q_SZ(fsz, fcnt)) + +/* Valid request message */ +#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ BIT(0) +/* Valid response message */ +#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP BIT(1) +/* Valid notification, no response required */ +#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY BIT(2) + +enum octep_ctrl_mbox_status { + OCTEP_CTRL_MBOX_STATUS_INVALID = 0, + OCTEP_CTRL_MBOX_STATUS_INIT, + OCTEP_CTRL_MBOX_STATUS_READY, + OCTEP_CTRL_MBOX_STATUS_UNINIT +}; + +/* mbox message */ +union octep_ctrl_mbox_msg_hdr { + u64 word0; + struct { + /* OCTEP_CTRL_MBOX_MSG_HDR_FLAG_* */ + u32 flags; + /* size of message in words excluding header */ + u32 sizew; + }; +}; + +/* mbox message */ +struct octep_ctrl_mbox_msg { + /* mbox transaction header */ + union octep_ctrl_mbox_msg_hdr hdr; + /* 
pointer to message buffer */
+	void *msg;
+};
+
+/* Mbox queue */
+struct octep_ctrl_mbox_q {
+	/* q element size, should be aligned to unsigned long */
+	u16 elem_sz;
+	/* q element count, should be power of 2 */
+	u16 elem_cnt;
+	/* q mask */
+	u16 mask;
+	/* producer address in bar mem */
+	u8 __iomem *hw_prod;
+	/* consumer address in bar mem */
+	u8 __iomem *hw_cons;
+	/* q base address in bar mem */
+	u8 __iomem *hw_q;
+};
+
+struct octep_ctrl_mbox {
+	/* host driver version */
+	u64 version;
+	/* size of bar memory */
+	u32 barmem_sz;
+	/* pointer to BAR memory */
+	u8 __iomem *barmem;
+	/* user context for callback, can be null */
+	void *user_ctx;
+	/* callback handler for processing request, called from octep_ctrl_mbox_recv */
+	int (*process_req)(void *user_ctx, struct octep_ctrl_mbox_msg *msg);
+	/* host-to-fw queue */
+	struct octep_ctrl_mbox_q h2fq;
+	/* fw-to-host queue */
+	struct octep_ctrl_mbox_q f2hq;
+	/* lock for h2fq */
+	struct mutex h2fq_lock;
+	/* lock for f2hq */
+	struct mutex f2hq_lock;
+};
+
+/* Initialize control mbox.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox);
+
+/* Send mbox message.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);
+
+/* Retrieve mbox message.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);
+
+/* Uninitialize control mbox.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox);
+
+#endif /* __OCTEP_CTRL_MBOX_H__ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
new file mode 100644
index 000000000000..7c00c896ab98
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+int octep_get_link_status(struct octep_device *oct)
+{
+	struct octep_ctrl_net_h2f_req req = {};
+	struct octep_ctrl_net_h2f_resp *resp;
+	struct octep_ctrl_mbox_msg msg = {};
+	int err;
+
+	req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+	req.link.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+	msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+	msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
+	msg.msg = &req;
+	err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+	if (err)
+		return err;
+
+	resp = (struct octep_ctrl_net_h2f_resp *)&req;
+	return resp->link.state;
+}
+
+void octep_set_link_status(struct octep_device *oct, bool up)
+{
+	struct octep_ctrl_net_h2f_req req = {};
+	struct octep_ctrl_mbox_msg msg = {};
+
+	req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+	req.link.cmd = OCTEP_CTRL_NET_CMD_SET;
+	req.link.state = (up) ?
OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN; + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW; + msg.msg = &req; + octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); +} + +void octep_set_rx_state(struct octep_device *oct, bool up) +{ + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_mbox_msg msg = {}; + + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_RX_STATE; + req.link.cmd = OCTEP_CTRL_NET_CMD_SET; + req.link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN; + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW; + msg.msg = &req; + octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); +} + +int octep_get_mac_addr(struct octep_device *oct, u8 *addr) +{ + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_net_h2f_resp *resp; + struct octep_ctrl_mbox_msg msg = {}; + int err; + + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC; + req.link.cmd = OCTEP_CTRL_NET_CMD_GET; + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW; + msg.msg = &req; + err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); + if (err) + return err; + + resp = (struct octep_ctrl_net_h2f_resp *)&req; + memcpy(addr, resp->mac.addr, ETH_ALEN); + + return err; +} + +int octep_set_mac_addr(struct octep_device *oct, u8 *addr) +{ + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_mbox_msg msg = {}; + + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC; + req.mac.cmd = OCTEP_CTRL_NET_CMD_SET; + memcpy(&req.mac.addr, addr, ETH_ALEN); + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW; + msg.msg = &req; + + return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); +} + +int octep_set_mtu(struct octep_device *oct, int mtu) +{ + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_mbox_msg msg = {}; + + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU; + req.mtu.cmd = OCTEP_CTRL_NET_CMD_SET; + req.mtu.val = mtu; + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MTU_REQ_SZW; + msg.msg = &req; + + return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); +} + +int octep_get_if_stats(struct octep_device *oct) +{ + void __iomem *iface_rx_stats; + void __iomem *iface_tx_stats; + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_mbox_msg msg = {}; + int err; + + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS; + req.mac.cmd = OCTEP_CTRL_NET_CMD_GET; + req.get_stats.offset = oct->ctrl_mbox_ifstats_offset; + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW; + msg.msg = &req; + err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); + if (err) + return err; + + iface_rx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset; + iface_tx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset + + sizeof(struct octep_iface_rx_stats); + memcpy_fromio(&oct->iface_rx_stats, iface_rx_stats, sizeof(struct octep_iface_rx_stats)); + memcpy_fromio(&oct->iface_tx_stats, iface_tx_stats, sizeof(struct octep_iface_tx_stats)); + + return err; +} + +int octep_get_link_info(struct octep_device *oct) +{ + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_net_h2f_resp *resp; + struct octep_ctrl_mbox_msg msg = {}; + int err; + + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO; + req.mac.cmd = OCTEP_CTRL_NET_CMD_GET; + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW; + 
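+	/* the mbox layer writes the firmware response back into 'req' in
+	 * place; callers therefore cast 'req' to struct
+	 * octep_ctrl_net_h2f_resp after a successful send
+	 */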
msg.msg = &req; + err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); + if (err) + return err; + + resp = (struct octep_ctrl_net_h2f_resp *)&req; + oct->link_info.supported_modes = resp->link_info.supported_modes; + oct->link_info.advertised_modes = resp->link_info.advertised_modes; + oct->link_info.autoneg = resp->link_info.autoneg; + oct->link_info.pause = resp->link_info.pause; + oct->link_info.speed = resp->link_info.speed; + + return err; +} + +int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info) +{ + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_mbox_msg msg = {}; + + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO; + req.link_info.cmd = OCTEP_CTRL_NET_CMD_SET; + req.link_info.info.advertised_modes = link_info->advertised_modes; + req.link_info.info.autoneg = link_info->autoneg; + req.link_info.info.pause = link_info->pause; + req.link_info.info.speed = link_info->speed; + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW; + msg.msg = &req; + + return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h new file mode 100644 index 000000000000..f23b58381322 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h @@ -0,0 +1,299 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ +#ifndef __OCTEP_CTRL_NET_H__ +#define __OCTEP_CTRL_NET_H__ + +/* Supported commands */ +enum octep_ctrl_net_cmd { + OCTEP_CTRL_NET_CMD_GET = 0, + OCTEP_CTRL_NET_CMD_SET, +}; + +/* Supported states */ +enum octep_ctrl_net_state { + OCTEP_CTRL_NET_STATE_DOWN = 0, + OCTEP_CTRL_NET_STATE_UP, +}; + +/* Supported replies */ +enum octep_ctrl_net_reply { + OCTEP_CTRL_NET_REPLY_OK = 0, + OCTEP_CTRL_NET_REPLY_GENERIC_FAIL, + OCTEP_CTRL_NET_REPLY_INVALID_PARAM, +}; + +/* Supported host to fw commands */ +enum octep_ctrl_net_h2f_cmd { + OCTEP_CTRL_NET_H2F_CMD_INVALID = 0, + OCTEP_CTRL_NET_H2F_CMD_MTU, + OCTEP_CTRL_NET_H2F_CMD_MAC, + OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS, + OCTEP_CTRL_NET_H2F_CMD_GET_XSTATS, + OCTEP_CTRL_NET_H2F_CMD_GET_Q_STATS, + OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS, + OCTEP_CTRL_NET_H2F_CMD_RX_STATE, + OCTEP_CTRL_NET_H2F_CMD_LINK_INFO, +}; + +/* Supported fw to host commands */ +enum octep_ctrl_net_f2h_cmd { + OCTEP_CTRL_NET_F2H_CMD_INVALID = 0, + OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS, +}; + +struct octep_ctrl_net_req_hdr { + /* sender id */ + u16 sender; + /* receiver id */ + u16 receiver; + /* octep_ctrl_net_h2t_cmd */ + u16 cmd; + /* reserved */ + u16 rsvd0; +}; + +/* get/set mtu request */ +struct octep_ctrl_net_h2f_req_cmd_mtu { + /* enum octep_ctrl_net_cmd */ + u16 cmd; + /* 0-65535 */ + u16 val; +}; + +/* get/set mac request */ +struct octep_ctrl_net_h2f_req_cmd_mac { + /* enum octep_ctrl_net_cmd */ + u16 cmd; + /* xx:xx:xx:xx:xx:xx */ + u8 addr[ETH_ALEN]; +}; + +/* get if_stats, xstats, q_stats request */ +struct octep_ctrl_net_h2f_req_cmd_get_stats { + /* offset into barmem where fw should copy over stats */ + u32 offset; +}; + +/* get/set link state, rx state */ +struct octep_ctrl_net_h2f_req_cmd_state { + /* enum octep_ctrl_net_cmd */ + u16 cmd; + /* enum octep_ctrl_net_state */ + u16 state; +}; + +/* link info */ +struct octep_ctrl_net_link_info { + /* Bitmap of Supported link speeds/modes */ + u64 supported_modes; + /* Bitmap of Advertised link speeds/modes */ + u64 
advertised_modes;
+	/* Autonegotiation state; bit 0=disabled; bit 1=enabled */
+	u8 autoneg;
+	/* Pause frames setting. bit 0=disabled; bit 1=enabled */
+	u8 pause;
+	/* Negotiated link speed in Mbps */
+	u32 speed;
+};
+
+/* get/set link info */
+struct octep_ctrl_net_h2f_req_cmd_link_info {
+	/* enum octep_ctrl_net_cmd */
+	u16 cmd;
+	/* struct octep_ctrl_net_link_info */
+	struct octep_ctrl_net_link_info info;
+};
+
+/* Host to fw request data */
+struct octep_ctrl_net_h2f_req {
+	struct octep_ctrl_net_req_hdr hdr;
+	union {
+		struct octep_ctrl_net_h2f_req_cmd_mtu mtu;
+		struct octep_ctrl_net_h2f_req_cmd_mac mac;
+		struct octep_ctrl_net_h2f_req_cmd_get_stats get_stats;
+		struct octep_ctrl_net_h2f_req_cmd_state link;
+		struct octep_ctrl_net_h2f_req_cmd_state rx;
+		struct octep_ctrl_net_h2f_req_cmd_link_info link_info;
+	};
+} __packed;
+
+struct octep_ctrl_net_resp_hdr {
+	/* sender id */
+	u16 sender;
+	/* receiver id */
+	u16 receiver;
+	/* enum octep_ctrl_net_h2f_cmd */
+	u16 cmd;
+	/* enum octep_ctrl_net_reply */
+	u16 reply;
+};
+
+/* get mtu response */
+struct octep_ctrl_net_h2f_resp_cmd_mtu {
+	/* 0-65535 */
+	u16 val;
+};
+
+/* get mac response */
+struct octep_ctrl_net_h2f_resp_cmd_mac {
+	/* xx:xx:xx:xx:xx:xx */
+	u8 addr[ETH_ALEN];
+};
+
+/* get link state, rx state response */
+struct octep_ctrl_net_h2f_resp_cmd_state {
+	/* enum octep_ctrl_net_state */
+	u16 state;
+};
+
+/* Host to fw response data */
+struct octep_ctrl_net_h2f_resp {
+	struct octep_ctrl_net_resp_hdr hdr;
+	union {
+		struct octep_ctrl_net_h2f_resp_cmd_mtu mtu;
+		struct octep_ctrl_net_h2f_resp_cmd_mac mac;
+		struct octep_ctrl_net_h2f_resp_cmd_state link;
+		struct octep_ctrl_net_h2f_resp_cmd_state rx;
+		struct octep_ctrl_net_link_info link_info;
+	};
+} __packed;
+
+/* link state notification */
+struct octep_ctrl_net_f2h_req_cmd_state {
+	/* enum octep_ctrl_net_state */
+	u16 state;
+};
+
+/* Fw to host request data */
+struct octep_ctrl_net_f2h_req {
+	struct octep_ctrl_net_req_hdr hdr;
+	union {
+		struct octep_ctrl_net_f2h_req_cmd_state link;
+	};
+};
+
+/* Fw to host response data */
+struct octep_ctrl_net_f2h_resp {
+	struct octep_ctrl_net_resp_hdr hdr;
+};
+
+/* Size of host to fw octep_ctrl_mbox queue element */
+union octep_ctrl_net_h2f_data_sz {
+	struct octep_ctrl_net_h2f_req h2f_req;
+	struct octep_ctrl_net_h2f_resp h2f_resp;
+};
+
+/* Size of fw to host octep_ctrl_mbox queue element */
+union octep_ctrl_net_f2h_data_sz {
+	struct octep_ctrl_net_f2h_req f2h_req;
+	struct octep_ctrl_net_f2h_resp f2h_resp;
+};
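The two sizing unions above exist only so their sizeof() yields the largest message in each direction. A minimal sketch of the kind of init-time check they enable (octep_ctrl_net_h2f_fits() is a hypothetical helper, not part of this patch; it assumes octep_ctrl_mbox.h is in scope):

	/* Hypothetical sanity check: any host-to-fw message, header included,
	 * must fit one h2fq element advertised by the firmware.
	 */
	static inline bool octep_ctrl_net_h2f_fits(const struct octep_ctrl_mbox *mbox)
	{
		return sizeof(union octep_ctrl_net_h2f_data_sz) <= mbox->h2fq.elem_sz;
	}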
+/* size of host to fw data in words */
+#define OCTEP_CTRL_NET_H2F_DATA_SZW ((sizeof(union octep_ctrl_net_h2f_data_sz)) / \
+				      (sizeof(unsigned long)))
+
+/* size of fw to host data in words */
+#define OCTEP_CTRL_NET_F2H_DATA_SZW ((sizeof(union octep_ctrl_net_f2h_data_sz)) / \
+				      (sizeof(unsigned long)))
+
+/* size in words of get/set mtu request */
+#define OCTEP_CTRL_NET_H2F_MTU_REQ_SZW 2
+/* size in words of get/set mac request */
+#define OCTEP_CTRL_NET_H2F_MAC_REQ_SZW 2
+/* size in words of get stats request */
+#define OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW 2
+/* size in words of get/set state request */
+#define OCTEP_CTRL_NET_H2F_STATE_REQ_SZW 2
+/* size in words of get/set link info request */
+#define OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW 4
+
+/* size in words of get mtu response */
+#define OCTEP_CTRL_NET_H2F_GET_MTU_RESP_SZW 2
+/* size in words of set mtu response */
+#define OCTEP_CTRL_NET_H2F_SET_MTU_RESP_SZW 1
+/* size in words of get mac response */
+#define OCTEP_CTRL_NET_H2F_GET_MAC_RESP_SZW 2
+/* size in words of set mac response */
+#define OCTEP_CTRL_NET_H2F_SET_MAC_RESP_SZW 1
+/* size in words of get state response */
+#define OCTEP_CTRL_NET_H2F_GET_STATE_RESP_SZW 2
+/* size in words of set state response */
+#define OCTEP_CTRL_NET_H2F_SET_STATE_RESP_SZW 1
+/* size in words of get link info response */
+#define OCTEP_CTRL_NET_H2F_GET_LINK_INFO_RESP_SZW 4
+/* size in words of set link info response */
+#define OCTEP_CTRL_NET_H2F_SET_LINK_INFO_RESP_SZW 1
+
+/** Get link status from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: link status 0=down, 1=up.
+ */
+int octep_get_link_status(struct octep_device *oct);
+
+/** Set link status in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param up: boolean status.
+ */
+void octep_set_link_status(struct octep_device *oct, bool up);
+
+/** Set rx state in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param up: boolean status.
+ */
+void octep_set_rx_state(struct octep_device *oct, bool up);
+
+/** Get mac address from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param addr: non-null pointer to mac address.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_get_mac_addr(struct octep_device *oct, u8 *addr);
+
+/** Set mac address in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param addr: non-null pointer to mac address.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_set_mac_addr(struct octep_device *oct, u8 *addr);
+
+/** Set mtu in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param mtu: mtu.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_set_mtu(struct octep_device *oct, int mtu);
+
+/** Get interface statistics from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_get_if_stats(struct octep_device *oct);
+
+/** Get link info from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_get_link_info(struct octep_device *oct);
+
+/** Set link info in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param link_info: non-null pointer to struct octep_iface_link_info.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info);
+
+#endif /* __OCTEP_CTRL_NET_H__ */
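Taken together, the prototypes above form a thin RPC wrapper over the control mailbox. A hedged sketch of typical caller usage (this mirrors what octep_main.c does around probe time; error handling elided, the MTU value is purely illustrative):

	u8 mac[ETH_ALEN];

	/* fetch the port MAC from firmware and mirror it into the netdev */
	if (!octep_get_mac_addr(oct, mac))
		eth_hw_addr_set(netdev, mac);

	octep_set_mtu(oct, 1500);	/* push an MTU to firmware */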
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
new file mode 100644
index 000000000000..87ef129b269a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+static const char octep_gstrings_global_stats[][ETH_GSTRING_LEN] = {
+	"rx_packets",
+	"tx_packets",
+	"rx_bytes",
+	"tx_bytes",
+	"rx_alloc_errors",
+	"tx_busy_errors",
+	"rx_dropped",
+	"tx_dropped",
+	"tx_hw_pkts",
+	"tx_hw_octs",
+	"tx_hw_bcast",
+	"tx_hw_mcast",
+	"tx_hw_underflow",
+	"tx_hw_control",
+	"tx_less_than_64",
+	"tx_equal_64",
+	"tx_equal_65_to_127",
+	"tx_equal_128_to_255",
+	"tx_equal_256_to_511",
+	"tx_equal_512_to_1023",
+	"tx_equal_1024_to_1518",
+	"tx_greater_than_1518",
+	"rx_hw_pkts",
+	"rx_hw_bytes",
+	"rx_hw_bcast",
+	"rx_hw_mcast",
+	"rx_pause_pkts",
+	"rx_pause_bytes",
+	"rx_dropped_pkts_fifo_full",
+	"rx_dropped_bytes_fifo_full",
+	"rx_err_pkts",
+};
+
+#define OCTEP_GLOBAL_STATS_CNT (sizeof(octep_gstrings_global_stats) / ETH_GSTRING_LEN)
+
+static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
+	"tx_packets_posted[Q-%u]",
+	"tx_packets_completed[Q-%u]",
+	"tx_bytes[Q-%u]",
+	"tx_busy[Q-%u]",
+};
+
+#define OCTEP_TX_Q_STATS_CNT (sizeof(octep_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+
+static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
+	"rx_packets[Q-%u]",
+	"rx_bytes[Q-%u]",
+	"rx_alloc_errors[Q-%u]",
+};
+
+#define OCTEP_RX_Q_STATS_CNT (sizeof(octep_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
+
+static void octep_get_drvinfo(struct net_device *netdev,
+			      struct ethtool_drvinfo *info)
+{
+	struct octep_device *oct = netdev_priv(netdev);
+
+	strscpy(info->driver, OCTEP_DRV_NAME, sizeof(info->driver));
+	strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info));
+}
+
+static void octep_get_strings(struct net_device *netdev,
+			      u32 stringset, u8 *data)
+{
+	struct octep_device *oct = netdev_priv(netdev);
+	u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+	char *strings = (char *)data;
+	int i, j;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++) {
+			snprintf(strings, ETH_GSTRING_LEN,
+				 octep_gstrings_global_stats[i]);
+			strings += ETH_GSTRING_LEN;
+		}
+
+		for (i = 0; i < num_queues; i++) {
+			for (j = 0; j < OCTEP_TX_Q_STATS_CNT; j++) {
+				snprintf(strings, ETH_GSTRING_LEN,
+					 octep_gstrings_tx_q_stats[j], i);
+				strings += ETH_GSTRING_LEN;
+			}
+		}
+
+		for (i = 0; i < num_queues; i++) {
+			for (j = 0; j < OCTEP_RX_Q_STATS_CNT; j++) {
+				snprintf(strings, ETH_GSTRING_LEN,
+					 octep_gstrings_rx_q_stats[j], i);
+				strings += ETH_GSTRING_LEN;
+			}
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static int octep_get_sset_count(struct net_device *netdev, int sset)
+{
+	struct octep_device *oct = netdev_priv(netdev);
+	u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+	switch (sset) {
+	case ETH_SS_STATS:
+		return OCTEP_GLOBAL_STATS_CNT + (num_queues *
+		       (OCTEP_TX_Q_STATS_CNT + OCTEP_RX_Q_STATS_CNT));
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void
+octep_get_ethtool_stats(struct net_device *netdev,
+			struct ethtool_stats *stats, u64 *data)
+{
+	struct octep_device *oct = netdev_priv(netdev);
+	struct octep_iface_tx_stats *iface_tx_stats;
+	struct octep_iface_rx_stats *iface_rx_stats;
+	u64 rx_packets, rx_bytes;
+	u64 tx_packets, tx_bytes;
+	u64 rx_alloc_errors, tx_busy_errors;
+	int q, i;
+
+	rx_packets = 0;
+	rx_bytes = 0;
+	tx_packets = 0;
+	tx_bytes = 0;
+	rx_alloc_errors = 0;
+	tx_busy_errors = 0;
+
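+	/* interface-level counters live in firmware; refresh them first */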
octep_get_if_stats(oct); + iface_tx_stats = &oct->iface_tx_stats; + iface_rx_stats = &oct->iface_rx_stats; + + for (q = 0; q < oct->num_oqs; q++) { + struct octep_iq *iq = oct->iq[q]; + struct octep_oq *oq = oct->oq[q]; + + tx_packets += iq->stats.instr_completed; + tx_bytes += iq->stats.bytes_sent; + tx_busy_errors += iq->stats.tx_busy; + + rx_packets += oq->stats.packets; + rx_bytes += oq->stats.bytes; + rx_alloc_errors += oq->stats.alloc_failures; + } + i = 0; + data[i++] = rx_packets; + data[i++] = tx_packets; + data[i++] = rx_bytes; + data[i++] = tx_bytes; + data[i++] = rx_alloc_errors; + data[i++] = tx_busy_errors; + data[i++] = iface_rx_stats->dropped_pkts_fifo_full + + iface_rx_stats->err_pkts; + data[i++] = iface_tx_stats->xscol + + iface_tx_stats->xsdef; + data[i++] = iface_tx_stats->pkts; + data[i++] = iface_tx_stats->octs; + data[i++] = iface_tx_stats->bcst; + data[i++] = iface_tx_stats->mcst; + data[i++] = iface_tx_stats->undflw; + data[i++] = iface_tx_stats->ctl; + data[i++] = iface_tx_stats->hist_lt64; + data[i++] = iface_tx_stats->hist_eq64; + data[i++] = iface_tx_stats->hist_65to127; + data[i++] = iface_tx_stats->hist_128to255; + data[i++] = iface_tx_stats->hist_256to511; + data[i++] = iface_tx_stats->hist_512to1023; + data[i++] = iface_tx_stats->hist_1024to1518; + data[i++] = iface_tx_stats->hist_gt1518; + data[i++] = iface_rx_stats->pkts; + data[i++] = iface_rx_stats->octets; + data[i++] = iface_rx_stats->mcast_pkts; + data[i++] = iface_rx_stats->bcast_pkts; + data[i++] = iface_rx_stats->pause_pkts; + data[i++] = iface_rx_stats->pause_octets; + data[i++] = iface_rx_stats->dropped_pkts_fifo_full; + data[i++] = iface_rx_stats->dropped_octets_fifo_full; + data[i++] = iface_rx_stats->err_pkts; + + /* Per Tx Queue stats */ + for (q = 0; q < oct->num_iqs; q++) { + struct octep_iq *iq = oct->iq[q]; + + data[i++] = iq->stats.instr_posted; + data[i++] = iq->stats.instr_completed; + data[i++] = iq->stats.bytes_sent; + data[i++] = iq->stats.tx_busy; + } + + /* Per Rx Queue stats */ + for (q = 0; q < oct->num_oqs; q++) { + struct octep_oq *oq = oct->oq[q]; + + data[i++] = oq->stats.packets; + data[i++] = oq->stats.bytes; + data[i++] = oq->stats.alloc_failures; + } +} + +#define OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(octep_speeds, ksettings, name) \ +{ \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_T)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_R)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_CR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_KR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_LR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_SR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_CR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_KR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_SR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \ + if ((octep_speeds) & 
BIT(OCTEP_LINK_MODE_40GBASE_CR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_KR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_LR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_SR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR2)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR2)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR2)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_LR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_CR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_KR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_LR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_SR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \ +} + +static int octep_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct octep_device *oct = netdev_priv(netdev); + struct octep_iface_link_info *link_info; + u32 advertised_modes, supported_modes; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + octep_get_link_info(oct); + + advertised_modes = oct->link_info.advertised_modes; + supported_modes = oct->link_info.supported_modes; + link_info = &oct->link_info; + + OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported); + OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising); + + if (link_info->autoneg) { + if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED) + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_ADVERTISED) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + cmd->base.autoneg = AUTONEG_ENABLE; + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + + if (link_info->pause) { + if (link_info->pause & OCTEP_LINK_MODE_PAUSE_SUPPORTED) + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + if (link_info->pause & OCTEP_LINK_MODE_PAUSE_ADVERTISED) + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + } + + cmd->base.port = PORT_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + 
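+	/* mirror the fixed fibre port type in the advertised modes as well */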
ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + + if (netif_carrier_ok(netdev)) { + cmd->base.speed = link_info->speed; + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + return 0; +} + +static int octep_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct octep_device *oct = netdev_priv(netdev); + struct octep_iface_link_info link_info_new; + struct octep_iface_link_info *link_info; + u64 advertised = 0; + u8 autoneg = 0; + int err; + + link_info = &oct->link_info; + memcpy(&link_info_new, link_info, sizeof(struct octep_iface_link_info)); + + /* Only Full duplex is supported; + * Assume full duplex when duplex is unknown. + */ + if (cmd->base.duplex != DUPLEX_FULL && + cmd->base.duplex != DUPLEX_UNKNOWN) + return -EOPNOTSUPP; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + if (!(link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED)) + return -EOPNOTSUPP; + autoneg = 1; + } + + if (!bitmap_subset(cmd->link_modes.advertising, + cmd->link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) + return -EINVAL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseT_Full)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_T); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseR_FEC)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_R); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseCR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_CR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseKR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_KR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseLR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_LR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseSR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_SR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 25000baseCR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_25GBASE_CR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 25000baseKR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_25GBASE_KR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 25000baseSR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_25GBASE_SR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 40000baseCR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_CR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 40000baseKR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_KR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 40000baseLR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_LR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 40000baseSR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_SR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseCR2_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR2); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseKR2_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR2); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseSR2_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR2); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseCR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseKR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR); + if 
(ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseLR_ER_FR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_LR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseSR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100000baseCR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_100GBASE_CR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100000baseKR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_100GBASE_KR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100000baseLR4_ER4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_100GBASE_LR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100000baseSR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_100GBASE_SR4); + + if (advertised == link_info->advertised_modes && + cmd->base.speed == link_info->speed && + cmd->base.autoneg == link_info->autoneg) + return 0; + + link_info_new.advertised_modes = advertised; + link_info_new.speed = cmd->base.speed; + link_info_new.autoneg = autoneg; + + err = octep_set_link_info(oct, &link_info_new); + if (err) + return err; + + memcpy(link_info, &link_info_new, sizeof(struct octep_iface_link_info)); + return 0; +} + +static const struct ethtool_ops octep_ethtool_ops = { + .get_drvinfo = octep_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = octep_get_strings, + .get_sset_count = octep_get_sset_count, + .get_ethtool_stats = octep_get_ethtool_stats, + .get_link_ksettings = octep_get_link_ksettings, + .set_link_ksettings = octep_set_link_ksettings, +}; + +void octep_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &octep_ethtool_ops; +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c new file mode 100644 index 000000000000..e020c81f3455 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -0,0 +1,1176 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/aer.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/vmalloc.h> + +#include "octep_config.h" +#include "octep_main.h" +#include "octep_ctrl_net.h" + +struct workqueue_struct *octep_wq; + +/* Supported Devices */ +static const struct pci_device_id octep_pci_id_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)}, + {0, }, +}; +MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl); + +MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>"); +MODULE_DESCRIPTION(OCTEP_DRV_STRING); +MODULE_LICENSE("GPL"); + +/** + * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info. + * + * @oct: Octeon device private data structure. + * + * Allocate resources to hold per Tx/Rx queue interrupt info. + * This is the information passed to interrupt handler, from which napi poll + * is scheduled and includes quick access to private data of Tx/Rx queue + * corresponding to the interrupt being handled. + * + * Return: 0, on successful allocation of resources for all queue interrupts. + * -1, if failed to allocate any resource. 
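+ *
+ * Illustrative use on the handler side (the vector is handed back as the
+ * irq dev_id cookie by octep_request_irqs(); a sketch, not the hw_ops
+ * implementation itself):
+ *
+ *	struct octep_ioq_vector *vec = data;
+ *
+ *	napi_schedule(&vec->napi);	// later polls vec->iq and vec->oq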
+ */
+static int octep_alloc_ioq_vectors(struct octep_device *oct)
+{
+	int i;
+	struct octep_ioq_vector *ioq_vector;
+
+	for (i = 0; i < oct->num_oqs; i++) {
+		oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
+		if (!oct->ioq_vector[i])
+			goto free_ioq_vector;
+
+		ioq_vector = oct->ioq_vector[i];
+		ioq_vector->iq = oct->iq[i];
+		ioq_vector->oq = oct->oq[i];
+		ioq_vector->octep_dev = oct;
+	}
+
+	dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
+	return 0;
+
+free_ioq_vector:
+	while (i) {
+		i--;
+		vfree(oct->ioq_vector[i]);
+		oct->ioq_vector[i] = NULL;
+	}
+	return -1;
+}
+
+/**
+ * octep_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_free_ioq_vectors(struct octep_device *oct)
+{
+	int i;
+
+	for (i = 0; i < oct->num_oqs; i++) {
+		if (oct->ioq_vector[i]) {
+			vfree(oct->ioq_vector[i]);
+			oct->ioq_vector[i] = NULL;
+		}
+	}
+	netdev_info(oct->netdev, "Freed IOQ Vectors\n");
+}
+
+/**
+ * octep_enable_msix_range() - enable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts)
+ * for the Octeon device.
+ *
+ * Return: 0, on successfully enabling all MSI-x interrupts.
+ *         -1, if failed to enable any MSI-x interrupt.
+ */
+static int octep_enable_msix_range(struct octep_device *oct)
+{
+	int num_msix, msix_allocated;
+	int i;
+
+	/* IOQ interrupts plus the generic (non-queue) interrupts */
+	num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf);
+	oct->msix_entries = kcalloc(num_msix,
+				    sizeof(struct msix_entry), GFP_KERNEL);
+	if (!oct->msix_entries)
+		goto msix_alloc_err;
+
+	for (i = 0; i < num_msix; i++)
+		oct->msix_entries[i].entry = i;
+
+	msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
+					       num_msix, num_msix);
+	if (msix_allocated != num_msix) {
+		dev_err(&oct->pdev->dev,
+			"Failed to enable %d msix irqs; got only %d\n",
+			num_msix, msix_allocated);
+		goto enable_msix_err;
+	}
+	oct->num_irqs = msix_allocated;
+	dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");
+
+	return 0;
+
+enable_msix_err:
+	if (msix_allocated > 0)
+		pci_disable_msix(oct->pdev);
+	kfree(oct->msix_entries);
+	oct->msix_entries = NULL;
+msix_alloc_err:
+	return -1;
+}
+
+/**
+ * octep_disable_msix() - disable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Disable MSI-x on the Octeon device.
+ */
+static void octep_disable_msix(struct octep_device *oct)
+{
+	pci_disable_msix(oct->pdev);
+	kfree(oct->msix_entries);
+	oct->msix_entries = NULL;
+	dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
+}
+
+/**
+ * octep_non_ioq_intr_handler() - common handler for all generic interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * This is the common handler for all non-queue (generic) interrupts.
+ */
+static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data)
+{
+	struct octep_device *oct = data;
+
+	return oct->hw_ops.non_ioq_intr_handler(oct);
+}
+
+/**
+ * octep_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data contains pointers to Tx/Rx queue private data
+ *        and corresponding NAPI context.
+ *
+ * This is the common handler for all Tx/Rx queue (IOQ) interrupts.
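+ * The @data cookie is the per-queue vector registered with request_irq()
+ * in octep_request_irqs(), so no lookup is needed in the hot path.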
+ */ +static irqreturn_t octep_ioq_intr_handler(int irq, void *data) +{ + struct octep_ioq_vector *ioq_vector = data; + struct octep_device *oct = ioq_vector->octep_dev; + + return oct->hw_ops.ioq_intr_handler(ioq_vector); +} + +/** + * octep_request_irqs() - Register interrupt handlers. + * + * @oct: Octeon device private data structure. + * + * Register handlers for all queue and non-queue interrupts. + * + * Return: 0, on successful registration of all interrupt handlers. + * -1, on any error. + */ +static int octep_request_irqs(struct octep_device *oct) +{ + struct net_device *netdev = oct->netdev; + struct octep_ioq_vector *ioq_vector; + struct msix_entry *msix_entry; + char **non_ioq_msix_names; + int num_non_ioq_msix; + int ret, i; + + num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf); + non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf); + + oct->non_ioq_irq_names = kcalloc(num_non_ioq_msix, + OCTEP_MSIX_NAME_SIZE, GFP_KERNEL); + if (!oct->non_ioq_irq_names) + goto alloc_err; + + /* First few MSI-X interrupts are non-queue interrupts */ + for (i = 0; i < num_non_ioq_msix; i++) { + char *irq_name; + + irq_name = &oct->non_ioq_irq_names[i * OCTEP_MSIX_NAME_SIZE]; + msix_entry = &oct->msix_entries[i]; + + snprintf(irq_name, OCTEP_MSIX_NAME_SIZE, + "%s-%s", netdev->name, non_ioq_msix_names[i]); + ret = request_irq(msix_entry->vector, + octep_non_ioq_intr_handler, 0, + irq_name, oct); + if (ret) { + netdev_err(netdev, + "request_irq failed for %s; err=%d", + irq_name, ret); + goto non_ioq_irq_err; + } + } + + /* Request IRQs for Tx/Rx queues */ + for (i = 0; i < oct->num_oqs; i++) { + ioq_vector = oct->ioq_vector[i]; + msix_entry = &oct->msix_entries[i + num_non_ioq_msix]; + + snprintf(ioq_vector->name, sizeof(ioq_vector->name), + "%s-q%d", netdev->name, i); + ret = request_irq(msix_entry->vector, + octep_ioq_intr_handler, 0, + ioq_vector->name, ioq_vector); + if (ret) { + netdev_err(netdev, + "request_irq failed for Q-%d; err=%d", + i, ret); + goto ioq_irq_err; + } + + cpumask_set_cpu(i % num_online_cpus(), + &ioq_vector->affinity_mask); + irq_set_affinity_hint(msix_entry->vector, + &ioq_vector->affinity_mask); + } + + return 0; +ioq_irq_err: + while (i > num_non_ioq_msix) { + --i; + irq_set_affinity_hint(oct->msix_entries[i].vector, NULL); + free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]); + } +non_ioq_irq_err: + while (i) { + --i; + free_irq(oct->msix_entries[i].vector, oct); + } +alloc_err: + return -1; +} + +/** + * octep_free_irqs() - free all registered interrupts. + * + * @oct: Octeon device private data structure. + * + * Free all queue and non-queue interrupts of the Octeon device. + */ +static void octep_free_irqs(struct octep_device *oct) +{ + int i; + + /* First few MSI-X interrupts are non queue interrupts; free them */ + for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++) + free_irq(oct->msix_entries[i].vector, oct); + kfree(oct->non_ioq_irq_names); + + /* Free IRQs for Input/Output (Tx/Rx) queues */ + for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) { + irq_set_affinity_hint(oct->msix_entries[i].vector, NULL); + free_irq(oct->msix_entries[i].vector, + oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]); + } + netdev_info(oct->netdev, "IRQs freed\n"); +} + +/** + * octep_setup_irqs() - setup interrupts for the Octeon device. + * + * @oct: Octeon device private data structure. + * + * Allocate data structures to hold per interrupt information, allocate/enable + * MSI-x interrupt and register interrupt handlers. 
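+ *
+ * The three steps compose with a goto-based unwind: a failure in a later
+ * step releases everything acquired by the earlier steps, in reverse order.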
+ *
+ * Return: 0, on successful allocation and registration of all interrupts.
+ *         -1, on any error.
+ */
+static int octep_setup_irqs(struct octep_device *oct)
+{
+	if (octep_alloc_ioq_vectors(oct))
+		goto ioq_vector_err;
+
+	if (octep_enable_msix_range(oct))
+		goto enable_msix_err;
+
+	if (octep_request_irqs(oct))
+		goto request_irq_err;
+
+	return 0;
+
+request_irq_err:
+	octep_disable_msix(oct);
+enable_msix_err:
+	octep_free_ioq_vectors(oct);
+ioq_vector_err:
+	return -1;
+}
+
+/**
+ * octep_clean_irqs() - free all interrupts and its resources.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_clean_irqs(struct octep_device *oct)
+{
+	octep_free_irqs(oct);
+	octep_disable_msix(oct);
+	octep_free_ioq_vectors(oct);
+}
+
+/**
+ * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
+{
+	u32 pkts_pend = oq->pkts_pending;
+
+	netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+	if (iq->pkts_processed) {
+		writel(iq->pkts_processed, iq->inst_cnt_reg);
+		iq->pkt_in_done -= iq->pkts_processed;
+		iq->pkts_processed = 0;
+	}
+	if (oq->last_pkt_count - pkts_pend) {
+		writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+		oq->last_pkt_count = pkts_pend;
+	}
+
+	/* Flush the previous writes before writing to the RESEND bit */
+	wmb();
+	writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+	writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+}
+
+/**
+ * octep_napi_poll() - NAPI poll function for Tx/Rx.
+ *
+ * @napi: pointer to napi context.
+ * @budget: max number of packets to be processed in single invocation.
+ */
+static int octep_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct octep_ioq_vector *ioq_vector =
+		container_of(napi, struct octep_ioq_vector, napi);
+	u32 tx_pending, rx_done;
+
+	tx_pending = octep_iq_process_completions(ioq_vector->iq, budget);
+	rx_done = octep_oq_process_rx(ioq_vector->oq, budget);
+
+	/* need more polling if tx completion processing is still pending or
+	 * processed at least 'budget' number of rx packets.
+	 */
+	if (tx_pending || rx_done >= budget)
+		return budget;
+
+	napi_complete(napi);
+	octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+	return rx_done;
+}
+
+/**
+ * octep_napi_add() - Add NAPI poll for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_add(struct octep_device *oct)
+{
+	int i;
+
+	for (i = 0; i < oct->num_oqs; i++) {
+		netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
+		netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
+			       octep_napi_poll, 64);
+		oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
+	}
+}
+
+/**
+ * octep_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_delete(struct octep_device *oct)
+{
+	int i;
+
+	for (i = 0; i < oct->num_oqs; i++) {
+		netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
+		netif_napi_del(&oct->ioq_vector[i]->napi);
+		oct->oq[i]->napi = NULL;
+	}
+}
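The four NAPI helpers are used strictly in matched pairs; a sketch of the lifecycle as octep_open()/octep_stop() below drive it (error handling elided):

	octep_napi_add(oct);		/* netif_napi_add() on each vector */
	octep_napi_enable(oct);
	/* ... device runs; octep_napi_poll() services queue interrupts ... */
	octep_napi_disable(oct);
	octep_napi_delete(oct);		/* netif_napi_del(); pointers cleared */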
+/**
+ * octep_napi_enable() - enable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_enable(struct octep_device *oct)
+{
+	int i;
+
+	for (i = 0; i < oct->num_oqs; i++) {
+		netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
+		napi_enable(&oct->ioq_vector[i]->napi);
+	}
+}
+
+/**
+ * octep_napi_disable() - disable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_disable(struct octep_device *oct)
+{
+	int i;
+
+	for (i = 0; i < oct->num_oqs; i++) {
+		netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
+		napi_disable(&oct->ioq_vector[i]->napi);
+	}
+}
+
+static void octep_link_up(struct net_device *netdev)
+{
+	netif_carrier_on(netdev);
+	netif_tx_start_all_queues(netdev);
+}
+
+/**
+ * octep_open() - start the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * setup Tx/Rx queues, interrupts and enable hardware operation of Tx/Rx queues
+ * and interrupts.
+ *
+ * Return: 0, on successfully setting up the device and bringing it up.
+ *         -1, on any error.
+ */
+static int octep_open(struct net_device *netdev)
+{
+	struct octep_device *oct = netdev_priv(netdev);
+	int err, ret;
+
+	netdev_info(netdev, "Starting netdev ...\n");
+	netif_carrier_off(netdev);
+
+	oct->hw_ops.reset_io_queues(oct);
+
+	if (octep_setup_iqs(oct))
+		goto setup_iq_err;
+	if (octep_setup_oqs(oct))
+		goto setup_oq_err;
+	if (octep_setup_irqs(oct))
+		goto setup_irq_err;
+
+	err = netif_set_real_num_tx_queues(netdev, oct->num_iqs);
+	if (err)
+		goto set_queues_err;
+	err = netif_set_real_num_rx_queues(netdev, oct->num_oqs);
+	if (err)
+		goto set_queues_err;
+
+	octep_napi_add(oct);
+	octep_napi_enable(oct);
+
+	oct->link_info.admin_up = 1;
+	octep_set_rx_state(oct, true);
+
+	ret = octep_get_link_status(oct);
+	if (!ret)
+		octep_set_link_status(oct, true);
+
+	/* Enable the input and output queues for this Octeon device */
+	oct->hw_ops.enable_io_queues(oct);
+
+	/* Enable Octeon device interrupts */
+	oct->hw_ops.enable_interrupts(oct);
+
+	octep_oq_dbell_init(oct);
+
+	ret = octep_get_link_status(oct);
+	if (ret)
+		octep_link_up(netdev);
+
+	return 0;
+
+set_queues_err:
+	octep_napi_disable(oct);
+	octep_napi_delete(oct);
+	octep_clean_irqs(oct);
+setup_irq_err:
+	octep_free_oqs(oct);
+setup_oq_err:
+	octep_free_iqs(oct);
+setup_iq_err:
+	return -1;
+}
+
+/**
+ * octep_stop() - stop the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * stop the device Tx/Rx operations, bring down the link and
+ * free up all resources allocated for Tx/Rx queues and interrupts.
+ */
+static int octep_stop(struct net_device *netdev)
+{
+	struct octep_device *oct = netdev_priv(netdev);
+
+	netdev_info(netdev, "Stopping the device ...\n");
+
+	/* Stop Tx from stack */
+	netif_tx_stop_all_queues(netdev);
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
+	octep_set_link_status(oct, false);
+	octep_set_rx_state(oct, false);
+
+	oct->link_info.admin_up = 0;
+	oct->link_info.oper_up = 0;
+
+	oct->hw_ops.disable_interrupts(oct);
+	octep_napi_disable(oct);
+	octep_napi_delete(oct);
+
+	octep_clean_irqs(oct);
+	octep_clean_iqs(oct);
+
+	oct->hw_ops.disable_io_queues(oct);
+	oct->hw_ops.reset_io_queues(oct);
+	octep_free_oqs(oct);
+	octep_free_iqs(oct);
+	netdev_info(netdev, "Device stopped !!\n");
+	return 0;
+}
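The Tx full-check that follows uses the classic stop, re-check, wake idiom to close the race against concurrent completions; in generic form (hypothetical free_entries() helper and threshold, for illustration only):

	if (free_entries(ring) < WAKE_THRESHOLD) {
		netif_stop_subqueue(dev, q);
		/* completions may have freed entries between check and stop */
		if (free_entries(ring) >= WAKE_THRESHOLD) {
			netif_start_subqueue(dev, q);
			return 0;	/* keep transmitting */
		}
		return 1;	/* genuinely full */
	}
	return 0;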
+/**
+ * octep_iq_full_check() - check if a Tx queue is full.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Return: 0, if the Tx queue is not full.
+ *         1, if the Tx queue is full.
+ */
+static inline int octep_iq_full_check(struct octep_iq *iq)
+{
+	if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+		   OCTEP_WAKE_QUEUE_THRESHOLD))
+		return 0;
+
+	/* Stop the queue if unable to send */
+	netif_stop_subqueue(iq->netdev, iq->q_no);
+
+	/* check again and restart the queue, in case NAPI has just freed
+	 * enough Tx ring entries.
+	 */
+	if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+		     OCTEP_WAKE_QUEUE_THRESHOLD)) {
+		netif_start_subqueue(iq->netdev, iq->q_no);
+		iq->stats.restart_cnt++;
+		return 0;
+	}
+
+	return 1;
+}
+
+/**
+ * octep_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
+ *
+ * @skb: packet skbuff pointer.
+ * @netdev: kernel network device.
+ *
+ * Return: NETDEV_TX_BUSY, if Tx Queue is full.
+ *         NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
+ */
+static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
+				    struct net_device *netdev)
+{
+	struct octep_device *oct = netdev_priv(netdev);
+	struct octep_tx_sglist_desc *sglist;
+	struct octep_tx_buffer *tx_buffer;
+	struct octep_tx_desc_hw *hw_desc;
+	struct skb_shared_info *shinfo;
+	struct octep_instr_hdr *ih;
+	struct octep_iq *iq;
+	skb_frag_t *frag;
+	u16 nr_frags, si;
+	u16 q_no, wi;
+
+	q_no = skb_get_queue_mapping(skb);
+	if (q_no >= oct->num_iqs) {
+		netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
+		q_no = q_no % oct->num_iqs;
+	}
+
+	iq = oct->iq[q_no];
+	if (octep_iq_full_check(iq)) {
+		iq->stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+
+	shinfo = skb_shinfo(skb);
+	nr_frags = shinfo->nr_frags;
+
+	wi = iq->host_write_index;
+	hw_desc = &iq->desc_ring[wi];
+	hw_desc->ih64 = 0;
+
+	tx_buffer = iq->buff_info + wi;
+	tx_buffer->skb = skb;
+
+	ih = &hw_desc->ih;
+	ih->tlen = skb->len;
+	ih->pkind = oct->pkind;
+
+	if (!nr_frags) {
+		tx_buffer->gather = 0;
+		tx_buffer->dma = dma_map_single(iq->dev, skb->data,
+						skb->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(iq->dev, tx_buffer->dma))
+			goto dma_map_err;
+		hw_desc->dptr = tx_buffer->dma;
+	} else {
+		/* Scatter/Gather */
+		dma_addr_t dma;
+		u16 len;
+
+		sglist = tx_buffer->sglist;
+
+		ih->gsz = nr_frags + 1;
+		ih->gather = 1;
+		tx_buffer->gather = 1;
+
+		len = skb_headlen(skb);
+		dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
+		if (dma_mapping_error(iq->dev, dma))
+			goto dma_map_err;
+
+		dma_sync_single_for_cpu(iq->dev, tx_buffer->sglist_dma,
+					OCTEP_SGLIST_SIZE_PER_PKT,
+					DMA_TO_DEVICE);
+		memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT);
+		sglist[0].len[3] = len;
+		sglist[0].dma_ptr[0] = dma;
+
+		si = 1; /* entry 0 is main skb, mapped above */
+		frag = &shinfo->frags[0];
+		while (nr_frags--) {
+			len = skb_frag_size(frag);
+			dma = skb_frag_dma_map(iq->dev, frag, 0,
+					       len, DMA_TO_DEVICE);
+			if (dma_mapping_error(iq->dev, dma))
+				goto dma_map_sg_err;
+
+			sglist[si >> 2].len[3 - (si & 3)] = len;
+			sglist[si >> 2].dma_ptr[si & 3] = dma;
+
+			frag++;
+			si++;
+		}
+		dma_sync_single_for_device(iq->dev, tx_buffer->sglist_dma,
+					   OCTEP_SGLIST_SIZE_PER_PKT,
+					   DMA_TO_DEVICE);
+
+		hw_desc->dptr = tx_buffer->sglist_dma;
+	}
+
+	/* Flush the hw descriptor before writing to doorbell */
+	wmb();
+
+	/* Ring Doorbell to notify the NIC there is a new packet */
+	writel(1, iq->doorbell_reg);
+	atomic_inc(&iq->instr_pending);
+	wi++;
+	if (wi == iq->max_count)
+		wi = 0;
+	iq->host_write_index = wi;
+
+	netdev_tx_sent_queue(iq->netdev_q, skb->len);
+	iq->stats.instr_posted++;
+	skb_tx_timestamp(skb);
+	return NETDEV_TX_OK;
+
+dma_map_sg_err:
+	if (si > 0) {
+		dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
+				 sglist[0].len[3], DMA_TO_DEVICE);
+		sglist[0].len[3] = 0;
+	}
+	while (si > 1) {
+		si--;
+		dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
+			       sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+		sglist[si >> 2].len[3 - (si & 3)] = 0;
+	}
+	tx_buffer->gather = 0;
+dma_map_err:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+/**
+ * octep_get_stats64() - Get Octeon network device statistics.
+ *
+ * @netdev: kernel network device.
+ * @stats: pointer to stats structure to be filled in.
+ */
+static void octep_get_stats64(struct net_device *netdev,
+			      struct rtnl_link_stats64 *stats)
+{
+	u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+	struct octep_device *oct = netdev_priv(netdev);
+	int q;
+
+	octep_get_if_stats(oct);
+	tx_packets = 0;
+	tx_bytes = 0;
+	rx_packets = 0;
+	rx_bytes = 0;
+	for (q = 0; q < oct->num_oqs; q++) {
+		struct octep_iq *iq = oct->iq[q];
+		struct octep_oq *oq = oct->oq[q];
+
+		tx_packets += iq->stats.instr_completed;
+		tx_bytes += iq->stats.bytes_sent;
+		rx_packets += oq->stats.packets;
+		rx_bytes += oq->stats.bytes;
+	}
+	stats->tx_packets = tx_packets;
+	stats->tx_bytes = tx_bytes;
+	stats->rx_packets = rx_packets;
+	stats->rx_bytes = rx_bytes;
+	stats->multicast = oct->iface_rx_stats.mcast_pkts;
+	stats->rx_errors = oct->iface_rx_stats.err_pkts;
+	stats->collisions = oct->iface_tx_stats.xscol;
+	stats->tx_fifo_errors = oct->iface_tx_stats.undflw;
+}
+
+/**
+ * octep_tx_timeout_task - work queue task to handle Tx queue timeout.
+ *
+ * @work: pointer to Tx queue timeout work_struct
+ *
+ * Stop and start the device so that it frees up all queue resources
+ * and restarts the queues, which potentially clears a Tx queue timeout
+ * condition.
+ */
+static void octep_tx_timeout_task(struct work_struct *work)
+{
+	struct octep_device *oct = container_of(work, struct octep_device,
+						tx_timeout_task);
+	struct net_device *netdev = oct->netdev;
+
+	rtnl_lock();
+	if (netif_running(netdev)) {
+		octep_stop(netdev);
+		octep_open(netdev);
+	}
+	rtnl_unlock();
+}
+
+/**
+ * octep_tx_timeout() - Handle Tx Queue timeout.
+ *
+ * @netdev: pointer to kernel network device.
+ * @txqueue: Timed out Tx queue number.
+ *
+ * Schedule work to handle the Tx queue timeout.
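The scatter list above packs four (length, address) slots per sglist entry, filling lengths from the high index down; a worked example of the index arithmetic (illustration only, same pairing the dma_map_sg_err unwind walks in reverse):

	/* fragment si = 5 lands in:
	 *   sglist[5 >> 2].dma_ptr[5 & 3]   == sglist[1].dma_ptr[1]
	 *   sglist[5 >> 2].len[3 - (5 & 3)] == sglist[1].len[2]
	 */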
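+ *
+ * The ndo_tx_timeout hook is invoked from the netdev watchdog in atomic
+ * context, while octep_stop()/octep_open() sleep; queueing the work item
+ * defers the restart to process context.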
+ */ +static void octep_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct octep_device *oct = netdev_priv(netdev); + + queue_work(octep_wq, &oct->tx_timeout_task); +} + +static int octep_set_mac(struct net_device *netdev, void *p) +{ + struct octep_device *oct = netdev_priv(netdev); + struct sockaddr *addr = (struct sockaddr *)p; + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = octep_set_mac_addr(oct, addr->sa_data); + if (err) + return err; + + memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(netdev, addr->sa_data); + + return 0; +} + +static int octep_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct octep_device *oct = netdev_priv(netdev); + struct octep_iface_link_info *link_info; + int err = 0; + + link_info = &oct->link_info; + if (link_info->mtu == new_mtu) + return 0; + + err = octep_set_mtu(oct, new_mtu); + if (!err) { + oct->link_info.mtu = new_mtu; + netdev->mtu = new_mtu; + } + + return err; +} + +static const struct net_device_ops octep_netdev_ops = { + .ndo_open = octep_open, + .ndo_stop = octep_stop, + .ndo_start_xmit = octep_start_xmit, + .ndo_get_stats64 = octep_get_stats64, + .ndo_tx_timeout = octep_tx_timeout, + .ndo_set_mac_address = octep_set_mac, + .ndo_change_mtu = octep_change_mtu, +}; + +/** + * octep_ctrl_mbox_task - work queue task to handle ctrl mbox messages. + * + * @work: pointer to ctrl mbox work_struct + * + * Poll ctrl mbox message queue and handle control messages from firmware. + **/ +static void octep_ctrl_mbox_task(struct work_struct *work) +{ + struct octep_device *oct = container_of(work, struct octep_device, + ctrl_mbox_task); + struct net_device *netdev = oct->netdev; + struct octep_ctrl_net_f2h_req req = {}; + struct octep_ctrl_mbox_msg msg; + int ret = 0; + + msg.msg = &req; + while (true) { + ret = octep_ctrl_mbox_recv(&oct->ctrl_mbox, &msg); + if (ret) + break; + + switch (req.hdr.cmd) { + case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS: + if (netif_running(netdev)) { + if (req.link.state) { + dev_info(&oct->pdev->dev, "netif_carrier_on\n"); + netif_carrier_on(netdev); + } else { + dev_info(&oct->pdev->dev, "netif_carrier_off\n"); + netif_carrier_off(netdev); + } + } + break; + default: + pr_info("Unknown mbox req : %u\n", req.hdr.cmd); + break; + } + } +} + +/** + * octep_device_setup() - Setup Octeon Device. + * + * @oct: Octeon device private data structure. + * + * Setup Octeon device hardware operations, configuration, etc ... 
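+ *
+ * Return: 0 on success; -ENOMEM or -1 on failure.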
+ */
+int octep_device_setup(struct octep_device *oct)
+{
+	struct octep_ctrl_mbox *ctrl_mbox;
+	struct pci_dev *pdev = oct->pdev;
+	int i, ret;
+
+	/* allocate memory for oct->conf */
+	oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
+	if (!oct->conf)
+		return -ENOMEM;
+
+	/* Map BAR regions */
+	for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
+		oct->mmio[i].hw_addr =
+			ioremap(pci_resource_start(oct->pdev, i * 2),
+				pci_resource_len(oct->pdev, i * 2));
+		if (!oct->mmio[i].hw_addr)
+			goto unmap_prev;
+		oct->mmio[i].mapped = 1;
+	}
+
+	oct->chip_id = pdev->device;
+	oct->rev_id = pdev->revision;
+	dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
+
+	switch (oct->chip_id) {
+	case OCTEP_PCI_DEVICE_ID_CN93_PF:
+		dev_info(&pdev->dev,
+			 "Setting up OCTEON CN93XX PF PASS%d.%d\n",
+			 OCTEP_MAJOR_REV(oct), OCTEP_MINOR_REV(oct));
+		octep_device_setup_cn93_pf(oct);
+		break;
+	default:
+		dev_err(&pdev->dev,
+			"%s: unsupported device\n", __func__);
+		goto unsupported_dev;
+	}
+
+	oct->pkind = CFG_GET_IQ_PKIND(oct->conf);
+
+	/* Initialize control mbox */
+	ctrl_mbox = &oct->ctrl_mbox;
+	ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf);
+	ret = octep_ctrl_mbox_init(ctrl_mbox);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to initialize control mbox\n");
+		goto unsupported_dev;
+	}
+	oct->ctrl_mbox_ifstats_offset = OCTEP_CTRL_MBOX_SZ(ctrl_mbox->h2fq.elem_sz,
+							   ctrl_mbox->h2fq.elem_cnt,
+							   ctrl_mbox->f2hq.elem_sz,
+							   ctrl_mbox->f2hq.elem_cnt);
+
+	return 0;
+
+unsupported_dev:
+	i = OCTEP_MMIO_REGIONS;
+unmap_prev:
+	/* unmap whatever was mapped so far and free the config */
+	while (i--)
+		iounmap(oct->mmio[i].hw_addr);
+	kfree(oct->conf);
+	return -1;
+}
+
+/**
+ * octep_device_cleanup() - Cleanup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Cleanup Octeon device allocated resources.
+ */
+static void octep_device_cleanup(struct octep_device *oct)
+{
+	int i;
+
+	dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
+
+	for (i = 0; i < OCTEP_MAX_VF; i++) {
+		if (oct->mbox[i])
+			vfree(oct->mbox[i]);
+		oct->mbox[i] = NULL;
+	}
+
+	octep_ctrl_mbox_uninit(&oct->ctrl_mbox);
+
+	oct->hw_ops.soft_reset(oct);
+	for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
+		if (oct->mmio[i].mapped)
+			iounmap(oct->mmio[i].hw_addr);
+	}
+
+	kfree(oct->conf);
+	oct->conf = NULL;
+}
+
+/**
+ * octep_probe() - Octeon PCI device probe handler.
+ *
+ * @pdev: PCI device structure.
+ * @ent: entry in Octeon PCI device ID table.
+ *
+ * Initializes and enables the Octeon PCI device for network operations.
+ * Initializes Octeon private data structure and registers a network device.
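+ *
+ * Return: 0 on success, negative error code on failure.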
+ */ +static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct octep_device *octep_dev = NULL; + struct net_device *netdev; + int err; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + return err; + } + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "Failed to set DMA mask !!\n"); + goto err_dma_mask; + } + + err = pci_request_mem_regions(pdev, OCTEP_DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Failed to map PCI memory regions\n"); + goto err_pci_regions; + } + + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + + netdev = alloc_etherdev_mq(sizeof(struct octep_device), + OCTEP_MAX_QUEUES); + if (!netdev) { + dev_err(&pdev->dev, "Failed to allocate netdev\n"); + err = -ENOMEM; + goto err_alloc_netdev; + } + SET_NETDEV_DEV(netdev, &pdev->dev); + + octep_dev = netdev_priv(netdev); + octep_dev->netdev = netdev; + octep_dev->pdev = pdev; + octep_dev->dev = &pdev->dev; + pci_set_drvdata(pdev, octep_dev); + + err = octep_device_setup(octep_dev); + if (err) { + dev_err(&pdev->dev, "Device setup failed\n"); + goto err_octep_config; + } + INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task); + INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task); + + netdev->netdev_ops = &octep_netdev_ops; + octep_set_ethtool_ops(netdev); + netif_carrier_off(netdev); + + netdev->hw_features = NETIF_F_SG; + netdev->features |= netdev->hw_features; + netdev->min_mtu = OCTEP_MIN_MTU; + netdev->max_mtu = OCTEP_MAX_MTU; + netdev->mtu = OCTEP_DEFAULT_MTU; + + octep_get_mac_addr(octep_dev, octep_dev->mac_addr); + eth_hw_addr_set(netdev, octep_dev->mac_addr); + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "Failed to register netdev\n"); + goto register_dev_err; + } + dev_info(&pdev->dev, "Device probe successful\n"); + return 0; + +register_dev_err: + octep_device_cleanup(octep_dev); +err_octep_config: + free_netdev(netdev); +err_alloc_netdev: + pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); +err_pci_regions: +err_dma_mask: + pci_disable_device(pdev); + return err; +} + +/** + * octep_remove() - Remove Octeon PCI device from driver control. + * + * @pdev: PCI device structure of the Octeon device. + * + * Cleanup all resources allocated for the Octeon device. + * Unregister from network device and disable the PCI device. + */ +static void octep_remove(struct pci_dev *pdev) +{ + struct octep_device *oct = pci_get_drvdata(pdev); + struct net_device *netdev; + + if (!oct) + return; + + cancel_work_sync(&oct->tx_timeout_task); + cancel_work_sync(&oct->ctrl_mbox_task); + netdev = oct->netdev; + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); + + octep_device_cleanup(oct); + pci_release_mem_regions(pdev); + free_netdev(netdev); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver octep_driver = { + .name = OCTEP_DRV_NAME, + .id_table = octep_pci_id_tbl, + .probe = octep_probe, + .remove = octep_remove, +}; + +/** + * octep_init_module() - Module initialiation. + * + * create common resource for the driver and register PCI driver. 
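+ *
+ * Return: 0 on success, negative error code on failure.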
+ */
+static int __init octep_init_module(void)
+{
+	int ret;
+
+	pr_info("%s: Loading %s ...\n", OCTEP_DRV_NAME, OCTEP_DRV_STRING);
+
+	/* work queue for all deferred tasks */
+	octep_wq = create_singlethread_workqueue(OCTEP_DRV_NAME);
+	if (!octep_wq) {
+		pr_err("%s: Failed to create common workqueue\n",
+		       OCTEP_DRV_NAME);
+		return -ENOMEM;
+	}
+
+	ret = pci_register_driver(&octep_driver);
+	if (ret < 0) {
+		pr_err("%s: Failed to register PCI driver; err=%d\n",
+		       OCTEP_DRV_NAME, ret);
+		destroy_workqueue(octep_wq);
+		return ret;
+	}
+
+	pr_info("%s: Loaded successfully !\n", OCTEP_DRV_NAME);
+
+	return ret;
+}
+
+/**
+ * octep_exit_module() - Module exit routine.
+ *
+ * Unregister the driver with the PCI subsystem and cleanup common resources.
+ */
+static void __exit octep_exit_module(void)
+{
+	pr_info("%s: Unloading ...\n", OCTEP_DRV_NAME);
+
+	pci_unregister_driver(&octep_driver);
+	destroy_workqueue(octep_wq);
+
+	pr_info("%s: Unloading complete\n", OCTEP_DRV_NAME);
+}
+
+module_init(octep_init_module);
+module_exit(octep_exit_module);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
new file mode 100644
index 000000000000..025626a61383
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_MAIN_H_
+#define _OCTEP_MAIN_H_
+
+#include "octep_tx.h"
+#include "octep_rx.h"
+#include "octep_ctrl_mbox.h"
+
+#define OCTEP_DRV_NAME "octeon_ep"
+#define OCTEP_DRV_STRING "Marvell Octeon EndPoint NIC Driver"
+
+#define OCTEP_PCIID_CN93_PF 0xB200177d
+#define OCTEP_PCIID_CN93_VF 0xB203177d
+
+#define OCTEP_PCI_DEVICE_ID_CN93_PF 0xB200
+#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203
+
+#define OCTEP_MAX_QUEUES 63
+#define OCTEP_MAX_IQ OCTEP_MAX_QUEUES
+#define OCTEP_MAX_OQ OCTEP_MAX_QUEUES
+#define OCTEP_MAX_VF 64
+
+#define OCTEP_MAX_MSIX_VECTORS OCTEP_MAX_OQ
+
+/* Flags to disable and enable Interrupts */
+#define OCTEP_INPUT_INTR (1)
+#define OCTEP_OUTPUT_INTR (2)
+#define OCTEP_MBOX_INTR (4)
+#define OCTEP_ALL_INTR 0xff
+
+#define OCTEP_IQ_INTR_RESEND_BIT 59
+#define OCTEP_OQ_INTR_RESEND_BIT 59
+
+#define OCTEP_MMIO_REGIONS 3
+/* PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ * Octeon gets mapped to different physical address spaces in
+ * the kernel.
+ */
+struct octep_mmio {
+	/* The physical address to which the PCI address space is mapped. */
+	u8 __iomem *hw_addr;
+
+	/* Flag indicating the mapping was successful.
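Only regions with this flag set are iounmap()'ed during cleanup.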
*/ + int mapped; +}; + +struct octep_pci_win_regs { + u8 __iomem *pci_win_wr_addr; + u8 __iomem *pci_win_rd_addr; + u8 __iomem *pci_win_wr_data; + u8 __iomem *pci_win_rd_data; +}; + +struct octep_hw_ops { + void (*setup_iq_regs)(struct octep_device *oct, int q); + void (*setup_oq_regs)(struct octep_device *oct, int q); + void (*setup_mbox_regs)(struct octep_device *oct, int mbox); + + irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector); + irqreturn_t (*ioq_intr_handler)(void *ioq_vector); + int (*soft_reset)(struct octep_device *oct); + void (*reinit_regs)(struct octep_device *oct); + u32 (*update_iq_read_idx)(struct octep_iq *iq); + + void (*enable_interrupts)(struct octep_device *oct); + void (*disable_interrupts)(struct octep_device *oct); + + void (*enable_io_queues)(struct octep_device *oct); + void (*disable_io_queues)(struct octep_device *oct); + void (*enable_iq)(struct octep_device *oct, int q); + void (*disable_iq)(struct octep_device *oct, int q); + void (*enable_oq)(struct octep_device *oct, int q); + void (*disable_oq)(struct octep_device *oct, int q); + void (*reset_io_queues)(struct octep_device *oct); + void (*dump_registers)(struct octep_device *oct); +}; + +/* Octeon mailbox data */ +struct octep_mbox_data { + u32 cmd; + u32 total_len; + u32 recv_len; + u32 rsvd; + u64 *data; +}; + +/* Octeon device mailbox */ +struct octep_mbox { + /* A spinlock to protect access to this q_mbox. */ + spinlock_t lock; + + u32 q_no; + u32 state; + + /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */ + u8 __iomem *mbox_int_reg; + + /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF, + * SLI_PKT_PF_VF_MBOX_SIG(1) for VF. + */ + u8 __iomem *mbox_write_reg; + + /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF, + * SLI_PKT_PF_VF_MBOX_SIG(0) for VF. + */ + u8 __iomem *mbox_read_reg; + + struct octep_mbox_data mbox_data; +}; + +/* Tx/Rx queue vector per interrupt. */ +struct octep_ioq_vector { + char name[OCTEP_MSIX_NAME_SIZE]; + struct napi_struct napi; + struct octep_device *octep_dev; + struct octep_iq *iq; + struct octep_oq *oq; + cpumask_t affinity_mask; +}; + +/* Octeon hardware/firmware offload capability flags. */ +#define OCTEP_CAP_TX_CHECKSUM BIT(0) +#define OCTEP_CAP_RX_CHECKSUM BIT(1) +#define OCTEP_CAP_TSO BIT(2) + +/* Link modes */ +enum octep_link_mode_bit_indices { + OCTEP_LINK_MODE_10GBASE_T = 0, + OCTEP_LINK_MODE_10GBASE_R, + OCTEP_LINK_MODE_10GBASE_CR, + OCTEP_LINK_MODE_10GBASE_KR, + OCTEP_LINK_MODE_10GBASE_LR, + OCTEP_LINK_MODE_10GBASE_SR, + OCTEP_LINK_MODE_25GBASE_CR, + OCTEP_LINK_MODE_25GBASE_KR, + OCTEP_LINK_MODE_25GBASE_SR, + OCTEP_LINK_MODE_40GBASE_CR4, + OCTEP_LINK_MODE_40GBASE_KR4, + OCTEP_LINK_MODE_40GBASE_LR4, + OCTEP_LINK_MODE_40GBASE_SR4, + OCTEP_LINK_MODE_50GBASE_CR2, + OCTEP_LINK_MODE_50GBASE_KR2, + OCTEP_LINK_MODE_50GBASE_SR2, + OCTEP_LINK_MODE_50GBASE_CR, + OCTEP_LINK_MODE_50GBASE_KR, + OCTEP_LINK_MODE_50GBASE_LR, + OCTEP_LINK_MODE_50GBASE_SR, + OCTEP_LINK_MODE_100GBASE_CR4, + OCTEP_LINK_MODE_100GBASE_KR4, + OCTEP_LINK_MODE_100GBASE_LR4, + OCTEP_LINK_MODE_100GBASE_SR4, + OCTEP_LINK_MODE_NBITS +}; + +/* Hardware interface link state information. */ +struct octep_iface_link_info { + /* Bitmap of Supported link speeds/modes. */ + u64 supported_modes; + + /* Bitmap of Advertised link speeds/modes. */ + u64 advertised_modes; + + /* Negotiated link speed in Mbps. */ + u32 speed; + + /* MTU */ + u16 mtu; + + /* Autonegotation state. */ +#define OCTEP_LINK_MODE_AUTONEG_SUPPORTED BIT(0) +#define OCTEP_LINK_MODE_AUTONEG_ADVERTISED BIT(1) + u8 autoneg; + + /* Pause frames setting. 
*/ +#define OCTEP_LINK_MODE_PAUSE_SUPPORTED BIT(0) +#define OCTEP_LINK_MODE_PAUSE_ADVERTISED BIT(1) + u8 pause; + + /* Admin state of the link (ifconfig <iface> up/down */ + u8 admin_up; + + /* Operational state of the link: physical link is up down */ + u8 oper_up; +}; + +/* The Octeon device specific private data structure. + * Each Octeon device has this structure to represent all its components. + */ +struct octep_device { + struct octep_config *conf; + + /* Octeon Chip type. */ + u16 chip_id; + u16 rev_id; + + /* Device capabilities enabled */ + u64 caps_enabled; + /* Device capabilities supported */ + u64 caps_supported; + + /* Pointer to basic Linux device */ + struct device *dev; + /* Linux PCI device pointer */ + struct pci_dev *pdev; + /* Netdev corresponding to the Octeon device */ + struct net_device *netdev; + + /* memory mapped io range */ + struct octep_mmio mmio[OCTEP_MMIO_REGIONS]; + + /* MAC address */ + u8 mac_addr[ETH_ALEN]; + + /* Tx queues (IQ: Instruction Queue) */ + u16 num_iqs; + /* pkind value to be used in every Tx hardware descriptor */ + u8 pkind; + /* Pointers to Octeon Tx queues */ + struct octep_iq *iq[OCTEP_MAX_IQ]; + + /* Rx queues (OQ: Output Queue) */ + u16 num_oqs; + /* Pointers to Octeon Rx queues */ + struct octep_oq *oq[OCTEP_MAX_OQ]; + + /* Hardware port number of the PCIe interface */ + u16 pcie_port; + + /* PCI Window registers to access some hardware CSRs */ + struct octep_pci_win_regs pci_win_regs; + /* Hardware operations */ + struct octep_hw_ops hw_ops; + + /* IRQ info */ + u16 num_irqs; + u16 num_non_ioq_irqs; + char *non_ioq_irq_names; + struct msix_entry *msix_entries; + /* IOq information of it's corresponding MSI-X interrupt. */ + struct octep_ioq_vector *ioq_vector[OCTEP_MAX_QUEUES]; + + /* Hardware Interface Tx statistics */ + struct octep_iface_tx_stats iface_tx_stats; + /* Hardware Interface Rx statistics */ + struct octep_iface_rx_stats iface_rx_stats; + + /* Hardware Interface Link info like supported modes, aneg support */ + struct octep_iface_link_info link_info; + + /* Mailbox to talk to VFs */ + struct octep_mbox *mbox[OCTEP_MAX_VF]; + + /* Work entry to handle Tx timeout */ + struct work_struct tx_timeout_task; + + /* control mbox over pf */ + struct octep_ctrl_mbox ctrl_mbox; + + /* offset for iface stats */ + u32 ctrl_mbox_ifstats_offset; + + /* Work entry to handle ctrl mbox interrupt */ + struct work_struct ctrl_mbox_task; + +}; + +static inline u16 OCTEP_MAJOR_REV(struct octep_device *oct) +{ + u16 rev = (oct->rev_id & 0xC) >> 2; + + return (rev == 0) ? 1 : rev; +} + +static inline u16 OCTEP_MINOR_REV(struct octep_device *oct) +{ + return (oct->rev_id & 0x3); +} + +/* Octeon CSR read/write access APIs */ +#define octep_write_csr(octep_dev, reg_off, value) \ + writel(value, (octep_dev)->mmio[0].hw_addr + (reg_off)) + +#define octep_write_csr64(octep_dev, reg_off, val64) \ + writeq(val64, (octep_dev)->mmio[0].hw_addr + (reg_off)) + +#define octep_read_csr(octep_dev, reg_off) \ + readl((octep_dev)->mmio[0].hw_addr + (reg_off)) + +#define octep_read_csr64(octep_dev, reg_off) \ + readq((octep_dev)->mmio[0].hw_addr + (reg_off)) + +/* Read windowed register. + * @param oct - pointer to the Octeon device. + * @param addr - Address of the register to read. + * + * This routine is called to read from the indirectly accessed + * Octeon registers that are visible through a PCI BAR0 mapped window + * register. + * @return - 64 bit value read from the register. 
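+ *
+ * Illustrative usage (reading the CN93 RST_BOOT CSR through the window):
+ *   u64 boot = OCTEP_PCI_WIN_READ(oct, CN93_RST_BOOT);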
+ */ +static inline u64 +OCTEP_PCI_WIN_READ(struct octep_device *oct, u64 addr) +{ + u64 val64; + + addr |= 1ull << 53; /* read 8 bytes */ + writeq(addr, oct->pci_win_regs.pci_win_rd_addr); + val64 = readq(oct->pci_win_regs.pci_win_rd_data); + + dev_dbg(&oct->pdev->dev, + "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val64); + + return val64; +} + +/* Write windowed register. + * @param oct - pointer to the Octeon device. + * @param addr - Address of the register to write + * @param val - Value to write + * + * This routine is called to write to the indirectly accessed + * Octeon registers that are visible through a PCI BAR0 mapped window + * register. + * @return Nothing. + */ +static inline void +OCTEP_PCI_WIN_WRITE(struct octep_device *oct, u64 addr, u64 val) +{ + writeq(addr, oct->pci_win_regs.pci_win_wr_addr); + writeq(val, oct->pci_win_regs.pci_win_wr_data); + + dev_dbg(&oct->pdev->dev, + "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val); +} + +extern struct workqueue_struct *octep_wq; + +int octep_device_setup(struct octep_device *oct); +int octep_setup_iqs(struct octep_device *oct); +void octep_free_iqs(struct octep_device *oct); +void octep_clean_iqs(struct octep_device *oct); +int octep_setup_oqs(struct octep_device *oct); +void octep_free_oqs(struct octep_device *oct); +void octep_oq_dbell_init(struct octep_device *oct); +void octep_device_setup_cn93_pf(struct octep_device *oct); +int octep_iq_process_completions(struct octep_iq *iq, u16 budget); +int octep_oq_process_rx(struct octep_oq *oq, int budget); +void octep_set_ethtool_ops(struct net_device *netdev); + +#endif /* _OCTEP_MAIN_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h new file mode 100644 index 000000000000..cc51149790ff --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h @@ -0,0 +1,367 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. 
+ *
+ */
+
+#ifndef _OCTEP_REGS_CN9K_PF_H_
+#define _OCTEP_REGS_CN9K_PF_H_
+
+/* ############################ RST ######################### */
+#define CN93_RST_BOOT 0x000087E006001600ULL
+#define CN93_RST_CORE_DOMAIN_W1S 0x000087E006001820ULL
+#define CN93_RST_CORE_DOMAIN_W1C 0x000087E006001828ULL
+
+#define CN93_CONFIG_XPANSION_BAR 0x38
+#define CN93_CONFIG_PCIE_CAP 0x70
+#define CN93_CONFIG_PCIE_DEVCAP 0x74
+#define CN93_CONFIG_PCIE_DEVCTL 0x78
+#define CN93_CONFIG_PCIE_LINKCAP 0x7C
+#define CN93_CONFIG_PCIE_LINKCTL 0x80
+#define CN93_CONFIG_PCIE_SLOTCAP 0x84
+#define CN93_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CN93_PCIE_SRIOV_FDL 0x188 /* 0x98 */
+#define CN93_PCIE_SRIOV_FDL_BIT_POS 0x10
+#define CN93_PCIE_SRIOV_FDL_MASK 0xFF
+
+#define CN93_CONFIG_PCIE_FLTMSK 0x720
+
+/* ################# Offsets of RING, EPF, MAC ######################### */
+#define CN93_RING_OFFSET (0x1ULL << 17)
+#define CN93_EPF_OFFSET (0x1ULL << 25)
+#define CN93_MAC_OFFSET (0x1ULL << 4)
+#define CN93_BIT_ARRAY_OFFSET (0x1ULL << 4)
+#define CN93_EPVF_RING_OFFSET (0x1ULL << 4)
+
+/* ################# Scratch Registers ######################### */
+#define CN93_SDP_EPF_SCRATCH 0x205E0
+
+/* ################# Window Registers ######################### */
+#define CN93_SDP_WIN_WR_ADDR64 0x20000
+#define CN93_SDP_WIN_RD_ADDR64 0x20010
+#define CN93_SDP_WIN_WR_DATA64 0x20020
+#define CN93_SDP_WIN_WR_MASK_REG 0x20030
+#define CN93_SDP_WIN_RD_DATA64 0x20040
+
+#define CN93_SDP_MAC_NUMBER 0x2C100
+
+/* ################# Global Privileged registers ######################### */
+#define CN93_SDP_EPF_RINFO 0x205F0
+
+#define CN93_SDP_EPF_RINFO_SRN(val) ((val) & 0xFF)
+#define CN93_SDP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF)
+#define CN93_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) & 0xFF)
+
+/* SDP Function select */
+#define CN93_SDP_FUNC_SEL_EPF_BIT_POS 8
+#define CN93_SDP_FUNC_SEL_FUNC_BIT_POS 0
+
+/* ##### RING IN (Into device from PCI: Tx Ring) REGISTERS #### */
+#define CN93_SDP_R_IN_CONTROL_START 0x10000
+#define CN93_SDP_R_IN_ENABLE_START 0x10010
+#define CN93_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CN93_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CN93_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CN93_SDP_R_IN_CNTS_START 0x10050
+#define CN93_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CN93_SDP_R_IN_PKT_CNT_START 0x10080
+#define CN93_SDP_R_IN_BYTE_CNT_START 0x10090
+
+#define CN93_SDP_R_IN_CONTROL(ring) \
+	(CN93_SDP_R_IN_CONTROL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_ENABLE(ring) \
+	(CN93_SDP_R_IN_ENABLE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_BADDR(ring) \
+	(CN93_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_RSIZE(ring) \
+	(CN93_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_DBELL(ring) \
+	(CN93_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS(ring) \
+	(CN93_SDP_R_IN_CNTS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_LEVELS(ring) \
+	(CN93_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_PKT_CNT(ring) \
+	(CN93_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_BYTE_CNT(ring) \
+	(CN93_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+/* Rings per Virtual Function */
+#define CN93_R_IN_CTL_RPVF_MASK (0xF)
+#define CN93_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request.
+ * setting to Max value(4) + */ +#define CN93_R_IN_CTL_IDLE (0x1ULL << 28) +#define CN93_R_IN_CTL_RDSIZE (0x3ULL << 25) +#define CN93_R_IN_CTL_IS_64B (0x1ULL << 24) +#define CN93_R_IN_CTL_D_NSR (0x1ULL << 8) +#define CN93_R_IN_CTL_D_ESR (0x1ULL << 6) +#define CN93_R_IN_CTL_D_ROR (0x1ULL << 5) +#define CN93_R_IN_CTL_NSR (0x1ULL << 3) +#define CN93_R_IN_CTL_ESR (0x1ULL << 1) +#define CN93_R_IN_CTL_ROR (0x1ULL << 0) + +#define CN93_R_IN_CTL_MASK (CN93_R_IN_CTL_RDSIZE | CN93_R_IN_CTL_IS_64B) + +/* ##### RING OUT (out from device to PCI host: Rx Ring) REGISTERS #### */ +#define CN93_SDP_R_OUT_CNTS_START 0x10100 +#define CN93_SDP_R_OUT_INT_LEVELS_START 0x10110 +#define CN93_SDP_R_OUT_SLIST_BADDR_START 0x10120 +#define CN93_SDP_R_OUT_SLIST_RSIZE_START 0x10130 +#define CN93_SDP_R_OUT_SLIST_DBELL_START 0x10140 +#define CN93_SDP_R_OUT_CONTROL_START 0x10150 +#define CN93_SDP_R_OUT_ENABLE_START 0x10160 +#define CN93_SDP_R_OUT_PKT_CNT_START 0x10180 +#define CN93_SDP_R_OUT_BYTE_CNT_START 0x10190 + +#define CN93_SDP_R_OUT_CONTROL(ring) \ + (CN93_SDP_R_OUT_CONTROL_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_ENABLE(ring) \ + (CN93_SDP_R_OUT_ENABLE_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_SLIST_BADDR(ring) \ + (CN93_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_SLIST_RSIZE(ring) \ + (CN93_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_SLIST_DBELL(ring) \ + (CN93_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_CNTS(ring) \ + (CN93_SDP_R_OUT_CNTS_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_INT_LEVELS(ring) \ + (CN93_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_PKT_CNT(ring) \ + (CN93_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_BYTE_CNT(ring) \ + (CN93_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET)) + +/*------------------ R_OUT Masks ----------------*/ +#define CN93_R_OUT_INT_LEVELS_BMODE BIT_ULL(63) +#define CN93_R_OUT_INT_LEVELS_TIMET (32) + +#define CN93_R_OUT_CTL_IDLE BIT_ULL(40) +#define CN93_R_OUT_CTL_ES_I BIT_ULL(34) +#define CN93_R_OUT_CTL_NSR_I BIT_ULL(33) +#define CN93_R_OUT_CTL_ROR_I BIT_ULL(32) +#define CN93_R_OUT_CTL_ES_D BIT_ULL(30) +#define CN93_R_OUT_CTL_NSR_D BIT_ULL(29) +#define CN93_R_OUT_CTL_ROR_D BIT_ULL(28) +#define CN93_R_OUT_CTL_ES_P BIT_ULL(26) +#define CN93_R_OUT_CTL_NSR_P BIT_ULL(25) +#define CN93_R_OUT_CTL_ROR_P BIT_ULL(24) +#define CN93_R_OUT_CTL_IMODE BIT_ULL(23) + +/* ############### Interrupt Moderation Registers ############### */ +#define CN93_SDP_R_IN_INT_MDRT_CTL0_START 0x10280 +#define CN93_SDP_R_IN_INT_MDRT_CTL1_START 0x102A0 +#define CN93_SDP_R_IN_INT_MDRT_DBG_START 0x102C0 + +#define CN93_SDP_R_OUT_INT_MDRT_CTL0_START 0x10380 +#define CN93_SDP_R_OUT_INT_MDRT_CTL1_START 0x103A0 +#define CN93_SDP_R_OUT_INT_MDRT_DBG_START 0x103C0 + +#define CN93_SDP_R_IN_INT_MDRT_CTL0(ring) \ + (CN93_SDP_R_IN_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_IN_INT_MDRT_CTL1(ring) \ + (CN93_SDP_R_IN_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_IN_INT_MDRT_DBG(ring) \ + (CN93_SDP_R_IN_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_INT_MDRT_CTL0(ring) \ + (CN93_SDP_R_OUT_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_INT_MDRT_CTL1(ring) \ + (CN93_SDP_R_OUT_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET)) + +#define 
CN93_SDP_R_OUT_INT_MDRT_DBG(ring) \
+	(CN93_SDP_R_OUT_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))
+
+/* ##################### Mail Box Registers ########################## */
+/* INT register for VF. When a mailbox write from the PF happens for a VF,
+ * the corresponding bit is set in this register as well as in the
+ * PF_VF_INT register.
+ *
+ * This is an RO register; the interrupt can be cleared by writing 1 to
+ * PF_VF_INT.
+ */
+/* Basically first 3 are from PF to VF. The last one is data from VF to PF */
+#define CN93_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+#define CN93_SDP_R_MBOX_PF_VF_INT_START 0x10220
+#define CN93_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CN93_SDP_R_MBOX_PF_VF_DATA(ring) \
+	(CN93_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_PF_VF_INT(ring) \
+	(CN93_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_VF_PF_DATA(ring) \
+	(CN93_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
+/* ##################### Interrupt Registers ########################## */
+#define CN93_SDP_R_ERR_TYPE_START 0x10400
+
+#define CN93_SDP_R_ERR_TYPE(ring) \
+	(CN93_SDP_R_ERR_TYPE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_ISM_START 0x10500
+#define CN93_SDP_R_OUT_CNTS_ISM_START 0x10510
+#define CN93_SDP_R_IN_CNTS_ISM_START 0x10520
+
+#define CN93_SDP_R_MBOX_ISM(ring) \
+	(CN93_SDP_R_MBOX_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_CNTS_ISM(ring) \
+	(CN93_SDP_R_OUT_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS_ISM(ring) \
+	(CN93_SDP_R_IN_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_EPF_MBOX_RINT_START 0x20100
+#define CN93_SDP_EPF_MBOX_RINT_W1S_START 0x20120
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START 0x20140
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START 0x20160
+
+#define CN93_SDP_EPF_VFIRE_RINT_START 0x20180
+#define CN93_SDP_EPF_VFIRE_RINT_W1S_START 0x201A0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START 0x201C0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START 0x201E0
+
+#define CN93_SDP_EPF_IRERR_RINT 0x20200
+#define CN93_SDP_EPF_IRERR_RINT_W1S 0x20210
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1C 0x20220
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1S 0x20230
+
+#define CN93_SDP_EPF_VFORE_RINT_START 0x20240
+#define CN93_SDP_EPF_VFORE_RINT_W1S_START 0x20260
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START 0x20280
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START 0x202A0
+
+#define CN93_SDP_EPF_ORERR_RINT 0x20320
+#define CN93_SDP_EPF_ORERR_RINT_W1S 0x20330
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1C 0x20340
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1S 0x20350
+
+#define CN93_SDP_EPF_OEI_RINT 0x20360
+#define CN93_SDP_EPF_OEI_RINT_W1S 0x20370
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1C 0x20380
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1S 0x20390
+
+#define CN93_SDP_EPF_DMA_RINT 0x20400
+#define CN93_SDP_EPF_DMA_RINT_W1S 0x20410
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1C 0x20420
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1S 0x20430
+
+#define CN93_SDP_EPF_DMA_INT_LEVEL_START 0x20440
+#define CN93_SDP_EPF_DMA_CNT_START 0x20460
+#define CN93_SDP_EPF_DMA_TIM_START 0x20480
+
+#define CN93_SDP_EPF_MISC_RINT 0x204A0
+#define CN93_SDP_EPF_MISC_RINT_W1S 0x204B0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1C 0x204C0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1S 0x204D0
+
+#define CN93_SDP_EPF_DMA_VF_RINT_START 0x204E0
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S_START 0x20500
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START 0x20520
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START 0x20540
+
+#define CN93_SDP_EPF_PP_VF_RINT_START 0x20560
+#define CN93_SDP_EPF_PP_VF_RINT_W1S_START 0x20580
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START 0x205A0
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START 0x205C0
+
+#define CN93_SDP_EPF_MBOX_RINT(index) \
+	(CN93_SDP_EPF_MBOX_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_W1S(index) \
+	(CN93_SDP_EPF_MBOX_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C(index) \
+	(CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S(index) \
+	(CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFIRE_RINT(index) \
+	(CN93_SDP_EPF_VFIRE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_W1S(index) \
+	(CN93_SDP_EPF_VFIRE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(index) \
+	(CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(index) \
+	(CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFORE_RINT(index) \
+	(CN93_SDP_EPF_VFORE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_W1S(index) \
+	(CN93_SDP_EPF_VFORE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C(index) \
+	(CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S(index) \
+	(CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_DMA_VF_RINT(index) \
+	(CN93_SDP_EPF_DMA_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S(index) \
+	(CN93_SDP_EPF_DMA_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(index) \
+	(CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(index) \
+	(CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_PP_VF_RINT(index) \
+	(CN93_SDP_EPF_PP_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_W1S(index) \
+	(CN93_SDP_EPF_PP_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(index) \
+	(CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(index) \
+	(CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+/*------------------ Interrupt Masks ----------------*/
+#define CN93_INTR_R_SEND_ISM BIT_ULL(63)
+#define CN93_INTR_R_OUT_INT BIT_ULL(62)
+#define CN93_INTR_R_IN_INT BIT_ULL(61)
+#define CN93_INTR_R_MBOX_INT BIT_ULL(60)
+#define CN93_INTR_R_RESEND BIT_ULL(59)
+#define CN93_INTR_R_CLR_TIM BIT_ULL(58)
+
+/* ####################### Ring Mapping Registers ################################## */
+#define CN93_SDP_EPVF_RING_START 0x26000
+#define CN93_SDP_IN_RING_TB_MAP_START 0x28000
+#define CN93_SDP_IN_RATE_LIMIT_START 0x2A000
+#define CN93_SDP_MAC_PF_RING_CTL_START 0x2C000
+
+#define CN93_SDP_EPVF_RING(ring) \
+	(CN93_SDP_EPVF_RING_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RING_TB_MAP(ring) \
+	(CN93_SDP_IN_RING_TB_MAP_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RATE_LIMIT(ring) \
+	(CN93_SDP_IN_RATE_LIMIT_START + ((ring) *
CN93_EPVF_RING_OFFSET)) +#define CN93_SDP_MAC_PF_RING_CTL(mac) \ + (CN93_SDP_MAC_PF_RING_CTL_START + ((mac) * CN93_MAC_OFFSET)) + +#define CN93_SDP_MAC_PF_RING_CTL_NPFS(val) ((val) & 0xF) +#define CN93_SDP_MAC_PF_RING_CTL_SRN(val) (((val) >> 8) & 0xFF) +#define CN93_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 16) & 0x3F) + +/* Number of non-queue interrupts in CN93xx */ +#define CN93_NUM_NON_IOQ_INTR 16 +#endif /* _OCTEP_REGS_CN9K_PF_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c new file mode 100644 index 000000000000..945947ec7723 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c @@ -0,0 +1,508 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include <linux/pci.h> +#include <linux/etherdevice.h> +#include <linux/vmalloc.h> + +#include "octep_config.h" +#include "octep_main.h" + +static void octep_oq_reset_indices(struct octep_oq *oq) +{ + oq->host_read_idx = 0; + oq->host_refill_idx = 0; + oq->refill_count = 0; + oq->last_pkt_count = 0; + oq->pkts_pending = 0; +} + +/** + * octep_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring. + * + * @oq: Octeon Rx queue data structure. + * + * Return: 0, if successfully filled receive buffers for all descriptors. + * -1, if failed to allocate a buffer or failed to map for DMA. + */ +static int octep_oq_fill_ring_buffers(struct octep_oq *oq) +{ + struct octep_oq_desc_hw *desc_ring = oq->desc_ring; + struct page *page; + u32 i; + + for (i = 0; i < oq->max_count; i++) { + page = dev_alloc_page(); + if (unlikely(!page)) { + dev_err(oq->dev, "Rx buffer alloc failed\n"); + goto rx_buf_alloc_err; + } + desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0, + PAGE_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) { + dev_err(oq->dev, + "OQ-%d buffer alloc: DMA mapping error!\n", + oq->q_no); + put_page(page); + goto dma_map_err; + } + oq->buff_info[i].page = page; + } + + return 0; + +dma_map_err: +rx_buf_alloc_err: + while (i) { + i--; + dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE); + put_page(oq->buff_info[i].page); + oq->buff_info[i].page = NULL; + } + + return -1; +} + +/** + * octep_oq_refill() - refill buffers for used Rx ring descriptors. + * + * @oct: Octeon device private data structure. + * @oq: Octeon Rx queue data structure. + * + * Return: number of descriptors successfully refilled with receive buffers. + */ +static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq) +{ + struct octep_oq_desc_hw *desc_ring = oq->desc_ring; + struct page *page; + u32 refill_idx, i; + + refill_idx = oq->host_refill_idx; + for (i = 0; i < oq->refill_count; i++) { + page = dev_alloc_page(); + if (unlikely(!page)) { + dev_err(oq->dev, "refill: rx buffer alloc failed\n"); + oq->stats.alloc_failures++; + break; + } + + desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) { + dev_err(oq->dev, + "OQ-%d buffer refill: DMA mapping error!\n", + oq->q_no); + put_page(page); + oq->stats.alloc_failures++; + break; + } + oq->buff_info[refill_idx].page = page; + refill_idx++; + if (refill_idx == oq->max_count) + refill_idx = 0; + } + oq->host_refill_idx = refill_idx; + oq->refill_count -= i; + + return i; +} + +/** + * octep_setup_oq() - Setup a Rx queue. 
+ * + * @oct: Octeon device private data structure. + * @q_no: Rx queue number to be setup. + * + * Allocate resources for a Rx queue. + */ +static int octep_setup_oq(struct octep_device *oct, int q_no) +{ + struct octep_oq *oq; + u32 desc_ring_size; + + oq = vzalloc(sizeof(*oq)); + if (!oq) + goto create_oq_fail; + oct->oq[q_no] = oq; + + oq->octep_dev = oct; + oq->netdev = oct->netdev; + oq->dev = &oct->pdev->dev; + oq->q_no = q_no; + oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf); + oq->ring_size_mask = oq->max_count - 1; + oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf); + oq->max_single_buffer_size = oq->buffer_size - OCTEP_OQ_RESP_HW_SIZE; + + /* When the hardware/firmware supports additional capabilities, + * additional header is filled-in by Octeon after length field in + * Rx packets. this header contains additional packet information. + */ + if (oct->caps_enabled) + oq->max_single_buffer_size -= OCTEP_OQ_RESP_HW_EXT_SIZE; + + oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf); + + desc_ring_size = oq->max_count * OCTEP_OQ_DESC_SIZE; + oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size, + &oq->desc_ring_dma, GFP_KERNEL); + + if (unlikely(!oq->desc_ring)) { + dev_err(oq->dev, + "Failed to allocate DMA memory for OQ-%d !!\n", q_no); + goto desc_dma_alloc_err; + } + + oq->buff_info = (struct octep_rx_buffer *) + vzalloc(oq->max_count * OCTEP_OQ_RECVBUF_SIZE); + if (unlikely(!oq->buff_info)) { + dev_err(&oct->pdev->dev, + "Failed to allocate buffer info for OQ-%d\n", q_no); + goto buf_list_err; + } + + if (octep_oq_fill_ring_buffers(oq)) + goto oq_fill_buff_err; + + octep_oq_reset_indices(oq); + oct->hw_ops.setup_oq_regs(oct, q_no); + oct->num_oqs++; + + return 0; + +oq_fill_buff_err: + vfree(oq->buff_info); + oq->buff_info = NULL; +buf_list_err: + dma_free_coherent(oq->dev, desc_ring_size, + oq->desc_ring, oq->desc_ring_dma); + oq->desc_ring = NULL; +desc_dma_alloc_err: + vfree(oq); + oct->oq[q_no] = NULL; +create_oq_fail: + return -1; +} + +/** + * octep_oq_free_ring_buffers() - Free ring buffers. + * + * @oq: Octeon Rx queue data structure. + * + * Free receive buffers in unused Rx queue descriptors. + */ +static void octep_oq_free_ring_buffers(struct octep_oq *oq) +{ + struct octep_oq_desc_hw *desc_ring = oq->desc_ring; + int i; + + if (!oq->desc_ring || !oq->buff_info) + return; + + for (i = 0; i < oq->max_count; i++) { + if (oq->buff_info[i].page) { + dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, + PAGE_SIZE, DMA_FROM_DEVICE); + put_page(oq->buff_info[i].page); + oq->buff_info[i].page = NULL; + desc_ring[i].buffer_ptr = 0; + } + } + octep_oq_reset_indices(oq); +} + +/** + * octep_free_oq() - Free Rx queue resources. + * + * @oq: Octeon Rx queue data structure. + * + * Free all resources of a Rx queue. + */ +static int octep_free_oq(struct octep_oq *oq) +{ + struct octep_device *oct = oq->octep_dev; + int q_no = oq->q_no; + + octep_oq_free_ring_buffers(oq); + + if (oq->buff_info) + vfree(oq->buff_info); + + if (oq->desc_ring) + dma_free_coherent(oq->dev, + oq->max_count * OCTEP_OQ_DESC_SIZE, + oq->desc_ring, oq->desc_ring_dma); + + vfree(oq); + oct->oq[q_no] = NULL; + oct->num_oqs--; + return 0; +} + +/** + * octep_setup_oqs() - setup resources for all Rx queues. + * + * @oct: Octeon device private data structure. 
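+ *
+ * Return: 0 if all Rx queues are set up successfully; -1 on failure
+ * (queues that were already set up are freed before returning).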
+ */ +int octep_setup_oqs(struct octep_device *oct) +{ + int i, retval = 0; + + oct->num_oqs = 0; + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + retval = octep_setup_oq(oct, i); + if (retval) { + dev_err(&oct->pdev->dev, + "Failed to setup OQ(RxQ)-%d.\n", i); + goto oq_setup_err; + } + dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i); + } + + return 0; + +oq_setup_err: + while (i) { + i--; + octep_free_oq(oct->oq[i]); + } + return -1; +} + +/** + * octep_oq_dbell_init() - Initialize Rx queue doorbell. + * + * @oct: Octeon device private data structure. + * + * Write number of descriptors to Rx queue doorbell register. + */ +void octep_oq_dbell_init(struct octep_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) + writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); +} + +/** + * octep_free_oqs() - Free resources of all Rx queues. + * + * @oct: Octeon device private data structure. + */ +void octep_free_oqs(struct octep_device *oct) +{ + int i; + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + if (!oct->oq[i]) + continue; + octep_free_oq(oct->oq[i]); + dev_dbg(&oct->pdev->dev, + "Successfully freed OQ(RxQ)-%d.\n", i); + } +} + +/** + * octep_oq_check_hw_for_pkts() - Check for new Rx packets. + * + * @oct: Octeon device private data structure. + * @oq: Octeon Rx queue data structure. + * + * Return: packets received after previous check. + */ +static int octep_oq_check_hw_for_pkts(struct octep_device *oct, + struct octep_oq *oq) +{ + u32 pkt_count, new_pkts; + + pkt_count = readl(oq->pkts_sent_reg); + new_pkts = pkt_count - oq->last_pkt_count; + + /* Clear the hardware packets counter register if the rx queue is + * being processed continuously with-in a single interrupt and + * reached half its max value. + * this counter is not cleared every time read, to save write cycles. + */ + if (unlikely(pkt_count > 0xF0000000U)) { + writel(pkt_count, oq->pkts_sent_reg); + pkt_count = readl(oq->pkts_sent_reg); + new_pkts += pkt_count; + } + oq->last_pkt_count = pkt_count; + oq->pkts_pending += new_pkts; + return new_pkts; +} + +/** + * __octep_oq_process_rx() - Process hardware Rx queue and push to stack. + * + * @oct: Octeon device private data structure. + * @oq: Octeon Rx queue data structure. + * @pkts_to_process: number of packets to be processed. + * + * Process the new packets in Rx queue. + * Packets larger than single Rx buffer arrive in consecutive descriptors. + * But, count returned by the API only accounts full packets, not fragments. + * + * Return: number of packets processed and pushed to stack. 
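+ *
+ * For example, with 2KB receive buffers a 5000-byte packet spans three
+ * consecutive descriptors, but is counted here as a single packet.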
+ */ +static int __octep_oq_process_rx(struct octep_device *oct, + struct octep_oq *oq, u16 pkts_to_process) +{ + struct octep_oq_resp_hw_ext *resp_hw_ext = NULL; + struct octep_rx_buffer *buff_info; + struct octep_oq_resp_hw *resp_hw; + u32 pkt, rx_bytes, desc_used; + struct sk_buff *skb; + u16 data_offset; + u32 read_idx; + + read_idx = oq->host_read_idx; + rx_bytes = 0; + desc_used = 0; + for (pkt = 0; pkt < pkts_to_process; pkt++) { + buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx]; + dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr, + PAGE_SIZE, DMA_FROM_DEVICE); + resp_hw = page_address(buff_info->page); + buff_info->page = NULL; + + /* Swap the length field that is in Big-Endian to CPU */ + buff_info->len = be64_to_cpu(resp_hw->length); + if (oct->caps_enabled & OCTEP_CAP_RX_CHECKSUM) { + /* Extended response header is immediately after + * response header (resp_hw) + */ + resp_hw_ext = (struct octep_oq_resp_hw_ext *) + (resp_hw + 1); + buff_info->len -= OCTEP_OQ_RESP_HW_EXT_SIZE; + /* Packet Data is immediately after + * extended response header. + */ + data_offset = OCTEP_OQ_RESP_HW_SIZE + + OCTEP_OQ_RESP_HW_EXT_SIZE; + } else { + /* Data is immediately after + * Hardware Rx response header. + */ + data_offset = OCTEP_OQ_RESP_HW_SIZE; + } + rx_bytes += buff_info->len; + + if (buff_info->len <= oq->max_single_buffer_size) { + skb = build_skb((void *)resp_hw, PAGE_SIZE); + skb_reserve(skb, data_offset); + skb_put(skb, buff_info->len); + read_idx++; + desc_used++; + if (read_idx == oq->max_count) + read_idx = 0; + } else { + struct skb_shared_info *shinfo; + u16 data_len; + + skb = build_skb((void *)resp_hw, PAGE_SIZE); + skb_reserve(skb, data_offset); + /* Head fragment includes response header(s); + * subsequent fragments contains only data. + */ + skb_put(skb, oq->max_single_buffer_size); + read_idx++; + desc_used++; + if (read_idx == oq->max_count) + read_idx = 0; + + shinfo = skb_shinfo(skb); + data_len = buff_info->len - oq->max_single_buffer_size; + while (data_len) { + dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr, + PAGE_SIZE, DMA_FROM_DEVICE); + buff_info = (struct octep_rx_buffer *) + &oq->buff_info[read_idx]; + if (data_len < oq->buffer_size) { + buff_info->len = data_len; + data_len = 0; + } else { + buff_info->len = oq->buffer_size; + data_len -= oq->buffer_size; + } + + skb_add_rx_frag(skb, shinfo->nr_frags, + buff_info->page, 0, + buff_info->len, + buff_info->len); + buff_info->page = NULL; + read_idx++; + desc_used++; + if (read_idx == oq->max_count) + read_idx = 0; + } + } + + skb->dev = oq->netdev; + skb->protocol = eth_type_trans(skb, skb->dev); + if (resp_hw_ext && + resp_hw_ext->csum_verified == OCTEP_CSUM_VERIFIED) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + napi_gro_receive(oq->napi, skb); + } + + oq->host_read_idx = read_idx; + oq->refill_count += desc_used; + oq->stats.packets += pkt; + oq->stats.bytes += rx_bytes; + + return pkt; +} + +/** + * octep_oq_process_rx() - Process Rx queue. + * + * @oq: Octeon Rx queue data structure. + * @budget: max number of packets can be processed in one invocation. + * + * Check for newly received packets and process them. + * Keeps checking for new packets until budget is used or no new packets seen. + * + * Return: number of packets processed. 
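+ *
+ * Runs in NAPI poll context; received packets are handed to the stack
+ * through napi_gro_receive().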
+ */ +int octep_oq_process_rx(struct octep_oq *oq, int budget) +{ + u32 pkts_available, pkts_processed, total_pkts_processed; + struct octep_device *oct = oq->octep_dev; + + pkts_available = 0; + pkts_processed = 0; + total_pkts_processed = 0; + while (total_pkts_processed < budget) { + /* update pending count only when current one exhausted */ + if (oq->pkts_pending == 0) + octep_oq_check_hw_for_pkts(oct, oq); + pkts_available = min(budget - total_pkts_processed, + oq->pkts_pending); + if (!pkts_available) + break; + + pkts_processed = __octep_oq_process_rx(oct, oq, + pkts_available); + oq->pkts_pending -= pkts_processed; + total_pkts_processed += pkts_processed; + } + + if (oq->refill_count >= oq->refill_threshold) { + u32 desc_refilled = octep_oq_refill(oct, oq); + + /* flush pending writes before updating credits */ + wmb(); + writel(desc_refilled, oq->pkts_credit_reg); + } + + return total_pkts_processed; +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h new file mode 100644 index 000000000000..782a24f27f3e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h @@ -0,0 +1,199 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#ifndef _OCTEP_RX_H_ +#define _OCTEP_RX_H_ + +/* struct octep_oq_desc_hw - Octeon Hardware OQ descriptor format. + * + * The descriptor ring is made of descriptors which have 2 64-bit values: + * + * @buffer_ptr: DMA address of the skb->data + * @info_ptr: DMA address of host memory, used to update pkt count by hw. + * This is currently unused to save pci writes. + */ +struct octep_oq_desc_hw { + dma_addr_t buffer_ptr; + u64 info_ptr; +}; + +#define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw)) + +#define OCTEP_CSUM_L4_VERIFIED 0x1 +#define OCTEP_CSUM_IP_VERIFIED 0x2 +#define OCTEP_CSUM_VERIFIED (OCTEP_CSUM_L4_VERIFIED | OCTEP_CSUM_IP_VERIFIED) + +/* Extended Response Header in packet data received from Hardware. + * Includes metadata like checksum status. + * this is valid only if hardware/firmware published support for this. + * This is at offset 0 of packet data (skb->data). + */ +struct octep_oq_resp_hw_ext { + /* Reserved. */ + u64 reserved:62; + + /* checksum verified. */ + u64 csum_verified:2; +}; + +#define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext)) + +/* Length of Rx packet DMA'ed by Octeon to Host. + * this is in bigendian; so need to be converted to cpu endian. + * Octeon writes this at the beginning of Rx buffer (skb->data). + */ +struct octep_oq_resp_hw { + /* The Length of the packet. */ + __be64 length; +}; + +#define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw)) + +/* Pointer to data buffer. + * Driver keeps a pointer to the data buffer that it made available to + * the Octeon device. Since the descriptor ring keeps physical (bus) + * addresses, this field is required for the driver to keep track of + * the virtual address pointers. The fields are operated by + * OS-dependent routines. + */ +struct octep_rx_buffer { + struct page *page; + + /* length from rx hardware descriptor after converting to cpu endian */ + u64 len; +}; + +#define OCTEP_OQ_RECVBUF_SIZE (sizeof(struct octep_rx_buffer)) + +/* Output Queue statistics. Each output queue has four stats fields. */ +struct octep_oq_stats { + /* Number of packets received from the Device. */ + u64 packets; + + /* Number of bytes received from the Device. 
*/ + u64 bytes; + + /* Number of times failed to allocate buffers. */ + u64 alloc_failures; +}; + +#define OCTEP_OQ_STATS_SIZE (sizeof(struct octep_oq_stats)) + +/* Hardware interface Rx statistics */ +struct octep_iface_rx_stats { + /* Received packets */ + u64 pkts; + + /* Octets of received packets */ + u64 octets; + + /* Received PAUSE and Control packets */ + u64 pause_pkts; + + /* Received PAUSE and Control octets */ + u64 pause_octets; + + /* Filtered DMAC0 packets */ + u64 dmac0_pkts; + + /* Filtered DMAC0 octets */ + u64 dmac0_octets; + + /* Packets dropped due to RX FIFO full */ + u64 dropped_pkts_fifo_full; + + /* Octets dropped due to RX FIFO full */ + u64 dropped_octets_fifo_full; + + /* Error packets */ + u64 err_pkts; + + /* Filtered DMAC1 packets */ + u64 dmac1_pkts; + + /* Filtered DMAC1 octets */ + u64 dmac1_octets; + + /* NCSI-bound packets dropped */ + u64 ncsi_dropped_pkts; + + /* NCSI-bound octets dropped */ + u64 ncsi_dropped_octets; + + /* Multicast packets received. */ + u64 mcast_pkts; + + /* Broadcast packets received. */ + u64 bcast_pkts; + +}; + +/* The Descriptor Ring Output Queue structure. + * This structure has all the information required to implement a + * Octeon OQ. + */ +struct octep_oq { + u32 q_no; + + struct octep_device *octep_dev; + struct net_device *netdev; + struct device *dev; + + struct napi_struct *napi; + + /* The receive buffer list. This list has the virtual addresses + * of the buffers. + */ + struct octep_rx_buffer *buff_info; + + /* Pointer to the mapped packet credit register. + * Host writes number of info/buffer ptrs available to this register + */ + u8 __iomem *pkts_credit_reg; + + /* Pointer to the mapped packet sent register. + * Octeon writes the number of packets DMA'ed to host memory + * in this register. + */ + u8 __iomem *pkts_sent_reg; + + /* Statistics for this OQ. */ + struct octep_oq_stats stats; + + /* Packets pending to be processed */ + u32 pkts_pending; + u32 last_pkt_count; + + /* Index in the ring where the driver should read the next packet */ + u32 host_read_idx; + + /* Number of descriptors in this ring. */ + u32 max_count; + u32 ring_size_mask; + + /* The number of descriptors pending refill. */ + u32 refill_count; + + /* Index in the ring where the driver will refill the + * descriptor's buffer + */ + u32 host_refill_idx; + u32 refill_threshold; + + /* The size of each buffer pointed by the buffer pointer. */ + u32 buffer_size; + u32 max_single_buffer_size; + + /* The 8B aligned descriptor ring starts at this address. */ + struct octep_oq_desc_hw *desc_ring; + + /* DMA mapped address of the OQ descriptor ring. */ + dma_addr_t desc_ring_dma; +}; + +#define OCTEP_OQ_SIZE (sizeof(struct octep_oq)) +#endif /* _OCTEP_RX_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c new file mode 100644 index 000000000000..511552bc3e87 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include <linux/pci.h> +#include <linux/etherdevice.h> +#include <linux/vmalloc.h> + +#include "octep_config.h" +#include "octep_main.h" + +/* Reset various index of Tx queue data structure. 
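All ring indices, the fill count and the pending-instruction count are cleared.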
*/
+static void octep_iq_reset_indices(struct octep_iq *iq)
+{
+	iq->fill_cnt = 0;
+	iq->host_write_index = 0;
+	iq->octep_read_index = 0;
+	iq->flush_index = 0;
+	iq->pkts_processed = 0;
+	iq->pkt_in_done = 0;
+	atomic_set(&iq->instr_pending, 0);
+}
+
+/**
+ * octep_iq_process_completions() - Process Tx queue completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @budget: max number of completions to be processed in one invocation.
+ */
+int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
+{
+	u32 compl_pkts, compl_bytes, compl_sg;
+	struct octep_device *oct = iq->octep_dev;
+	struct octep_tx_buffer *tx_buffer;
+	struct skb_shared_info *shinfo;
+	u32 fi = iq->flush_index;
+	struct sk_buff *skb;
+	u8 frags, i;
+
+	compl_pkts = 0;
+	compl_sg = 0;
+	compl_bytes = 0;
+	iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq);
+
+	while (likely(budget && (fi != iq->octep_read_index))) {
+		tx_buffer = iq->buff_info + fi;
+		skb = tx_buffer->skb;
+
+		fi++;
+		if (unlikely(fi == iq->max_count))
+			fi = 0;
+		compl_bytes += skb->len;
+		compl_pkts++;
+		budget--;
+
+		if (!tx_buffer->gather) {
+			dma_unmap_single(iq->dev, tx_buffer->dma,
+					 tx_buffer->skb->len, DMA_TO_DEVICE);
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+
+		/* Scatter/Gather */
+		shinfo = skb_shinfo(skb);
+		frags = shinfo->nr_frags;
+		compl_sg++;
+
+		dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
+				 tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
+
+		i = 1; /* entry 0 is main skb, unmapped above */
+		while (frags--) {
+			dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+				       tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+			i++;
+		}
+
+		dev_kfree_skb_any(skb);
+	}
+
+	iq->pkts_processed += compl_pkts;
+	atomic_sub(compl_pkts, &iq->instr_pending);
+	iq->stats.instr_completed += compl_pkts;
+	iq->stats.bytes_sent += compl_bytes;
+	iq->stats.sgentry_sent += compl_sg;
+	iq->flush_index = fi;
+
+	netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
+
+	if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
+	    ((iq->max_count - atomic_read(&iq->instr_pending)) >
+	     OCTEP_WAKE_QUEUE_THRESHOLD))
+		netif_wake_subqueue(iq->netdev, iq->q_no);
+	return !budget;
+}
+
+/**
+ * octep_iq_free_pending() - Free Tx buffers for pending completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ */
+static void octep_iq_free_pending(struct octep_iq *iq)
+{
+	struct octep_tx_buffer *tx_buffer;
+	struct skb_shared_info *shinfo;
+	u32 fi = iq->flush_index;
+	struct sk_buff *skb;
+	u8 frags, i;
+
+	while (fi != iq->host_write_index) {
+		tx_buffer = iq->buff_info + fi;
+		skb = tx_buffer->skb;
+
+		fi++;
+		if (unlikely(fi == iq->max_count))
+			fi = 0;
+
+		if (!tx_buffer->gather) {
+			dma_unmap_single(iq->dev, tx_buffer->dma,
+					 tx_buffer->skb->len, DMA_TO_DEVICE);
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+
+		/* Scatter/Gather */
+		shinfo = skb_shinfo(skb);
+		frags = shinfo->nr_frags;
+
+		dma_unmap_single(iq->dev,
+				 tx_buffer->sglist[0].dma_ptr[0],
+				 tx_buffer->sglist[0].len[3],
+				 DMA_TO_DEVICE);
+
+		i = 1; /* entry 0 is main skb, unmapped above */
+		while (frags--) {
+			dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+				       tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+			i++;
+		}
+
+		dev_kfree_skb_any(skb);
+	}
+
+	atomic_set(&iq->instr_pending, 0);
+	iq->flush_index = fi;
+	netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
+}
+
+/**
+ * octep_clean_iqs() - Clean Tx queues to shutdown the device.
+ *
+ * @oct: Octeon device private data structure.
+ * + * Free the buffers in Tx queue descriptors pending completion and + * reset queue indices + */ +void octep_clean_iqs(struct octep_device *oct) +{ + int i; + + for (i = 0; i < oct->num_iqs; i++) { + octep_iq_free_pending(oct->iq[i]); + octep_iq_reset_indices(oct->iq[i]); + } +} + +/** + * octep_setup_iq() - Setup a Tx queue. + * + * @oct: Octeon device private data structure. + * @q_no: Tx queue number to be setup. + * + * Allocate resources for a Tx queue. + */ +static int octep_setup_iq(struct octep_device *oct, int q_no) +{ + u32 desc_ring_size, buff_info_size, sglist_size; + struct octep_iq *iq; + int i; + + iq = vzalloc(sizeof(*iq)); + if (!iq) + goto iq_alloc_err; + oct->iq[q_no] = iq; + + iq->octep_dev = oct; + iq->netdev = oct->netdev; + iq->dev = &oct->pdev->dev; + iq->q_no = q_no; + iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf); + iq->ring_size_mask = iq->max_count - 1; + iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf); + iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no); + + /* Allocate memory for hardware queue descriptors */ + desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); + iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size, + &iq->desc_ring_dma, GFP_KERNEL); + if (unlikely(!iq->desc_ring)) { + dev_err(iq->dev, + "Failed to allocate DMA memory for IQ-%d\n", q_no); + goto desc_dma_alloc_err; + } + + /* Allocate memory for hardware SGLIST descriptors */ + sglist_size = OCTEP_SGLIST_SIZE_PER_PKT * + CFG_GET_IQ_NUM_DESC(oct->conf); + iq->sglist = dma_alloc_coherent(iq->dev, sglist_size, + &iq->sglist_dma, GFP_KERNEL); + if (unlikely(!iq->sglist)) { + dev_err(iq->dev, + "Failed to allocate DMA memory for IQ-%d SGLIST\n", + q_no); + goto sglist_alloc_err; + } + + /* allocate memory to manage Tx packets pending completion */ + buff_info_size = OCTEP_IQ_TXBUFF_INFO_SIZE * iq->max_count; + iq->buff_info = vzalloc(buff_info_size); + if (!iq->buff_info) { + dev_err(iq->dev, + "Failed to allocate buff info for IQ-%d\n", q_no); + goto buff_info_err; + } + + /* Setup sglist addresses in tx_buffer entries */ + for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) { + struct octep_tx_buffer *tx_buffer; + + tx_buffer = &iq->buff_info[i]; + tx_buffer->sglist = + &iq->sglist[i * OCTEP_SGLIST_ENTRIES_PER_PKT]; + tx_buffer->sglist_dma = + iq->sglist_dma + (i * OCTEP_SGLIST_SIZE_PER_PKT); + } + + octep_iq_reset_indices(iq); + oct->hw_ops.setup_iq_regs(oct, q_no); + + oct->num_iqs++; + return 0; + +buff_info_err: + dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma); +sglist_alloc_err: + dma_free_coherent(iq->dev, desc_ring_size, + iq->desc_ring, iq->desc_ring_dma); +desc_dma_alloc_err: + vfree(iq); + oct->iq[q_no] = NULL; +iq_alloc_err: + return -1; +} + +/** + * octep_free_iq() - Free Tx queue resources. + * + * @iq: Octeon Tx queue data structure. + * + * Free all the resources allocated for a Tx queue. 
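+ * + * This is the inverse of octep_setup_iq(): it releases the descriptor + * ring and sglist DMA memory, frees the buff_info array and decrements + * oct->num_iqs.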
+ */ +static void octep_free_iq(struct octep_iq *iq) +{ + struct octep_device *oct = iq->octep_dev; + u64 desc_ring_size, sglist_size; + int q_no = iq->q_no; + + desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); + + if (iq->buff_info) + vfree(iq->buff_info); + + if (iq->desc_ring) + dma_free_coherent(iq->dev, desc_ring_size, + iq->desc_ring, iq->desc_ring_dma); + + sglist_size = OCTEP_SGLIST_SIZE_PER_PKT * + CFG_GET_IQ_NUM_DESC(oct->conf); + if (iq->sglist) + dma_free_coherent(iq->dev, sglist_size, + iq->sglist, iq->sglist_dma); + + vfree(iq); + oct->iq[q_no] = NULL; + oct->num_iqs--; +} + +/** + * octep_setup_iqs() - Set up resources for all Tx queues. + * + * @oct: Octeon device private data structure. + */ +int octep_setup_iqs(struct octep_device *oct) +{ + int i; + + oct->num_iqs = 0; + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + if (octep_setup_iq(oct, i)) { + dev_err(&oct->pdev->dev, + "Failed to setup IQ(TxQ)-%d.\n", i); + goto iq_setup_err; + } + dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i); + } + + return 0; + +iq_setup_err: + while (i) { + i--; + octep_free_iq(oct->iq[i]); + } + return -1; +} + +/** + * octep_free_iqs() - Free resources of all Tx queues. + * + * @oct: Octeon device private data structure. + */ +void octep_free_iqs(struct octep_device *oct) +{ + int i; + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + octep_free_iq(oct->iq[i]); + dev_dbg(&oct->pdev->dev, + "Successfully destroyed IQ(TxQ)-%d.\n", i); + } + oct->num_iqs = 0; +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h new file mode 100644 index 000000000000..2ef57980eb47 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h @@ -0,0 +1,284 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#ifndef _OCTEP_TX_H_ +#define _OCTEP_TX_H_ +Please note +#define IQ_SEND_OK 0 +#define IQ_SEND_STOP 1 +#define IQ_SEND_FAILED -1 + +#define TX_BUFTYPE_NONE 0 +#define TX_BUFTYPE_NET 1 +#define TX_BUFTYPE_NET_SG 2 +#define NUM_TX_BUFTYPES 3 + +/* Hardware format for Scatter/Gather list */ +struct octep_tx_sglist_desc { + u16 len[4]; + dma_addr_t dma_ptr[4]; +}; + +/* Each Scatter/Gather entry sent to hardware holds four pointers. + * So, number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1' + * is for the main skb which also goes as a gather buffer to Octeon hardware. + * To allocate sufficient SGLIST entries for a packet with max fragments, + * align by adding 3 before calculating max SGLIST entries per packet.
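+ * For example, with MAX_SKB_FRAGS == 17 (the usual value with 4K pages), + * OCTEP_SGLIST_ENTRIES_PER_PKT works out to (17 + 1 + 3) / 4 = 5 entries, + * i.e. room for up to 20 buffer pointers per packet.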
+ */ +#define OCTEP_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4) +#define OCTEP_SGLIST_SIZE_PER_PKT \ + (OCTEP_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_tx_sglist_desc)) + +struct octep_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + struct octep_tx_sglist_desc *sglist; + dma_addr_t sglist_dma; + u8 gather; +}; + +#define OCTEP_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_tx_buffer)) + +/* Hardware interface Tx statistics */ +struct octep_iface_tx_stats { + /* Packets dropped due to excessive collisions */ + u64 xscol; + + /* Packets dropped due to excessive deferral */ + u64 xsdef; + + /* Packets sent that experienced multiple collisions before successful + * transmission + */ + u64 mcol; + + /* Packets sent that experienced a single collision before successful + * transmission + */ + u64 scol; + + /* Total octets sent on the interface */ + u64 octs; + + /* Total frames sent on the interface */ + u64 pkts; + + /* Packets sent with an octet count < 64 */ + u64 hist_lt64; + + /* Packets sent with an octet count == 64 */ + u64 hist_eq64; + + /* Packets sent with an octet count of 65–127 */ + u64 hist_65to127; + + /* Packets sent with an octet count of 128–255 */ + u64 hist_128to255; + + /* Packets sent with an octet count of 256–511 */ + u64 hist_256to511; + + /* Packets sent with an octet count of 512–1023 */ + u64 hist_512to1023; + + /* Packets sent with an octet count of 1024–1518 */ + u64 hist_1024to1518; + + /* Packets sent with an octet count of > 1518 */ + u64 hist_gt1518; + + /* Packets sent to a broadcast DMAC */ + u64 bcst; + + /* Packets sent to the multicast DMAC */ + u64 mcst; + + /* Packets sent that experienced a transmit underflow and were + * truncated + */ + u64 undflw; + + /* Control/PAUSE packets sent */ + u64 ctl; +}; + +/* Input Queue statistics. Each input queue has its own set of these + * stats fields. + */ +struct octep_iq_stats { + /* Instructions posted to this queue. */ + u64 instr_posted; + + /* Instructions copied by hardware for processing. */ + u64 instr_completed; + + /* Instructions that could not be processed. */ + u64 instr_dropped; + + /* Bytes sent through this queue. */ + u64 bytes_sent; + + /* Gather entries sent through this queue. */ + u64 sgentry_sent; + + /* Number of transmit failures due to TX_BUSY */ + u64 tx_busy; + + /* Number of times the queue is restarted */ + u64 restart_cnt; +}; + +/* The instruction (input) queue. + * The input queue is used to post raw (instruction) mode data or packet + * data to the Octeon device from the host. Each input queue (up to 4) of + * an Octeon device has one such structure to represent it. + */ +struct octep_iq { + u32 q_no; + + struct octep_device *octep_dev; + struct net_device *netdev; + struct device *dev; + struct netdev_queue *netdev_q; + + /* Index in input ring where driver should write the next packet */ + u16 host_write_index; + + /* Index in input ring where Octeon is expected to read next packet */ + u16 octep_read_index; + + /* This index aids in finding the window in the queue where Octeon + * has read the commands. + */ + u16 flush_index; + + /* Statistics for this input queue. */ + struct octep_iq_stats stats; + + /* This field keeps track of the instructions pending in this queue. */ + atomic_t instr_pending; + + /* Pointer to the Virtual Base addr of the input ring. */ + struct octep_tx_desc_hw *desc_ring; + + /* DMA mapped base address of the input descriptor ring. */ + dma_addr_t desc_ring_dma; + + /* Info of Tx buffers pending completion.
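+ * One entry per ring descriptor: an entry is written at host_write_index + * when a packet is posted and reaped at flush_index on completion.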
*/ + struct octep_tx_buffer *buff_info; + + /* Base pointer to Scatter/Gather lists for all ring descriptors. */ + struct octep_tx_sglist_desc *sglist; + + /* DMA mapped addr of Scatter Gather Lists */ + dma_addr_t sglist_dma; + + /* Octeon doorbell register for the ring. */ + u8 __iomem *doorbell_reg; + + /* Octeon instruction count register for this ring. */ + u8 __iomem *inst_cnt_reg; + + /* Interrupt level register for this ring */ + u8 __iomem *intr_lvl_reg; + + /* Maximum no. of instructions in this queue. */ + u32 max_count; + u32 ring_size_mask; + + u32 pkt_in_done; + u32 pkts_processed; + + u32 status; + + /* Number of instructions pending to be posted to Octeon. */ + u32 fill_cnt; + + /* The max. number of instructions that can be held pending by the + * driver before ringing the doorbell. + */ + u32 fill_threshold; +}; + +/* Hardware Tx Instruction Header */ +struct octep_instr_hdr { + /* Data Len */ + u64 tlen:16; + + /* Reserved */ + u64 rsvd:20; + + /* PKIND for SDP */ + u64 pkind:6; + + /* Front Data size */ + u64 fsz:6; + + /* No. of entries in gather list */ + u64 gsz:14; + + /* Gather indicator 1=gather */ + u64 gather:1; + + /* Reserved3 */ + u64 reserved3:1; +}; + +/* Hardware Tx completion response header */ +struct octep_instr_resp_hdr { + /* Request ID */ + u64 rid:16; + + /* PCIe port to use for response */ + u64 pcie_port:3; + + /* Scatter indicator 1=scatter */ + u64 scatter:1; + + /* Size of Expected result OR no. of entries in scatter list */ + u64 rlenssz:14; + + /* Desired destination port for result */ + u64 dport:6; + + /* Opcode Specific parameters */ + u64 param:8; + + /* Opcode for the return packet */ + u64 opcode:16; +}; + +/* 64-byte Tx instruction format. + * Format of instruction for a 64-byte mode input queue. + * + * Only the first 16 bytes (dptr and ih) are mandatory; the rest are + * optional and filled by the driver based on firmware/hardware + * capabilities. These optional headers are together called Front Data, + * and their size is described by ih->fsz. + */ +struct octep_tx_desc_hw { + /* Pointer where the input data is available. */ + u64 dptr; + + /* Instruction Header. */ + union { + struct octep_instr_hdr ih; + u64 ih64; + }; + + /* Pointer where the response for a RAW mode packet will be written + * by Octeon. + */ + u64 rptr; + + /* Input Instruction Response Header. */ + struct octep_instr_resp_hdr irh; + + /* Additional headers available in a 64-byte instruction.
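+ * These carry the optional Front Data described above; ih.fsz tells the + * hardware how many bytes of them are valid.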
*/ + u64 exhdr[4]; +}; + +#define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw)) +#endif /* _OCTEP_TX_H_ */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index d1eddb769a41..2ad73b180276 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -248,7 +248,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) - return -ENOSPC; + return -ENOMEM; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); @@ -407,7 +407,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) - return -ENOSPC; + return -ENOMEM; /* Get the maximum width of a column */ lf_str_size = get_max_column_width(rvu); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c index 47c899c08951..3a141f2db812 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c @@ -35,6 +35,10 @@ struct prestera_acl_rule_entry { u8 valid:1; } accept, drop, trap; struct { + u8 valid:1; + struct prestera_acl_action_police i; + } police; + struct { struct prestera_acl_action_jump i; u8 valid:1; } jump; @@ -421,13 +425,6 @@ int prestera_acl_rule_add(struct prestera_switch *sw, rule->re_arg.vtcam_id = ruleset->vtcam_id; rule->re_key.prio = rule->priority; - /* setup counter */ - rule->re_arg.count.valid = true; - err = prestera_acl_chain_to_client(ruleset->ht_key.chain_index, - &rule->re_arg.count.client); - if (err) - goto err_rule_add; - rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key); err = WARN_ON(rule->re) ? -EEXIST : 0; if (err) @@ -540,6 +537,12 @@ static int __prestera_acl_rule_entry2hw_add(struct prestera_switch *sw, act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP; act_num++; } + /* police */ + if (e->police.valid) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_POLICE; + act_hw[act_num].police = e->police.i; + act_num++; + } /* jump */ if (e->jump.valid) { act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_JUMP; @@ -564,6 +567,9 @@ __prestera_acl_rule_entry_act_destruct(struct prestera_switch *sw, { /* counter */ prestera_counter_put(sw->counter, e->counter.block, e->counter.id); + /* police */ + if (e->police.valid) + prestera_hw_policer_release(sw, e->police.i.id); } void prestera_acl_rule_entry_destroy(struct prestera_acl *acl, @@ -586,6 +592,8 @@ __prestera_acl_rule_entry_act_construct(struct prestera_switch *sw, struct prestera_acl_rule_entry *e, struct prestera_acl_rule_entry_arg *arg) { + int err; + /* accept */ e->accept.valid = arg->accept.valid; /* drop */ @@ -595,10 +603,26 @@ __prestera_acl_rule_entry_act_construct(struct prestera_switch *sw, /* jump */ e->jump.valid = arg->jump.valid; e->jump.i = arg->jump.i; + /* police */ + if (arg->police.valid) { + u8 type = arg->police.ingress ? 
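+ /* direction comes from the flower rule binding; prestera_flower.c currently only sets ingress */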
PRESTERA_POLICER_TYPE_INGRESS : + PRESTERA_POLICER_TYPE_EGRESS; + + err = prestera_hw_policer_create(sw, type, &e->police.i.id); + if (err) + goto err_out; + + err = prestera_hw_policer_sr_tcm_set(sw, e->police.i.id, + arg->police.rate, + arg->police.burst); + if (err) { + prestera_hw_policer_release(sw, e->police.i.id); + goto err_out; + } + e->police.valid = arg->police.valid; + } /* counter */ if (arg->count.valid) { - int err; - err = prestera_counter_get(sw->counter, arg->count.client, &e->counter.block, &e->counter.id); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h index 6d2ad27682d1..f963e1e0c0f0 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h @@ -56,6 +56,7 @@ enum prestera_acl_rule_action { PRESTERA_ACL_RULE_ACTION_TRAP = 2, PRESTERA_ACL_RULE_ACTION_JUMP = 5, PRESTERA_ACL_RULE_ACTION_COUNT = 7, + PRESTERA_ACL_RULE_ACTION_POLICE = 8, PRESTERA_ACL_RULE_ACTION_MAX }; @@ -74,6 +75,10 @@ struct prestera_acl_action_jump { u32 index; }; +struct prestera_acl_action_police { + u32 id; +}; + struct prestera_acl_action_count { u32 id; }; @@ -86,6 +91,7 @@ struct prestera_acl_rule_entry_key { struct prestera_acl_hw_action_info { enum prestera_acl_rule_action id; union { + struct prestera_acl_action_police police; struct prestera_acl_action_count count; struct prestera_acl_action_jump jump; }; @@ -107,6 +113,12 @@ struct prestera_acl_rule_entry_arg { } jump; struct { u8 valid:1; + u64 rate; + u64 burst; + bool ingress; + } police; + struct { + u8 valid:1; u32 client; } count; }; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c index 921959a980ee..d43e503c644f 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c @@ -70,6 +70,24 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block, if (!flow_action_has_entries(flow_action)) return 0; + if (!flow_action_mixed_hw_stats_check(flow_action, extack)) + return -EOPNOTSUPP; + + act = flow_action_first_entry_get(flow_action); + if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) { + /* Nothing to do */ + } else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) { + /* setup counter first */ + rule->re_arg.count.valid = true; + err = prestera_acl_chain_to_client(chain_index, + &rule->re_arg.count.client); + if (err) + return err; + } else { + NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type"); + return -EOPNOTSUPP; + } + flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_ACCEPT: @@ -90,6 +108,16 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block, rule->re_arg.trap.valid = 1; break; + case FLOW_ACTION_POLICE: + if (rule->re_arg.police.valid) + return -EEXIST; + + rule->re_arg.police.valid = 1; + rule->re_arg.police.rate = + act->police.rate_bytes_ps; + rule->re_arg.police.burst = act->police.burst; + rule->re_arg.police.ingress = true; + break; case FLOW_ACTION_GOTO: err = prestera_flower_parse_goto_action(block, rule, chain_index, diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c index c66cc929c820..79fd3cac539d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c @@ -74,6 +74,10 @@ enum prestera_cmd_type_t { 
PRESTERA_CMD_TYPE_SPAN_UNBIND = 0x1102, PRESTERA_CMD_TYPE_SPAN_RELEASE = 0x1103, + PRESTERA_CMD_TYPE_POLICER_CREATE = 0x1500, + PRESTERA_CMD_TYPE_POLICER_RELEASE = 0x1501, + PRESTERA_CMD_TYPE_POLICER_SET = 0x1502, + PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET = 0x2000, PRESTERA_CMD_TYPE_ACK = 0x10000, @@ -164,6 +168,10 @@ enum { }; enum { + PRESTERA_POLICER_MODE_SR_TCM +}; + +enum { PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT = 0, PRESTERA_HW_FDB_ENTRY_TYPE_LAG = 1, PRESTERA_HW_FDB_ENTRY_TYPE_MAX = 2, @@ -430,6 +438,9 @@ struct prestera_msg_acl_action { } jump; struct { __le32 id; + } police; + struct { + __le32 id; } count; __le32 reserved[6]; }; @@ -570,6 +581,26 @@ struct mvsw_msg_cpu_code_counter_ret { __le64 packet_count; }; +struct prestera_msg_policer_req { + struct prestera_msg_cmd cmd; + __le32 id; + union { + struct { + __le64 cir; + __le32 cbs; + } __packed sr_tcm; /* make sure always 12 bytes size */ + __le32 reserved[6]; + }; + u8 mode; + u8 type; + u8 pad[2]; +}; + +struct prestera_msg_policer_resp { + struct prestera_msg_ret ret; + __le32 id; +}; + struct prestera_msg_event { __le16 type; __le16 id; @@ -622,6 +653,7 @@ static void prestera_hw_build_tests(void) BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36); BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_lpm_req) != 36); + BUILD_BUG_ON(sizeof(struct prestera_msg_policer_req) != 36); /* structure that are part of req/resp fw messages */ BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16); @@ -640,6 +672,7 @@ static void prestera_hw_build_tests(void) BUILD_BUG_ON(sizeof(struct prestera_msg_counter_resp) != 24); BUILD_BUG_ON(sizeof(struct prestera_msg_rif_resp) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_policer_resp) != 12); /* check events */ BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20); @@ -1192,6 +1225,9 @@ prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action, case PRESTERA_ACL_RULE_ACTION_JUMP: action->jump.index = __cpu_to_le32(info->jump.index); break; + case PRESTERA_ACL_RULE_ACTION_POLICE: + action->police.id = __cpu_to_le32(info->police.id); + break; case PRESTERA_ACL_RULE_ACTION_COUNT: action->count.id = __cpu_to_le32(info->count.id); break; @@ -2163,3 +2199,48 @@ int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id, return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_CLEAR, &req.cmd, sizeof(req)); } + +int prestera_hw_policer_create(struct prestera_switch *sw, u8 type, + u32 *policer_id) +{ + struct prestera_msg_policer_resp resp; + struct prestera_msg_policer_req req = { + .type = type + }; + int err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_POLICER_CREATE, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *policer_id = __le32_to_cpu(resp.id); + return 0; +} + +int prestera_hw_policer_release(struct prestera_switch *sw, + u32 policer_id) +{ + struct prestera_msg_policer_req req = { + .id = __cpu_to_le32(policer_id) + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_RELEASE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw, + u32 policer_id, u64 cir, u32 cbs) +{ + struct prestera_msg_policer_req req = { + .mode = PRESTERA_POLICER_MODE_SR_TCM, + .id = __cpu_to_le32(policer_id), + .sr_tcm = { + .cir = __cpu_to_le64(cir), + .cbs = __cpu_to_le32(cbs) + } + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_SET, + &req.cmd, sizeof(req)); +} diff --git 
a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h index fd896a8838bb..579d9ba23ffc 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h @@ -107,6 +107,11 @@ enum { PRESTERA_STP_FORWARD, }; +enum { + PRESTERA_POLICER_TYPE_INGRESS, + PRESTERA_POLICER_TYPE_EGRESS +}; + enum prestera_hw_cpu_code_cnt_t { PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP = 0, PRESTERA_HW_CPU_CODE_CNT_TYPE_TRAP = 1, @@ -288,4 +293,12 @@ prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code, enum prestera_hw_cpu_code_cnt_t counter_type, u64 *packet_count); +/* Policer API */ +int prestera_hw_policer_create(struct prestera_switch *sw, u8 type, + u32 *policer_id); +int prestera_hw_policer_release(struct prestera_switch *sw, + u32 policer_id); +int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw, + u32 policer_id, u64 cir, u32 cbs); + #endif /* _PRESTERA_HW_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c index 6c5618cf4f08..3754d8aec76d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c @@ -4,6 +4,7 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/inetdevice.h> +#include <net/inet_dscp.h> #include <net/switchdev.h> #include <linux/rhashtable.h> @@ -26,7 +27,7 @@ struct prestera_kern_fib_cache { /* Indicate if route is not overlapped by another table */ struct rhash_head ht_node; /* node of prestera_router */ struct fib_info *fi; - u8 kern_tos; + dscp_t kern_dscp; u8 kern_type; bool reachable; }; @@ -88,7 +89,7 @@ prestera_kern_fib_cache_destroy(struct prestera_switch *sw, static struct prestera_kern_fib_cache * prestera_kern_fib_cache_create(struct prestera_switch *sw, struct prestera_kern_fib_cache_key *key, - struct fib_info *fi, u8 tos, u8 type) + struct fib_info *fi, dscp_t dscp, u8 type) { struct prestera_kern_fib_cache *fib_cache; int err; @@ -100,7 +101,7 @@ prestera_kern_fib_cache_create(struct prestera_switch *sw, memcpy(&fib_cache->key, key, sizeof(*key)); fib_info_hold(fi); fib_cache->fi = fi; - fib_cache->kern_tos = tos; + fib_cache->kern_dscp = dscp; fib_cache->kern_type = type; err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht, @@ -132,7 +133,7 @@ __prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw, fri.tb_id = fc->key.kern_tb_id; fri.dst = fc->key.addr.u.ipv4; fri.dst_len = fc->key.prefix_len; - fri.tos = fc->kern_tos; + fri.dscp = fc->kern_dscp; fri.type = fc->kern_type; /* flags begin */ fri.offload = offload; @@ -305,7 +306,7 @@ prestera_k_arb_fib_evt(struct prestera_switch *sw, if (replace) { fib_cache = prestera_kern_fib_cache_create(sw, &fc_key, fen_info->fi, - fen_info->tos, + fen_info->dscp, fen_info->type); if (!fib_cache) { dev_err(sw->dev->dev, "fib_cache == NULL"); diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index cf03c67fbf40..c1e985416c0e 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -50,7 +50,6 @@ #define PHY_RETRIES 1000 #define ETH_JUMBO_MTU 9000 #define TX_WATCHDOG (5 * HZ) -#define NAPI_WEIGHT 64 #define BLINK_MS 250 #define LINK_HZ HZ @@ -3833,7 +3832,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, dev->features |= NETIF_F_HIGHDMA; skge = netdev_priv(dev); - netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT); + 
netif_napi_add(dev, &skge->napi, skge_poll, NAPI_POLL_WEIGHT); skge->netdev = dev; skge->hw = hw; skge->msg_enable = netif_msg_init(debug, default_msg); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index ea16b1dd6a98..a1e907c85217 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -63,7 +63,6 @@ #define TX_DEF_PENDING 63 #define TX_WATCHDOG (5 * HZ) -#define NAPI_WEIGHT 64 #define PHY_RETRIES 1000 #define SKY2_EEPROM_MAGIC 0x9955aabb @@ -4938,7 +4937,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT); + netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_POLL_WEIGHT); err = register_netdev(dev); if (err) { diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig index 86d356b4388d..da4ec235d146 100644 --- a/drivers/net/ethernet/mediatek/Kconfig +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK if NET_VENDOR_MEDIATEK +config NET_MEDIATEK_SOC_WED + depends on ARCH_MEDIATEK || COMPILE_TEST + def_bool NET_MEDIATEK_SOC != n + config NET_MEDIATEK_SOC tristate "MediaTek SoC Gigabit Ethernet support" depends on NET_DSA || !NET_DSA diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile index 79d4cdbbcbf5..45ba0970504a 100644 --- a/drivers/net/ethernet/mediatek/Makefile +++ b/drivers/net/ethernet/mediatek/Makefile @@ -5,4 +5,9 @@ obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o +mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o +ifdef CONFIG_DEBUG_FS +mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o +endif +obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index f02d07ec5ccb..31c5da5d6b72 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -9,6 +9,7 @@ #include <linux/of_device.h> #include <linux/of_mdio.h> #include <linux/of_net.h> +#include <linux/of_address.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <linux/clk.h> @@ -20,9 +21,11 @@ #include <linux/pinctrl/devinfo.h> #include <linux/phylink.h> #include <linux/jhash.h> +#include <linux/bitfield.h> #include <net/dsa.h> #include "mtk_eth_soc.h" +#include "mtk_wed.h" static int mtk_msg_level = -1; module_param_named(msg_level, mtk_msg_level, int, 0); @@ -786,7 +789,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) dma_addr_t dma_addr; int i; - eth->scratch_ring = dma_alloc_coherent(eth->dev, + eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, cnt * sizeof(struct mtk_tx_dma), ð->phy_scratch_ring, GFP_ATOMIC); @@ -798,10 +801,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) if (unlikely(!eth->scratch_head)) return -ENOMEM; - dma_addr = dma_map_single(eth->dev, + dma_addr = dma_map_single(eth->dma_dev, eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(eth->dev, dma_addr))) + if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) return -ENOMEM; phy_ring_tail = eth->phy_scratch_ring + @@ -855,26 +858,26 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, { if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { - 
dma_unmap_single(eth->dev, + dma_unmap_single(eth->dma_dev, dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE); } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { - dma_unmap_page(eth->dev, + dma_unmap_page(eth->dma_dev, dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE); } } else { if (dma_unmap_len(tx_buf, dma_len0)) { - dma_unmap_page(eth->dev, + dma_unmap_page(eth->dma_dev, dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE); } if (dma_unmap_len(tx_buf, dma_len1)) { - dma_unmap_page(eth->dev, + dma_unmap_page(eth->dma_dev, dma_unmap_addr(tx_buf, dma_addr1), dma_unmap_len(tx_buf, dma_len1), DMA_TO_DEVICE); @@ -952,9 +955,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, if (skb_vlan_tag_present(skb)) txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); - mapped_addr = dma_map_single(eth->dev, skb->data, + mapped_addr = dma_map_single(eth->dma_dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) + if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr))) return -ENOMEM; WRITE_ONCE(itxd->txd1, mapped_addr); @@ -993,10 +996,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); - mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, + mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset, frag_map_size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) + if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr))) goto err_dma; if (i == nr_frags - 1 && @@ -1237,7 +1240,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, struct net_device *netdev; unsigned int pktlen; dma_addr_t dma_addr; - u32 hash; + u32 hash, reason; int mac; ring = mtk_get_rx_ring(eth); @@ -1274,18 +1277,18 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, netdev->stats.rx_dropped++; goto release_desc; } - dma_addr = dma_map_single(eth->dev, + dma_addr = dma_map_single(eth->dma_dev, new_data + NET_SKB_PAD + eth->ip_align, ring->buf_size, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { + if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) { skb_free_frag(new_data); netdev->stats.rx_dropped++; goto release_desc; } - dma_unmap_single(eth->dev, trxd.rxd1, + dma_unmap_single(eth->dma_dev, trxd.rxd1, ring->buf_size, DMA_FROM_DEVICE); /* receive data */ @@ -1313,6 +1316,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); } + reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4); + if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) + mtk_ppe_check_skb(eth->ppe, skb, + trxd.rxd4 & MTK_RXD4_FOE_ENTRY); + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && (trxd.rxd2 & RX_DMA_VTAG)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), @@ -1558,7 +1566,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) if (!ring->buf) goto no_tx_mem; - ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, + ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, &ring->phys, GFP_ATOMIC); if (!ring->dma) goto no_tx_mem; @@ -1576,7 +1584,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) * descriptors in ring->dma_pdma. 
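* (This shadow ring is only allocated on SoCs without the QDMA engine, * as the MTK_QDMA capability check below shows.)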
*/ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { - ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, + ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, &ring->phys_pdma, GFP_ATOMIC); if (!ring->dma_pdma) @@ -1635,7 +1643,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) } if (ring->dma) { - dma_free_coherent(eth->dev, + dma_free_coherent(eth->dma_dev, MTK_DMA_SIZE * sizeof(*ring->dma), ring->dma, ring->phys); @@ -1643,7 +1651,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) } if (ring->dma_pdma) { - dma_free_coherent(eth->dev, + dma_free_coherent(eth->dma_dev, MTK_DMA_SIZE * sizeof(*ring->dma_pdma), ring->dma_pdma, ring->phys_pdma); @@ -1688,18 +1696,18 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) return -ENOMEM; } - ring->dma = dma_alloc_coherent(eth->dev, + ring->dma = dma_alloc_coherent(eth->dma_dev, rx_dma_size * sizeof(*ring->dma), &ring->phys, GFP_ATOMIC); if (!ring->dma) return -ENOMEM; for (i = 0; i < rx_dma_size; i++) { - dma_addr_t dma_addr = dma_map_single(eth->dev, + dma_addr_t dma_addr = dma_map_single(eth->dma_dev, ring->data[i] + NET_SKB_PAD + eth->ip_align, ring->buf_size, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(eth->dev, dma_addr))) + if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) return -ENOMEM; ring->dma[i].rxd1 = (unsigned int)dma_addr; @@ -1735,7 +1743,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) continue; if (!ring->dma[i].rxd1) continue; - dma_unmap_single(eth->dev, + dma_unmap_single(eth->dma_dev, ring->dma[i].rxd1, ring->buf_size, DMA_FROM_DEVICE); @@ -1746,7 +1754,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) } if (ring->dma) { - dma_free_coherent(eth->dev, + dma_free_coherent(eth->dma_dev, ring->dma_size * sizeof(*ring->dma), ring->dma, ring->phys); @@ -2099,7 +2107,7 @@ static void mtk_dma_free(struct mtk_eth *eth) if (eth->netdev[i]) netdev_reset_queue(eth->netdev[i]); if (eth->scratch_ring) { - dma_free_coherent(eth->dev, + dma_free_coherent(eth->dma_dev, MTK_DMA_SIZE * sizeof(struct mtk_tx_dma), eth->scratch_ring, eth->phy_scratch_ring); @@ -2267,7 +2275,7 @@ static int mtk_open(struct net_device *dev) if (err) return err; - if (eth->soc->offload_version && mtk_ppe_start(ð->ppe) == 0) + if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0) gdm_config = MTK_GDMA_TO_PPE; mtk_gdm_config(eth, gdm_config); @@ -2341,7 +2349,7 @@ static int mtk_stop(struct net_device *dev) mtk_dma_free(eth); if (eth->soc->offload_version) - mtk_ppe_stop(ð->ppe); + mtk_ppe_stop(eth->ppe); return 0; } @@ -2448,6 +2456,8 @@ static void mtk_dim_tx(struct work_struct *work) static int mtk_hw_init(struct mtk_eth *eth) { + u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | + ETHSYS_DMA_AG_MAP_PPE; int i, val, ret; if (test_and_set_bit(MTK_HW_INIT, ð->state)) @@ -2460,6 +2470,10 @@ static int mtk_hw_init(struct mtk_eth *eth) if (ret) goto err_disable_pm; + if (eth->ethsys) + regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, + of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { ret = device_reset(eth->dev); if (ret) { @@ -3040,6 +3054,35 @@ free_netdev: return err; } +void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev) +{ + struct net_device *dev, *tmp; + LIST_HEAD(dev_list); + int i; + + rtnl_lock(); + + for (i = 0; i < MTK_MAC_COUNT; i++) { + dev = eth->netdev[i]; + + if (!dev || !(dev->flags & IFF_UP)) + continue; + + 
list_add_tail(&dev->close_list, &dev_list); + } + + dev_close_many(&dev_list, false); + + eth->dma_dev = dma_dev; + + list_for_each_entry_safe(dev, tmp, &dev_list, close_list) { + list_del_init(&dev->close_list); + dev_open(dev, NULL); + } + + rtnl_unlock(); +} + static int mtk_probe(struct platform_device *pdev) { struct device_node *mac_np; @@ -3053,6 +3096,7 @@ static int mtk_probe(struct platform_device *pdev) eth->soc = of_device_get_match_data(&pdev->dev); eth->dev = &pdev->dev; + eth->dma_dev = &pdev->dev; eth->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(eth->base)) return PTR_ERR(eth->base); @@ -3101,6 +3145,16 @@ static int mtk_probe(struct platform_device *pdev) } } + if (of_dma_is_coherent(pdev->dev.of_node)) { + struct regmap *cci; + + cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "cci-control-port"); + /* enable CPU/bus coherency */ + if (!IS_ERR(cci)) + regmap_write(cci, 0, 3); + } + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii), GFP_KERNEL); @@ -3123,6 +3177,22 @@ static int mtk_probe(struct platform_device *pdev) } } + for (i = 0;; i++) { + struct device_node *np = of_parse_phandle(pdev->dev.of_node, + "mediatek,wed", i); + static const u32 wdma_regs[] = { + MTK_WDMA0_BASE, + MTK_WDMA1_BASE + }; + void __iomem *wdma; + + if (!np || i >= ARRAY_SIZE(wdma_regs)) + break; + + wdma = eth->base + wdma_regs[i]; + mtk_wed_add_hw(np, eth, wdma, i); + } + for (i = 0; i < 3; i++) { if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) eth->irq[i] = eth->irq[0]; @@ -3198,10 +3268,11 @@ static int mtk_probe(struct platform_device *pdev) } if (eth->soc->offload_version) { - err = mtk_ppe_init(ð->ppe, eth->dev, - eth->base + MTK_ETH_PPE_BASE, 2); - if (err) + eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2); + if (!eth->ppe) { + err = -ENOMEM; goto err_free_dev; + } err = mtk_eth_offload_init(eth); if (err) @@ -3227,9 +3298,9 @@ static int mtk_probe(struct platform_device *pdev) */ init_dummy_netdev(ð->dummy_dev); netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx, - MTK_NAPI_WEIGHT); + NAPI_POLL_WEIGHT); netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx, - MTK_NAPI_WEIGHT); + NAPI_POLL_WEIGHT); platform_set_drvdata(pdev, eth); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index c9d42be314b5..b04977fa84f6 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -24,7 +24,6 @@ #define MTK_MAX_RX_LENGTH_2K 2048 #define MTK_TX_DMA_BUF_LEN 0x3fff #define MTK_DMA_SIZE 512 -#define MTK_NAPI_WEIGHT 64 #define MTK_MAC_COUNT 2 #define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN) #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) @@ -295,6 +294,9 @@ #define MTK_GDM1_TX_GPCNT 0x2438 #define MTK_STAT_OFFSET 0x40 +#define MTK_WDMA0_BASE 0x2800 +#define MTK_WDMA1_BASE 0x2c00 + /* QDMA descriptor txd4 */ #define TX_DMA_CHKSUM (0x7 << 29) #define TX_DMA_TSO BIT(28) @@ -465,6 +467,12 @@ #define RSTCTRL_FE BIT(6) #define RSTCTRL_PPE BIT(31) +/* ethernet dma channel agent map */ +#define ETHSYS_DMA_AG_MAP 0x408 +#define ETHSYS_DMA_AG_MAP_PDMA BIT(0) +#define ETHSYS_DMA_AG_MAP_QDMA BIT(1) +#define ETHSYS_DMA_AG_MAP_PPE BIT(2) + /* SGMII subsystem config registers */ /* Register to auto-negotiation restart */ #define SGMSYS_PCS_CONTROL_1 0x0 @@ -882,6 +890,7 @@ struct mtk_sgmii { /* struct mtk_eth - This is the main datasructure for holding the state * of the driver * @dev: The 
device pointer + * @dev: The device pointer used for dma mapping/alloc * @base: The mapped register i/o base * @page_lock: Make sure that register operations are atomic * @tx_irq__lock: Make sure that IRQ register operations are atomic @@ -925,6 +934,7 @@ struct mtk_sgmii { struct mtk_eth { struct device *dev; + struct device *dma_dev; void __iomem *base; spinlock_t page_lock; spinlock_t tx_irq_lock; @@ -974,7 +984,7 @@ struct mtk_eth { u32 rx_dma_l4_valid; int ip_align; - struct mtk_ppe ppe; + struct mtk_ppe *ppe; struct rhashtable flow_table; }; @@ -1023,6 +1033,7 @@ int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); int mtk_eth_offload_init(struct mtk_eth *eth); int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data); +void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev); #endif /* MTK_ETH_H */ diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index 3ad10c793308..683f89f8e3b2 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -6,9 +6,22 @@ #include <linux/iopoll.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <net/dsa.h> +#include "mtk_eth_soc.h" #include "mtk_ppe.h" #include "mtk_ppe_regs.h" +static DEFINE_SPINLOCK(ppe_lock); + +static const struct rhashtable_params mtk_flow_l2_ht_params = { + .head_offset = offsetof(struct mtk_flow_entry, l2_node), + .key_offset = offsetof(struct mtk_flow_entry, data.bridge), + .key_len = offsetof(struct mtk_foe_bridge, key_end), + .automatic_shrinking = true, +}; + static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val) { writel(val, ppe->base + reg); @@ -41,6 +54,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val) return ppe_m32(ppe, reg, val, 0); } +static u32 mtk_eth_timestamp(struct mtk_eth *eth) +{ + return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; +} + static int mtk_ppe_wait_busy(struct mtk_ppe *ppe) { int ret; @@ -76,13 +94,6 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e) u32 hash; switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) { - case MTK_PPE_PKT_TYPE_BRIDGE: - hv1 = e->bridge.src_mac_lo; - hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16); - hv2 = e->bridge.src_mac_hi >> 16; - hv2 ^= e->bridge.dest_mac_lo; - hv3 = e->bridge.dest_mac_hi; - break; case MTK_PPE_PKT_TYPE_IPV4_ROUTE: case MTK_PPE_PKT_TYPE_IPV4_HNAPT: hv1 = e->ipv4.orig.ports; @@ -122,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry) { int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + if (type == MTK_PPE_PKT_TYPE_BRIDGE) + return &entry->bridge.l2; + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) return &entry->ipv6.l2; @@ -133,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry) { int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + if (type == MTK_PPE_PKT_TYPE_BRIDGE) + return &entry->bridge.ib2; + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) return &entry->ipv6.ib2; @@ -167,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto, if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) entry->ipv6.ports = ports_pad; - if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { + if (type == MTK_PPE_PKT_TYPE_BRIDGE) { + ether_addr_copy(entry->bridge.src_mac, src_mac); + ether_addr_copy(entry->bridge.dest_mac, dest_mac); + entry->bridge.ib2 = val; + l2 = &entry->bridge.l2; + } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { entry->ipv6.ib2 = val; l2 = 
&entry->ipv6.l2; } else { @@ -329,32 +351,167 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid) return 0; } +int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, + int bss, int wcid) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); + u32 *ib2 = mtk_foe_entry_ib2(entry); + + *ib2 &= ~MTK_FOE_IB2_PORT_MG; + *ib2 |= MTK_FOE_IB2_WDMA_WINFO; + if (wdma_idx) + *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX; + + l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) | + FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) | + FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq); + + return 0; +} + static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry) { return !(entry->ib1 & MTK_FOE_IB1_STATIC) && FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND; } -int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, - u16 timestamp) +static bool +mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data) +{ + int type, len; + + if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP) + return false; + + type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1); + if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE) + len = offsetof(struct mtk_foe_entry, ipv6._rsv); + else + len = offsetof(struct mtk_foe_entry, ipv4.ib2); + + return !memcmp(&entry->data.data, &data->data, len - 4); +} + +static void +__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + struct hlist_head *head; + struct hlist_node *tmp; + + if (entry->type == MTK_FLOW_TYPE_L2) { + rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node, + mtk_flow_l2_ht_params); + + head = &entry->l2_flows; + hlist_for_each_entry_safe(entry, tmp, head, l2_data.list) + __mtk_foe_entry_clear(ppe, entry); + return; + } + + hlist_del_init(&entry->list); + if (entry->hash != 0xffff) { + ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; + ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, + MTK_FOE_STATE_BIND); + dma_wmb(); + } + entry->hash = 0xffff; + + if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW) + return; + + hlist_del_init(&entry->l2_data.list); + kfree(entry); +} + +static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1) +{ + u16 timestamp; + u16 now; + + now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP; + timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; + + if (timestamp > now) + return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now; + else + return now - timestamp; +} + +static void +mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) { + struct mtk_flow_entry *cur; struct mtk_foe_entry *hwe; - u32 hash; + struct hlist_node *tmp; + int idle; + + idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1); + hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) { + int cur_idle; + u32 ib1; + + hwe = &ppe->foe_table[cur->hash]; + ib1 = READ_ONCE(hwe->ib1); + + if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) { + cur->hash = 0xffff; + __mtk_foe_entry_clear(ppe, cur); + continue; + } + + cur_idle = __mtk_foe_entry_idle_time(ppe, ib1); + if (cur_idle >= idle) + continue; + + idle = cur_idle; + entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; + entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; + } +} + +static void +mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + struct mtk_foe_entry *hwe; + struct mtk_foe_entry foe; + + spin_lock_bh(&ppe_lock); + + if (entry->type == MTK_FLOW_TYPE_L2) { + mtk_flow_entry_update_l2(ppe, entry); + goto out; + } + + if 
(entry->hash == 0xffff) + goto out; + + hwe = &ppe->foe_table[entry->hash]; + memcpy(&foe, hwe, sizeof(foe)); + if (!mtk_flow_entry_match(entry, &foe)) { + entry->hash = 0xffff; + goto out; + } + + entry->data.ib1 = foe.ib1; + +out: + spin_unlock_bh(&ppe_lock); +} + +static void +__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, + u16 hash) +{ + struct mtk_foe_entry *hwe; + u16 timestamp; + timestamp = mtk_eth_timestamp(ppe->eth); timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP; entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp); - hash = mtk_ppe_hash_entry(entry); hwe = &ppe->foe_table[hash]; - if (!mtk_foe_entry_usable(hwe)) { - hwe++; - hash++; - - if (!mtk_foe_entry_usable(hwe)) - return -ENOSPC; - } - memcpy(&hwe->data, &entry->data, sizeof(hwe->data)); wmb(); hwe->ib1 = entry->ib1; @@ -362,32 +519,195 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, dma_wmb(); mtk_ppe_cache_clear(ppe); +} - return hash; +void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + spin_lock_bh(&ppe_lock); + __mtk_foe_entry_clear(ppe, entry); + spin_unlock_bh(&ppe_lock); +} + +static int +mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + entry->type = MTK_FLOW_TYPE_L2; + + return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node, + mtk_flow_l2_ht_params); +} + +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1); + u32 hash; + + if (type == MTK_PPE_PKT_TYPE_BRIDGE) + return mtk_foe_entry_commit_l2(ppe, entry); + + hash = mtk_ppe_hash_entry(&entry->data); + entry->hash = 0xffff; + spin_lock_bh(&ppe_lock); + hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]); + spin_unlock_bh(&ppe_lock); + + return 0; +} + +static void +mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry, + u16 hash) +{ + struct mtk_flow_entry *flow_info; + struct mtk_foe_entry foe, *hwe; + struct mtk_foe_mac_info *l2; + u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP; + int type; + + flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end), + GFP_ATOMIC); + if (!flow_info) + return; + + flow_info->l2_data.base_flow = entry; + flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW; + flow_info->hash = hash; + hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]); + hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows); + + hwe = &ppe->foe_table[hash]; + memcpy(&foe, hwe, sizeof(foe)); + foe.ib1 &= ib1_mask; + foe.ib1 |= entry->data.ib1 & ~ib1_mask; + + l2 = mtk_foe_entry_l2(&foe); + memcpy(l2, &entry->data.bridge.l2, sizeof(*l2)); + + type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1); + if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT) + memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new)); + else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP) + l2->etype = ETH_P_IPV6; + + *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2; + + __mtk_foe_entry_commit(ppe, &foe, hash); } -int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, +void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) +{ + struct hlist_head *head = &ppe->foe_flow[hash / 2]; + struct mtk_foe_entry *hwe = &ppe->foe_table[hash]; + struct mtk_flow_entry *entry; + struct mtk_foe_bridge key = {}; + struct hlist_node *n; + struct ethhdr *eh; + bool found = false; + u8 *tag; + + spin_lock_bh(&ppe_lock); + + if 
(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND) + goto out; + + hlist_for_each_entry_safe(entry, n, head, list) { + if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) { + if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == + MTK_FOE_STATE_BIND)) + continue; + + entry->hash = 0xffff; + __mtk_foe_entry_clear(ppe, entry); + continue; + } + + if (found || !mtk_flow_entry_match(entry, hwe)) { + if (entry->hash != 0xffff) + entry->hash = 0xffff; + continue; + } + + entry->hash = hash; + __mtk_foe_entry_commit(ppe, &entry->data, hash); + found = true; + } + + if (found) + goto out; + + eh = eth_hdr(skb); + ether_addr_copy(key.dest_mac, eh->h_dest); + ether_addr_copy(key.src_mac, eh->h_source); + tag = skb->data - 2; + key.vlan = 0; + switch (skb->protocol) { +#if IS_ENABLED(CONFIG_NET_DSA) + case htons(ETH_P_XDSA): + if (!netdev_uses_dsa(skb->dev) || + skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK) + goto out; + + tag += 4; + if (get_unaligned_be16(tag) != ETH_P_8021Q) + break; + + fallthrough; +#endif + case htons(ETH_P_8021Q): + key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK; + break; + default: + break; + } + + entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params); + if (!entry) + goto out; + + mtk_foe_entry_commit_subflow(ppe, entry, hash); + +out: + spin_unlock_bh(&ppe_lock); +} + +int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + mtk_flow_entry_update(ppe, entry); + + return __mtk_foe_entry_idle_time(ppe, entry->data.ib1); +} + +struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version) { + struct device *dev = eth->dev; struct mtk_foe_entry *foe; + struct mtk_ppe *ppe; + + ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL); + if (!ppe) + return NULL; + + rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params); /* need to allocate a separate device, since it PPE DMA access is * not coherent. 
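* (For that reason the FOE table below is obtained with * dmam_alloc_coherent() against ppe->dev.)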
*/ ppe->base = base; + ppe->eth = eth; ppe->dev = dev; ppe->version = version; foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe), &ppe->foe_phys, GFP_KERNEL); if (!foe) - return -ENOMEM; + return NULL; ppe->foe_table = foe; mtk_ppe_debugfs_init(ppe); - return 0; + return ppe; } static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe) @@ -443,7 +763,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe) MTK_PPE_FLOW_CFG_IP4_NAT | MTK_PPE_FLOW_CFG_IP4_NAPT | MTK_PPE_FLOW_CFG_IP4_DSLITE | - MTK_PPE_FLOW_CFG_L2_BRIDGE | MTK_PPE_FLOW_CFG_IP4_NAT_FRAG; ppe_w32(ppe, MTK_PPE_FLOW_CFG, val); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h index 242fb8f2ae65..1f5cf1c9a947 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -6,6 +6,7 @@ #include <linux/kernel.h> #include <linux/bitfield.h> +#include <linux/rhashtable.h> #define MTK_ETH_PPE_BASE 0xc00 @@ -48,9 +49,9 @@ enum { #define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5) #define MTK_FOE_IB2_MULTICAST BIT(8) -#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12) -#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16) -#define MTK_FOE_IB2_WHNAT_NAT BIT(17) +#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12) +#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16) +#define MTK_FOE_IB2_WDMA_WINFO BIT(17) #define MTK_FOE_IB2_PORT_MG GENMASK(17, 12) @@ -58,9 +59,9 @@ enum { #define MTK_FOE_IB2_DSCP GENMASK(31, 24) -#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0) -#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6) -#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14) +#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0) +#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6) +#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14) enum { MTK_FOE_STATE_INVALID, @@ -84,19 +85,16 @@ struct mtk_foe_mac_info { u16 src_mac_lo; }; +/* software-only entry type */ struct mtk_foe_bridge { - u32 dest_mac_hi; + u8 dest_mac[ETH_ALEN]; + u8 src_mac[ETH_ALEN]; + u16 vlan; - u16 src_mac_lo; - u16 dest_mac_lo; - - u32 src_mac_hi; + struct {} key_end; u32 ib2; - u32 _rsv[5]; - - u32 udf_tsid; struct mtk_foe_mac_info l2; }; @@ -235,7 +233,37 @@ enum { MTK_PPE_CPU_REASON_INVALID = 0x1f, }; +enum { + MTK_FLOW_TYPE_L4, + MTK_FLOW_TYPE_L2, + MTK_FLOW_TYPE_L2_SUBFLOW, +}; + +struct mtk_flow_entry { + union { + struct hlist_node list; + struct { + struct rhash_head l2_node; + struct hlist_head l2_flows; + }; + }; + u8 type; + s8 wed_index; + u16 hash; + union { + struct mtk_foe_entry data; + struct { + struct mtk_flow_entry *base_flow; + struct hlist_node list; + struct {} end; + } l2_data; + }; + struct rhash_head node; + unsigned long cookie; +}; + struct mtk_ppe { + struct mtk_eth *eth; struct device *dev; void __iomem *base; int version; @@ -243,19 +271,35 @@ struct mtk_ppe { struct mtk_foe_entry *foe_table; dma_addr_t foe_phys; + u16 foe_check_time[MTK_PPE_ENTRIES]; + struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2]; + + struct rhashtable l2_flows; + void *acct_table; }; -int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, - int version); +struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version); int mtk_ppe_start(struct mtk_ppe *ppe); int mtk_ppe_stop(struct mtk_ppe *ppe); +void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash); + static inline void -mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash) +mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) { - ppe->foe_table[hash].ib1 = 0; - dma_wmb(); + u16 now, diff; + + if (!ppe) + return; + + now = 
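+ /* jiffies truncated to 16 bits; wraparound is harmless for this coarse rate limit (at most one recheck per HZ/10 per hash) */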
(u16)jiffies; + diff = now - ppe->foe_check_time[hash]; + if (diff < HZ / 10) + return; + + ppe->foe_check_time[hash] = now; + __mtk_ppe_check_skb(ppe, skb, hash); } static inline int @@ -281,8 +325,11 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry, int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port); int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid); int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid); -int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, - u16 timestamp); +int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, + int bss, int wcid); +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); +void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); +int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); int mtk_ppe_debugfs_init(struct mtk_ppe *ppe); #endif diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c index d4b482340cb9..eb0b598f14e4 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c @@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str(int type) static const char * const type_str[] = { [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T", [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T", - [MTK_PPE_PKT_TYPE_BRIDGE] = "L2", [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE", [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T", [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T", diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index 7bb1f20002b5..1fe31058b0f2 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -6,10 +6,12 @@ #include <linux/if_ether.h> #include <linux/rhashtable.h> #include <linux/ip.h> +#include <linux/ipv6.h> #include <net/flow_offload.h> #include <net/pkt_cls.h> #include <net/dsa.h> #include "mtk_eth_soc.h" +#include "mtk_wed.h" struct mtk_flow_data { struct ethhdr eth; @@ -19,11 +21,18 @@ struct mtk_flow_data { __be32 src_addr; __be32 dst_addr; } v4; + + struct { + struct in6_addr src_addr; + struct in6_addr dst_addr; + } v6; }; __be16 src_port; __be16 dst_port; + u16 vlan_in; + struct { u16 id; __be16 proto; @@ -35,12 +44,6 @@ struct mtk_flow_data { } pppoe; }; -struct mtk_flow_entry { - struct rhash_head node; - unsigned long cookie; - u16 hash; -}; - static const struct rhashtable_params mtk_flow_ht_params = { .head_offset = offsetof(struct mtk_flow_entry, node), .key_offset = offsetof(struct mtk_flow_entry, cookie), @@ -48,12 +51,6 @@ static const struct rhashtable_params mtk_flow_ht_params = { .automatic_shrinking = true, }; -static u32 -mtk_eth_timestamp(struct mtk_eth *eth) -{ - return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; -} - static int mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data, bool egress) @@ -63,6 +60,14 @@ mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data, data->v4.dst_addr, data->dst_port); } +static int +mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data) +{ + return mtk_foe_entry_set_ipv6_tuple(foe, + data->v6.src_addr.s6_addr32, data->src_port, + data->v6.dst_addr.s6_addr32, data->dst_port); +} + static void mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth) { @@ -80,6 +85,35 @@ mtk_flow_offload_mangle_eth(const struct 
flow_action_entry *act, void *eth) memcpy(dest, src, act->mangle.mask ? 2 : 4); } +static int +mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info) +{ + struct net_device_path_ctx ctx = { + .dev = dev, + .daddr = addr, + }; + struct net_device_path path = {}; + + if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) + return -1; + + if (!dev->netdev_ops->ndo_fill_forward_path) + return -1; + + if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path)) + return -1; + + if (path.type != DEV_PATH_MTK_WDMA) + return -1; + + info->wdma_idx = path.mtk_wdma.wdma_idx; + info->queue = path.mtk_wdma.queue; + info->bss = path.mtk_wdma.bss; + info->wcid = path.mtk_wdma.wcid; + + return 0; +} + static int mtk_flow_mangle_ports(const struct flow_action_entry *act, @@ -149,10 +183,20 @@ mtk_flow_get_dsa_port(struct net_device **dev) static int mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, - struct net_device *dev) + struct net_device *dev, const u8 *dest_mac, + int *wed_index) { + struct mtk_wdma_info info = {}; int pse_port, dsa_port; + if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) { + mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss, + info.wcid); + pse_port = 3; + *wed_index = info.wdma_idx; + goto out; + } + dsa_port = mtk_flow_get_dsa_port(&dev); if (dsa_port >= 0) mtk_foe_entry_set_dsa(foe, dsa_port); @@ -164,6 +208,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, else return -EOPNOTSUPP; +out: mtk_foe_entry_set_pse_port(foe, pse_port); return 0; @@ -179,11 +224,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) struct net_device *odev = NULL; struct mtk_flow_entry *entry; int offload_type = 0; + int wed_index = -1; u16 addr_type = 0; - u32 timestamp; u8 l4proto = 0; int err = 0; - int hash; int i; if (rhashtable_lookup(ð->flow_table, &f->cookie, mtk_flow_ht_params)) @@ -215,9 +259,45 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) return -EOPNOTSUPP; } + switch (addr_type) { + case 0: + offload_type = MTK_PPE_PKT_TYPE_BRIDGE; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN); + memcpy(data.eth.h_source, match.key->src, ETH_ALEN); + } else { + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + + if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q)) + return -EOPNOTSUPP; + + data.vlan_in = match.key->vlan_id; + } + break; + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; + break; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; + break; + default: + return -EOPNOTSUPP; + } + flow_action_for_each(i, act, &rule->action) { switch (act->id) { case FLOW_ACTION_MANGLE: + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) + return -EOPNOTSUPP; if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH) mtk_flow_offload_mangle_eth(act, &data.eth); break; @@ -249,14 +329,6 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) } } - switch (addr_type) { - case FLOW_DISSECTOR_KEY_IPV4_ADDRS: - offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; - break; - default: - return -EOPNOTSUPP; - } - if (!is_valid_ether_addr(data.eth.h_source) || !is_valid_ether_addr(data.eth.h_dest)) return -EINVAL; @@ -270,10 +342,13 @@ 
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports ports; + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) + return -EOPNOTSUPP; + flow_rule_match_ports(rule, &ports); data.src_port = ports.key->src; data.dst_port = ports.key->dst; - } else { + } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) { return -EOPNOTSUPP; } @@ -288,10 +363,24 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) mtk_flow_set_ipv4_addr(&foe, &data, false); } + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs addrs; + + flow_rule_match_ipv6_addrs(rule, &addrs); + + data.v6.src_addr = addrs.key->src; + data.v6.dst_addr = addrs.key->dst; + + mtk_flow_set_ipv6_addr(&foe, &data); + } + flow_action_for_each(i, act, &rule->action) { if (act->id != FLOW_ACTION_MANGLE) continue; + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) + return -EOPNOTSUPP; + switch (act->mangle.htype) { case FLOW_ACT_MANGLE_HDR_TYPE_TCP: case FLOW_ACT_MANGLE_HDR_TYPE_UDP: @@ -317,6 +406,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) return err; } + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) + foe.bridge.vlan = data.vlan_in; + if (data.vlan.num == 1) { if (data.vlan.proto != htons(ETH_P_8021Q)) return -EOPNOTSUPP; @@ -326,33 +418,38 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) if (data.pppoe.num == 1) mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid); - err = mtk_flow_set_output_device(eth, &foe, odev); + err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest, + &wed_index); if (err) return err; + if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0) + return err; + entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; entry->cookie = f->cookie; - timestamp = mtk_eth_timestamp(eth); - hash = mtk_foe_entry_commit(ð->ppe, &foe, timestamp); - if (hash < 0) { - err = hash; + memcpy(&entry->data, &foe, sizeof(entry->data)); + entry->wed_index = wed_index; + + if (mtk_foe_entry_commit(eth->ppe, entry) < 0) goto free; - } - entry->hash = hash; err = rhashtable_insert_fast(ð->flow_table, &entry->node, mtk_flow_ht_params); if (err < 0) - goto clear_flow; + goto clear; return 0; -clear_flow: - mtk_foe_entry_clear(ð->ppe, hash); + +clear: + mtk_foe_entry_clear(eth->ppe, entry); free: kfree(entry); + if (wed_index >= 0) + mtk_wed_flow_remove(wed_index); return err; } @@ -366,9 +463,11 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f) if (!entry) return -ENOENT; - mtk_foe_entry_clear(ð->ppe, entry->hash); + mtk_foe_entry_clear(eth->ppe, entry); rhashtable_remove_fast(ð->flow_table, &entry->node, mtk_flow_ht_params); + if (entry->wed_index >= 0) + mtk_wed_flow_remove(entry->wed_index); kfree(entry); return 0; @@ -378,7 +477,6 @@ static int mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f) { struct mtk_flow_entry *entry; - int timestamp; u32 idle; entry = rhashtable_lookup(ð->flow_table, &f->cookie, @@ -386,11 +484,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f) if (!entry) return -ENOENT; - timestamp = mtk_foe_entry_timestamp(ð->ppe, entry->hash); - if (timestamp < 0) - return -ETIMEDOUT; - - idle = mtk_eth_timestamp(eth) - timestamp; + idle = mtk_foe_entry_idle_time(eth->ppe, entry); f->stats.lastused = jiffies - idle * HZ; return 0; @@ -442,7 +536,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f) 
struct flow_block_cb *block_cb; flow_setup_cb_t *cb; - if (!eth->ppe.foe_table) + if (!eth->ppe || !eth->ppe->foe_table) return -EOPNOTSUPP; if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) @@ -483,15 +577,18 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f) int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { - if (type == TC_SETUP_FT) + switch (type) { + case TC_SETUP_BLOCK: + case TC_SETUP_FT: return mtk_eth_setup_tc_block(dev, type_data); - - return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } } int mtk_eth_offload_init(struct mtk_eth *eth) { - if (!eth->ppe.foe_table) + if (!eth->ppe || !eth->ppe->foe_table) return 0; return rhashtable_init(ð->flow_table, &mtk_flow_ht_params); diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index 4cd0747edaff..95839fd84dab 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -30,7 +30,6 @@ #define MTK_STAR_WAIT_TIMEOUT 300 #define MTK_STAR_MAX_FRAME_SIZE 1514 #define MTK_STAR_SKB_ALIGNMENT 16 -#define MTK_STAR_NAPI_WEIGHT 64 #define MTK_STAR_HASHTABLE_MC_LIMIT 256 #define MTK_STAR_HASHTABLE_SIZE_MAX 512 @@ -1551,7 +1550,7 @@ static int mtk_star_probe(struct platform_device *pdev) ndev->netdev_ops = &mtk_star_netdev_ops; ndev->ethtool_ops = &mtk_star_ethtool_ops; - netif_napi_add(ndev, &priv->napi, mtk_star_poll, MTK_STAR_NAPI_WEIGHT); + netif_napi_add(ndev, &priv->napi, mtk_star_poll, NAPI_POLL_WEIGHT); return devm_register_netdev(dev, ndev); } diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c new file mode 100644 index 000000000000..8f0cd3196aac --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -0,0 +1,880 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/bitfield.h> +#include <linux/dma-mapping.h> +#include <linux/skbuff.h> +#include <linux/of_platform.h> +#include <linux/of_address.h> +#include <linux/mfd/syscon.h> +#include <linux/debugfs.h> +#include <linux/soc/mediatek/mtk_wed.h> +#include "mtk_eth_soc.h" +#include "mtk_wed_regs.h" +#include "mtk_wed.h" +#include "mtk_ppe.h" + +#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) + +#define MTK_WED_PKT_SIZE 1900 +#define MTK_WED_BUF_SIZE 2048 +#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) + +#define MTK_WED_TX_RING_SIZE 2048 +#define MTK_WED_WDMA_RING_SIZE 1024 + +static struct mtk_wed_hw *hw_list[2]; +static DEFINE_MUTEX(hw_lock); + +static void +wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) +{ + regmap_update_bits(dev->hw->regs, reg, mask | val, val); +} + +static void +wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + return wed_m32(dev, reg, 0, mask); +} + +static void +wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + return wed_m32(dev, reg, mask, 0); +} + +static void +wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) +{ + wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val); +} + +static void +wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + wdma_m32(dev, reg, 0, mask); +} + +static u32 +mtk_wed_read_reset(struct mtk_wed_device *dev) +{ + return wed_r32(dev, MTK_WED_RESET); +} + +static void +mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) +{ + u32 status; + + wed_w32(dev, MTK_WED_RESET, mask); + if 
(readx_poll_timeout(mtk_wed_read_reset, dev, status, + !(status & mask), 0, 1000)) + WARN_ON_ONCE(1); +} + +static struct mtk_wed_hw * +mtk_wed_assign(struct mtk_wed_device *dev) +{ + struct mtk_wed_hw *hw; + + hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)]; + if (!hw || hw->wed_dev) + return NULL; + + hw->wed_dev = dev; + return hw; +} + +static int +mtk_wed_buffer_alloc(struct mtk_wed_device *dev) +{ + struct mtk_wdma_desc *desc; + dma_addr_t desc_phys; + void **page_list; + int token = dev->wlan.token_start; + int ring_size; + int n_pages; + int i, page_idx; + + ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); + n_pages = ring_size / MTK_WED_BUF_PER_PAGE; + + page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); + if (!page_list) + return -ENOMEM; + + dev->buf_ring.size = ring_size; + dev->buf_ring.pages = page_list; + + desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), + &desc_phys, GFP_KERNEL); + if (!desc) + return -ENOMEM; + + dev->buf_ring.desc = desc; + dev->buf_ring.desc_phys = desc_phys; + + for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { + dma_addr_t page_phys, buf_phys; + struct page *page; + void *buf; + int s; + + page = __dev_alloc_pages(GFP_KERNEL, 0); + if (!page) + return -ENOMEM; + + page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev->hw->dev, page_phys)) { + __free_page(page); + return -ENOMEM; + } + + page_list[page_idx++] = page; + dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, + DMA_BIDIRECTIONAL); + + buf = page_to_virt(page); + buf_phys = page_phys; + + for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { + u32 txd_size; + u32 ctrl; + + txd_size = dev->wlan.init_buf(buf, buf_phys, token++); + + desc->buf0 = cpu_to_le32(buf_phys); + desc->buf1 = cpu_to_le32(buf_phys + txd_size); + ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, + MTK_WED_BUF_SIZE - txd_size) | + MTK_WDMA_DESC_CTRL_LAST_SEG1; + desc->ctrl = cpu_to_le32(ctrl); + desc->info = 0; + desc++; + + buf += MTK_WED_BUF_SIZE; + buf_phys += MTK_WED_BUF_SIZE; + } + + dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, + DMA_BIDIRECTIONAL); + } + + return 0; +} + +static void +mtk_wed_free_buffer(struct mtk_wed_device *dev) +{ + struct mtk_wdma_desc *desc = dev->buf_ring.desc; + void **page_list = dev->buf_ring.pages; + int page_idx; + int i; + + if (!page_list) + return; + + if (!desc) + goto free_pagelist; + + for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { + void *page = page_list[page_idx++]; + dma_addr_t buf_addr; + + if (!page) + break; + + buf_addr = le32_to_cpu(desc[i].buf0); + dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL); + __free_page(page); + } + + dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc), + desc, dev->buf_ring.desc_phys); + +free_pagelist: + kfree(page_list); +} + +static void +mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) +{ + if (!ring->desc) + return; + + dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc), + ring->desc, ring->desc_phys); +} + +static void +mtk_wed_free_tx_rings(struct mtk_wed_device *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) + mtk_wed_free_ring(dev, &dev->tx_ring[i]); + for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) + mtk_wed_free_ring(dev, &dev->tx_wdma[i]); +} + +static void +mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) +{ + u32 mask = 
MTK_WED_EXT_INT_STATUS_ERROR_MASK; + + if (!dev->hw->num_flows) + mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; + + wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0); + wed_r32(dev, MTK_WED_EXT_INT_MASK); +} + +static void +mtk_wed_stop(struct mtk_wed_device *dev) +{ + regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); + mtk_wed_set_ext_int(dev, false); + + wed_clr(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WDMA_INT_AGENT_EN | + MTK_WED_CTRL_WPDMA_INT_AGENT_EN | + MTK_WED_CTRL_WED_TX_BM_EN | + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); + wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0); + wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0); + wdma_w32(dev, MTK_WDMA_INT_MASK, 0); + wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); + wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); + + wed_clr(dev, MTK_WED_GLO_CFG, + MTK_WED_GLO_CFG_TX_DMA_EN | + MTK_WED_GLO_CFG_RX_DMA_EN); + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); +} + +static void +mtk_wed_detach(struct mtk_wed_device *dev) +{ + struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node; + struct mtk_wed_hw *hw = dev->hw; + + mutex_lock(&hw_lock); + + mtk_wed_stop(dev); + + wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); + wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); + + mtk_wed_reset(dev, MTK_WED_RESET_WED); + + mtk_wed_free_buffer(dev); + mtk_wed_free_tx_rings(dev); + + if (of_dma_is_coherent(wlan_node)) + regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, + BIT(hw->index), BIT(hw->index)); + + if (!hw_list[!hw->index]->wed_dev && + hw->eth->dma_dev != hw->eth->dev) + mtk_eth_set_dma_device(hw->eth, hw->eth->dev); + + memset(dev, 0, sizeof(*dev)); + module_put(THIS_MODULE); + + hw->wed_dev = NULL; + mutex_unlock(&hw_lock); +} + +static void +mtk_wed_hw_init_early(struct mtk_wed_device *dev) +{ + u32 mask, set; + u32 offset; + + mtk_wed_stop(dev); + mtk_wed_reset(dev, MTK_WED_RESET_WED); + + mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE | + MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | + MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; + set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) | + MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | + MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; + wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); + + wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES); + + offset = dev->hw->index ? 
0x04000400 : 0; + wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset); + wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset); + + wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index)); + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); +} + +static void +mtk_wed_hw_init(struct mtk_wed_device *dev) +{ + if (dev->init_done) + return; + + dev->init_done = true; + mtk_wed_set_ext_int(dev, false); + wed_w32(dev, MTK_WED_TX_BM_CTRL, + MTK_WED_TX_BM_CTRL_PAUSE | + FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, + dev->buf_ring.size / 128) | + FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, + MTK_WED_TX_RING_SIZE / 256)); + + wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys); + + wed_w32(dev, MTK_WED_TX_BM_TKID, + FIELD_PREP(MTK_WED_TX_BM_TKID_START, + dev->wlan.token_start) | + FIELD_PREP(MTK_WED_TX_BM_TKID_END, + dev->wlan.token_start + dev->wlan.nbuf - 1)); + + wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); + + wed_w32(dev, MTK_WED_TX_BM_DYN_THR, + FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | + MTK_WED_TX_BM_DYN_THR_HI); + + mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); + + wed_set(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WED_TX_BM_EN | + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); + + wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); +} + +static void +mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size) +{ + int i; + + for (i = 0; i < size; i++) { + desc[i].buf0 = 0; + desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); + desc[i].buf1 = 0; + desc[i].info = 0; + } +} + +static u32 +mtk_wed_check_busy(struct mtk_wed_device *dev) +{ + if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY) + return true; + + if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) & + MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY) + return true; + + if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY) + return true; + + if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) & + MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) + return true; + + if (wdma_r32(dev, MTK_WDMA_GLO_CFG) & + MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) + return true; + + if (wed_r32(dev, MTK_WED_CTRL) & + (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY)) + return true; + + return false; +} + +static int +mtk_wed_poll_busy(struct mtk_wed_device *dev) +{ + int sleep = 15000; + int timeout = 100 * sleep; + u32 val; + + return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, + timeout, false, dev); +} + +static void +mtk_wed_reset_dma(struct mtk_wed_device *dev) +{ + bool busy = false; + u32 val; + int i; + + for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) { + struct mtk_wdma_desc *desc = dev->tx_ring[i].desc; + + if (!desc) + continue; + + mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE); + } + + if (mtk_wed_poll_busy(dev)) + busy = mtk_wed_check_busy(dev); + + if (busy) { + mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); + } else { + wed_w32(dev, MTK_WED_RESET_IDX, + MTK_WED_RESET_IDX_TX | + MTK_WED_RESET_IDX_RX); + wed_w32(dev, MTK_WED_RESET_IDX, 0); + } + + wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); + wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); + + if (busy) { + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); + } else { + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, + MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); + + wed_set(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); + + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); + } + + for (i = 0; i < 100; i++) { + val = 
wed_r32(dev, MTK_WED_TX_BM_INTF); + if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40) + break; + } + + mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT); + mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); + + if (busy) { + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); + } else { + wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, + MTK_WED_WPDMA_RESET_IDX_TX | + MTK_WED_WPDMA_RESET_IDX_RX); + wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); + } + +} + +static int +mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, + int size) +{ + ring->desc = dma_alloc_coherent(dev->hw->dev, + size * sizeof(*ring->desc), + &ring->desc_phys, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + ring->size = size; + mtk_wed_ring_reset(ring->desc, size); + + return 0; +} + +static int +mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size) +{ + struct mtk_wed_ring *wdma = &dev->tx_wdma[idx]; + + if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE)) + return -ENOMEM; + + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, + wdma->desc_phys); + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, + size); + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); + + wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, + wdma->desc_phys); + wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, + size); + + return 0; +} + +static void +mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) +{ + u32 wdma_mask; + u32 val; + int i; + + for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) + if (!dev->tx_wdma[i].desc) + mtk_wed_wdma_ring_setup(dev, i, 16); + + wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0)); + + mtk_wed_hw_init(dev); + + wed_set(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WDMA_INT_AGENT_EN | + MTK_WED_CTRL_WPDMA_INT_AGENT_EN | + MTK_WED_CTRL_WED_TX_BM_EN | + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); + + wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS); + + wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, + MTK_WED_WPDMA_INT_TRIGGER_RX_DONE | + MTK_WED_WPDMA_INT_TRIGGER_TX_DONE); + + wed_set(dev, MTK_WED_WPDMA_INT_CTRL, + MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); + + wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask); + wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); + + wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask); + wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask); + + wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); + wed_w32(dev, MTK_WED_INT_MASK, irq_mask); + + wed_set(dev, MTK_WED_GLO_CFG, + MTK_WED_GLO_CFG_TX_DMA_EN | + MTK_WED_GLO_CFG_RX_DMA_EN); + wed_set(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); + wed_set(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); + + mtk_wed_set_ext_int(dev, true); + val = dev->wlan.wpdma_phys | + MTK_PCIE_MIRROR_MAP_EN | + FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index); + + if (dev->hw->index) + val |= BIT(1); + val |= BIT(0); + regmap_write(dev->hw->mirror, dev->hw->index * 4, val); + + dev->running = true; +} + +static int +mtk_wed_attach(struct mtk_wed_device *dev) + __releases(RCU) +{ + struct mtk_wed_hw *hw; + int ret = 0; + + RCU_LOCKDEP_WARN(!rcu_read_lock_held(), + "mtk_wed_attach without holding the RCU read lock"); + + if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 || + !try_module_get(THIS_MODULE)) + ret = -ENODEV; + + rcu_read_unlock(); + + if (ret) + return ret; + + mutex_lock(&hw_lock); + + hw = 
mtk_wed_assign(dev); + if (!hw) { + module_put(THIS_MODULE); + ret = -ENODEV; + goto out; + } + + dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index); + + dev->hw = hw; + dev->dev = hw->dev; + dev->irq = hw->irq; + dev->wdma_idx = hw->index; + + if (hw->eth->dma_dev == hw->eth->dev && + of_dma_is_coherent(hw->eth->dev->of_node)) + mtk_eth_set_dma_device(hw->eth, hw->dev); + + ret = mtk_wed_buffer_alloc(dev); + if (ret) { + mtk_wed_detach(dev); + goto out; + } + + mtk_wed_hw_init_early(dev); + regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0); + +out: + mutex_unlock(&hw_lock); + + return ret; +} + +static int +mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) +{ + struct mtk_wed_ring *ring = &dev->tx_ring[idx]; + + /* + * Tx ring redirection: + * Instead of configuring the WLAN PDMA TX ring directly, the WLAN + * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n) + * registers. + * + * WED driver posts its own DMA ring as WLAN PDMA TX and configures it + * into MTK_WED_WPDMA_RING_TX(n) registers. + * It gets filled with packets picked up from WED TX ring and from + * WDMA RX. + */ + + BUG_ON(idx > ARRAY_SIZE(dev->tx_ring)); + + if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE)) + return -ENOMEM; + + if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) + return -ENOMEM; + + ring->reg_base = MTK_WED_RING_TX(idx); + ring->wpdma = regs; + + /* WED -> WPDMA */ + wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); + wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); + wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0); + + wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, + ring->desc_phys); + wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, + MTK_WED_TX_RING_SIZE); + wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); + + return 0; +} + +static int +mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) +{ + struct mtk_wed_ring *ring = &dev->txfree_ring; + int i; + + /* + * For txfree event handling, the same DMA ring is shared between WED + * and WLAN. 
The WLAN driver accesses the ring index registers through + * WED + */ + ring->reg_base = MTK_WED_RING_RX(1); + ring->wpdma = regs; + + for (i = 0; i < 12; i += 4) { + u32 val = readl(regs + i); + + wed_w32(dev, MTK_WED_RING_RX(1) + i, val); + wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val); + } + + return 0; +} + +static u32 +mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) +{ + u32 val; + + val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); + wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); + val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK; + if (!dev->hw->num_flows) + val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; + if (val && net_ratelimit()) + pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val); + + val = wed_r32(dev, MTK_WED_INT_STATUS); + val &= mask; + wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */ + + return val; +} + +static void +mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) +{ + if (!dev->running) + return; + + mtk_wed_set_ext_int(dev, !!mask); + wed_w32(dev, MTK_WED_INT_MASK, mask); +} + +int mtk_wed_flow_add(int index) +{ + struct mtk_wed_hw *hw = hw_list[index]; + int ret; + + if (!hw || !hw->wed_dev) + return -ENODEV; + + if (hw->num_flows) { + hw->num_flows++; + return 0; + } + + mutex_lock(&hw_lock); + if (!hw->wed_dev) { + ret = -ENODEV; + goto out; + } + + ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev); + if (!ret) + hw->num_flows++; + mtk_wed_set_ext_int(hw->wed_dev, true); + +out: + mutex_unlock(&hw_lock); + + return ret; +} + +void mtk_wed_flow_remove(int index) +{ + struct mtk_wed_hw *hw = hw_list[index]; + + if (!hw) + return; + + if (--hw->num_flows) + return; + + mutex_lock(&hw_lock); + if (!hw->wed_dev) + goto out; + + hw->wed_dev->wlan.offload_disable(hw->wed_dev); + mtk_wed_set_ext_int(hw->wed_dev, true); + +out: + mutex_unlock(&hw_lock); +} + +void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, + void __iomem *wdma, int index) +{ + static const struct mtk_wed_ops wed_ops = { + .attach = mtk_wed_attach, + .tx_ring_setup = mtk_wed_tx_ring_setup, + .txfree_ring_setup = mtk_wed_txfree_ring_setup, + .start = mtk_wed_start, + .stop = mtk_wed_stop, + .reset_dma = mtk_wed_reset_dma, + .reg_read = wed_r32, + .reg_write = wed_w32, + .irq_get = mtk_wed_irq_get, + .irq_set_mask = mtk_wed_irq_set_mask, + .detach = mtk_wed_detach, + }; + struct device_node *eth_np = eth->dev->of_node; + struct platform_device *pdev; + struct mtk_wed_hw *hw; + struct regmap *regs; + int irq; + + if (!np) + return; + + pdev = of_find_device_by_node(np); + if (!pdev) + return; + + get_device(&pdev->dev); + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return; + + regs = syscon_regmap_lookup_by_phandle(np, NULL); + if (IS_ERR(regs)) + return; + + rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); + + mutex_lock(&hw_lock); + + if (WARN_ON(hw_list[index])) + goto unlock; + + hw = kzalloc(sizeof(*hw), GFP_KERNEL); + if (!hw) + goto unlock; + hw->node = np; + hw->regs = regs; + hw->eth = eth; + hw->dev = &pdev->dev; + hw->wdma = wdma; + hw->index = index; + hw->irq = irq; + hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, + "mediatek,pcie-mirror"); + hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, + "mediatek,hifsys"); + if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) { + kfree(hw); + goto unlock; + } + + if (!index) { + regmap_write(hw->mirror, 0, 0); + regmap_write(hw->mirror, 4, 0); + } + mtk_wed_hw_add_debugfs(hw); + + hw_list[index] = hw; + +unlock: + mutex_unlock(&hw_lock); +} + +void mtk_wed_exit(void) +{ + int i; + + rcu_assign_pointer(mtk_soc_wed_ops, 
NULL); + + synchronize_rcu(); + + for (i = 0; i < ARRAY_SIZE(hw_list); i++) { + struct mtk_wed_hw *hw; + + hw = hw_list[i]; + if (!hw) + continue; + + hw_list[i] = NULL; + debugfs_remove(hw->debugfs_dir); + put_device(hw->dev); + kfree(hw); + } +} diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h new file mode 100644 index 000000000000..981ec613f4b0 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed.h @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ + +#ifndef __MTK_WED_PRIV_H +#define __MTK_WED_PRIV_H + +#include <linux/soc/mediatek/mtk_wed.h> +#include <linux/debugfs.h> +#include <linux/regmap.h> +#include <linux/netdevice.h> + +struct mtk_eth; + +struct mtk_wed_hw { + struct device_node *node; + struct mtk_eth *eth; + struct regmap *regs; + struct regmap *hifsys; + struct device *dev; + void __iomem *wdma; + struct regmap *mirror; + struct dentry *debugfs_dir; + struct mtk_wed_device *wed_dev; + u32 debugfs_reg; + u32 num_flows; + char dirname[5]; + int irq; + int index; +}; + +struct mtk_wdma_info { + u8 wdma_idx; + u8 queue; + u16 wcid; + u8 bss; +}; + +#ifdef CONFIG_NET_MEDIATEK_SOC_WED +static inline void +wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val) +{ + regmap_write(dev->hw->regs, reg, val); +} + +static inline u32 +wed_r32(struct mtk_wed_device *dev, u32 reg) +{ + unsigned int val; + + regmap_read(dev->hw->regs, reg, &val); + + return val; +} + +static inline void +wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val) +{ + writel(val, dev->hw->wdma + reg); +} + +static inline u32 +wdma_r32(struct mtk_wed_device *dev, u32 reg) +{ + return readl(dev->hw->wdma + reg); +} + +static inline u32 +wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg) +{ + if (!dev->tx_ring[ring].wpdma) + return 0; + + return readl(dev->tx_ring[ring].wpdma + reg); +} + +static inline void +wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val) +{ + if (!dev->tx_ring[ring].wpdma) + return; + + writel(val, dev->tx_ring[ring].wpdma + reg); +} + +static inline u32 +wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg) +{ + if (!dev->txfree_ring.wpdma) + return 0; + + return readl(dev->txfree_ring.wpdma + reg); +} + +static inline void +wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val) +{ + if (!dev->txfree_ring.wpdma) + return; + + writel(val, dev->txfree_ring.wpdma + reg); +} + +void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, + void __iomem *wdma, int index); +void mtk_wed_exit(void); +int mtk_wed_flow_add(int index); +void mtk_wed_flow_remove(int index); +#else +static inline void +mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, + void __iomem *wdma, int index) +{ +} +static inline void +mtk_wed_exit(void) +{ +} +static inline int mtk_wed_flow_add(int index) +{ + return -EINVAL; +} +static inline void mtk_wed_flow_remove(int index) +{ +} +#endif + +#ifdef CONFIG_DEBUG_FS +void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw); +#else +static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) +{ +} +#endif + +#endif diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c new file mode 100644 index 000000000000..a81d3fd1a439 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ + +#include <linux/seq_file.h> +#include "mtk_wed.h" +#include 
"mtk_wed_regs.h" + +struct reg_dump { + const char *name; + u16 offset; + u8 type; + u8 base; +}; + +enum { + DUMP_TYPE_STRING, + DUMP_TYPE_WED, + DUMP_TYPE_WDMA, + DUMP_TYPE_WPDMA_TX, + DUMP_TYPE_WPDMA_TXFREE, +}; + +#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING } +#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ } +#define DUMP_RING(_prefix, _base, ...) \ + { _prefix " BASE", _base, __VA_ARGS__ }, \ + { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \ + { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \ + { _prefix " DIDX", _base + 0xc, __VA_ARGS__ } + +#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED) +#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED) + +#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA) +#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA) + +#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n) +#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE) + +static void +print_reg_val(struct seq_file *s, const char *name, u32 val) +{ + seq_printf(s, "%-32s %08x\n", name, val); +} + +static void +dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev, + const struct reg_dump *regs, int n_regs) +{ + const struct reg_dump *cur; + u32 val; + + for (cur = regs; cur < ®s[n_regs]; cur++) { + switch (cur->type) { + case DUMP_TYPE_STRING: + seq_printf(s, "%s======== %s:\n", + cur > regs ? "\n" : "", + cur->name); + continue; + case DUMP_TYPE_WED: + val = wed_r32(dev, cur->offset); + break; + case DUMP_TYPE_WDMA: + val = wdma_r32(dev, cur->offset); + break; + case DUMP_TYPE_WPDMA_TX: + val = wpdma_tx_r32(dev, cur->base, cur->offset); + break; + case DUMP_TYPE_WPDMA_TXFREE: + val = wpdma_txfree_r32(dev, cur->offset); + break; + } + print_reg_val(s, cur->name, val); + } +} + + +static int +wed_txinfo_show(struct seq_file *s, void *data) +{ + static const struct reg_dump regs[] = { + DUMP_STR("WED TX"), + DUMP_WED(WED_TX_MIB(0)), + DUMP_WED_RING(WED_RING_TX(0)), + + DUMP_WED(WED_TX_MIB(1)), + DUMP_WED_RING(WED_RING_TX(1)), + + DUMP_STR("WPDMA TX"), + DUMP_WED(WED_WPDMA_TX_MIB(0)), + DUMP_WED_RING(WED_WPDMA_RING_TX(0)), + DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)), + + DUMP_WED(WED_WPDMA_TX_MIB(1)), + DUMP_WED_RING(WED_WPDMA_RING_TX(1)), + DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)), + + DUMP_STR("WPDMA TX"), + DUMP_WPDMA_TX_RING(0), + DUMP_WPDMA_TX_RING(1), + + DUMP_STR("WED WDMA RX"), + DUMP_WED(WED_WDMA_RX_MIB(0)), + DUMP_WED_RING(WED_WDMA_RING_RX(0)), + DUMP_WED(WED_WDMA_RX_THRES(0)), + DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)), + DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)), + + DUMP_WED(WED_WDMA_RX_MIB(1)), + DUMP_WED_RING(WED_WDMA_RING_RX(1)), + DUMP_WED(WED_WDMA_RX_THRES(1)), + DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)), + DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)), + + DUMP_STR("WDMA RX"), + DUMP_WDMA(WDMA_GLO_CFG), + DUMP_WDMA_RING(WDMA_RING_RX(0)), + DUMP_WDMA_RING(WDMA_RING_RX(1)), + }; + struct mtk_wed_hw *hw = s->private; + struct mtk_wed_device *dev = hw->wed_dev; + + if (!dev) + return 0; + + dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wed_txinfo); + + +static int +mtk_wed_reg_set(void *data, u64 val) +{ + struct mtk_wed_hw *hw = data; + + regmap_write(hw->regs, hw->debugfs_reg, val); + + return 0; +} + +static int +mtk_wed_reg_get(void *data, u64 *val) +{ + struct mtk_wed_hw *hw = data; + unsigned int regval; + int ret; + + ret = regmap_read(hw->regs, hw->debugfs_reg, ®val); + if (ret) + return ret; + + *val = regval; + + return 
0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set, + "0x%08llx\n"); + +void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) +{ + struct dentry *dir; + + snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index); + dir = debugfs_create_dir(hw->dirname, NULL); + if (!dir) + return; + + hw->debugfs_dir = dir; + debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg); + debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval); + debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops); +} diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c new file mode 100644 index 000000000000..a5d9d8a5bce2 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#include <linux/kernel.h> +#include <linux/soc/mediatek/mtk_wed.h> + +const struct mtk_wed_ops __rcu *mtk_soc_wed_ops; +EXPORT_SYMBOL_GPL(mtk_soc_wed_ops); diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h new file mode 100644 index 000000000000..0a0465ea58b4 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#ifndef __MTK_WED_REGS_H +#define __MTK_WED_REGS_H + +#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0) +#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15) +#define MTK_WDMA_DESC_CTRL_BURST BIT(16) +#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16) +#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30) +#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31) + +struct mtk_wdma_desc { + __le32 buf0; + __le32 ctrl; + __le32 buf1; + __le32 info; +} __packed __aligned(4); + +#define MTK_WED_RESET 0x008 +#define MTK_WED_RESET_TX_BM BIT(0) +#define MTK_WED_RESET_TX_FREE_AGENT BIT(4) +#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8) +#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9) +#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11) +#define MTK_WED_RESET_WED_TX_DMA BIT(12) +#define MTK_WED_RESET_WDMA_RX_DRV BIT(17) +#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19) +#define MTK_WED_RESET_WED BIT(31) + +#define MTK_WED_CTRL 0x00c +#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0) +#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1) +#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2) +#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3) +#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8) +#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9) +#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10) +#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11) +#define MTK_WED_CTRL_RESERVE_EN BIT(12) +#define MTK_WED_CTRL_RESERVE_BUSY BIT(13) +#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24) +#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28) + +#define MTK_WED_EXT_INT_STATUS 0x020 +#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0) +#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1) +#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4) +#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8) +#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9) +#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12) +#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20) +#define 
MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21) +#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24) +#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \ + MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \ + MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \ + MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \ + MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \ + MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \ + MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \ + MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR) + +#define MTK_WED_EXT_INT_MASK 0x028 + +#define MTK_WED_STATUS 0x060 +#define MTK_WED_STATUS_TX GENMASK(15, 8) + +#define MTK_WED_TX_BM_CTRL 0x080 +#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0) +#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16) +#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28) + +#define MTK_WED_TX_BM_BASE 0x084 + +#define MTK_WED_TX_BM_TKID 0x088 +#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0) +#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16) + +#define MTK_WED_TX_BM_BUF_LEN 0x08c + +#define MTK_WED_TX_BM_INTF 0x09c +#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0) +#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16) +#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28) +#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29) + +#define MTK_WED_TX_BM_DYN_THR 0x0a0 +#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0) +#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16) + +#define MTK_WED_INT_STATUS 0x200 +#define MTK_WED_INT_MASK 0x204 + +#define MTK_WED_GLO_CFG 0x208 +#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0) +#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1) +#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2) +#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3) +#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4) +#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6) +#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7) +#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8) +#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9) +#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10) +#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12) +#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13) +#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22) +#define MTK_WED_GLO_CFG_SW_RESET BIT(24) +#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26) +#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27) +#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28) +#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29) +#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31) + +#define MTK_WED_RESET_IDX 0x20c +#define MTK_WED_RESET_IDX_TX GENMASK(3, 0) +#define MTK_WED_RESET_IDX_RX GENMASK(17, 16) + +#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4) + +#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10) + +#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10) + +#define MTK_WED_WPDMA_INT_TRIGGER 0x504 +#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1) +#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4) + +#define MTK_WED_WPDMA_GLO_CFG 0x508 +#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0) +#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3) +#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4) +#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6) +#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7) +#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8) +#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9) +#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10) +#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12) +#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13) 
+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22) +#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24) +#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26) +#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27) +#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28) +#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29) +#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31) + +#define MTK_WED_WPDMA_RESET_IDX 0x50c +#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0) +#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16) + +#define MTK_WED_WPDMA_INT_CTRL 0x520 +#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21) + +#define MTK_WED_WPDMA_INT_MASK 0x524 + +#define MTK_WED_PCIE_CFG_BASE 0x560 + +#define MTK_WED_PCIE_INT_TRIGGER 0x570 +#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16) + +#define MTK_WED_WPDMA_CFG_BASE 0x580 + +#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4) +#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4) + +#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10) +#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10) +#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10) +#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4) + +#define MTK_WED_WDMA_GLO_CFG 0xa04 +#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0) +#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2) +#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3) +#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4) +#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6) +#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13) +#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16) +#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17) +#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18) +#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19) +#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20) +#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21) +#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22) +#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23) +#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24) +#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25) +#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26) +#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30) + +#define MTK_WED_WDMA_RESET_IDX 0xa08 +#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16) +#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24) + +#define MTK_WED_WDMA_INT_TRIGGER 0xa28 +#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16) + +#define MTK_WED_WDMA_INT_CTRL 0xa2c +#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16) + +#define MTK_WED_WDMA_OFFSET0 0xaa4 +#define MTK_WED_WDMA_OFFSET1 0xaa8 + +#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4) +#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4) +#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4) + +#define MTK_WED_RING_OFS_BASE 0x00 +#define MTK_WED_RING_OFS_COUNT 0x04 +#define MTK_WED_RING_OFS_CPU_IDX 0x08 +#define MTK_WED_RING_OFS_DMA_IDX 0x0c + +#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10) + +#define MTK_WDMA_GLO_CFG 0x204 +#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26) + +#define MTK_WDMA_RESET_IDX 0x208 +#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0) +#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16) + +#define MTK_WDMA_INT_MASK 0x228 +#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0) +#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16) +#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28) +#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29) +#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30) +#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31) + +#define MTK_WDMA_INT_GRP1 0x250 
+#define MTK_WDMA_INT_GRP2 0x254 + +#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0) +#define MTK_PCIE_MIRROR_MAP_EN BIT(0) +#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1) + +/* DMA channel mapping */ +#define HIFSYS_DMA_AG_MAP 0x008 + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 4ba1a78c6515..bfc0cd5ec423 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -16,13 +16,9 @@ config MLX5_CORE Core driver for low level functionality of the ConnectX-4 and Connect-IB cards by Mellanox Technologies. -config MLX5_ACCEL - bool - config MLX5_FPGA bool "Mellanox Technologies Innova support" depends on MLX5_CORE - select MLX5_ACCEL help Build support for the Innova family of network cards by Mellanox Technologies. Innova network cards are comprised of a ConnectX chip @@ -143,71 +139,21 @@ config MLX5_CORE_IPOIB help MLX5 IPoIB offloads & acceleration support. -config MLX5_FPGA_IPSEC - bool "Mellanox Technologies IPsec Innova support" - depends on MLX5_CORE - depends on MLX5_FPGA - help - Build IPsec support for the Innova family of network cards by Mellanox - Technologies. Innova network cards are comprised of a ConnectX chip - and an FPGA chip on one board. If you select this option, the - mlx5_core driver will include the Innova FPGA core and allow building - sandbox-specific client drivers. - -config MLX5_IPSEC - bool "Mellanox Technologies IPsec Connect-X support" - depends on MLX5_CORE_EN - depends on XFRM_OFFLOAD - depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD - select MLX5_ACCEL - help - Build IPsec support for the Connect-X family of network cards by Mellanox - Technologies. - Note: If you select this option, the mlx5_core driver will include - IPsec support for the Connect-X family. - config MLX5_EN_IPSEC - bool "IPSec XFRM cryptography-offload acceleration" + bool "Mellanox Technologies IPsec Connect-X support" depends on MLX5_CORE_EN depends on XFRM_OFFLOAD depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD - depends on MLX5_FPGA_IPSEC || MLX5_IPSEC help Build support for IPsec cryptography-offload acceleration in the NIC. - Note: Support for hardware with this capability needs to be selected - for this option to become available. - -config MLX5_FPGA_TLS - bool "Mellanox Technologies TLS Innova support" - depends on TLS_DEVICE - depends on TLS=y || MLX5_CORE=m - depends on MLX5_CORE_EN - depends on MLX5_FPGA - select MLX5_EN_TLS - help - Build TLS support for the Innova family of network cards by Mellanox - Technologies. Innova network cards are comprised of a ConnectX chip - and an FPGA chip on one board. If you select this option, the - mlx5_core driver will include the Innova FPGA core and allow building - sandbox-specific client drivers. -config MLX5_TLS +config MLX5_EN_TLS bool "Mellanox Technologies TLS Connect-X support" depends on TLS_DEVICE depends on TLS=y || MLX5_CORE=m depends on MLX5_CORE_EN - select MLX5_ACCEL - select MLX5_EN_TLS - help - Build TLS support for the Connect-X family of network cards by Mellanox - Technologies. - -config MLX5_EN_TLS - bool help Build support for TLS cryptography-offload acceleration in the NIC. - Note: Support for hardware with this capability needs to be selected - for this option to become available. 
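
The WED register map added above keeps every field layout in the #define table: multi-bit fields are declared with GENMASK() and packed or unpacked with FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, as mtk_wed_hw_init() does when composing MTK_WED_TX_BM_TKID from the WLAN token range. Below is a minimal, self-contained sketch of that encode/decode idiom; the 32-bit GENMASK/FIELD_PREP/FIELD_GET stand-ins and the sample token values are illustrative approximations for userspace, not the kernel's exact macro definitions.

/* Sketch: how the WED driver packs register fields. The helpers below
 * are simplified 32-bit, GCC/Clang-only stand-ins for the kernel's
 * <linux/bitfield.h> macros; field layouts mirror MTK_WED_TX_BM_TKID. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))
#define FIELD_GET(m, r)  (((r) & (m)) >> __builtin_ctz(m))

#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
#define MTK_WED_TX_BM_TKID_END   GENMASK(31, 16)

int main(void)
{
	/* Token range 0x100..0x4ff, standing in for the values
	 * mtk_wed_hw_init() derives from wlan.token_start/wlan.nbuf. */
	uint32_t reg = FIELD_PREP(MTK_WED_TX_BM_TKID_START, 0x100) |
		       FIELD_PREP(MTK_WED_TX_BM_TKID_END, 0x100 + 0x400 - 1);

	printf("reg=%08x start=%x end=%x\n", reg,
	       FIELD_GET(MTK_WED_TX_BM_TKID_START, reg),
	       FIELD_GET(MTK_WED_TX_BM_TKID_END, reg));
	return 0;
}

Because the shift is derived from the mask's lowest set bit, the whole register layout lives in the #define table and call sites never hard-code bit positions.
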
config MLX5_SW_STEERING bool "Mellanox Technologies software-managed steering" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 4bc666714a35..81620c25c77e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -28,7 +28,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \ en_selftest.o en/port.o en/monitor_stats.o en/health.o \ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \ - en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o + en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o lib/crypto.o # # Netdev extra @@ -88,17 +88,13 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib # # Accelerations & FPGA # -mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o -mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o -mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o -mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o - mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ - en_accel/ipsec_stats.o en_accel/ipsec_fs.o + en_accel/ipsec_stats.o en_accel/ipsec_fs.o \ + en_accel/ipsec_offload.o -mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \ +mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \ en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \ en_accel/ktls_tx.o en_accel/ktls_rx.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h deleted file mode 100644 index 82b185121edb..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef __MLX5E_ACCEL_H__ -#define __MLX5E_ACCEL_H__ - -#ifdef CONFIG_MLX5_ACCEL - -#include <linux/skbuff.h> -#include <linux/netdevice.h> - -static inline bool is_metadata_hdr_valid(struct sk_buff *skb) -{ - __be16 *ethtype; - - if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)) - return false; - ethtype = (__be16 *)(skb->data + ETH_ALEN * 2); - if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE)) - return false; - return true; -} - -static inline void remove_metadata_hdr(struct sk_buff *skb) -{ - struct ethhdr *old_eth; - struct ethhdr *new_eth; - - /* Remove the metadata from the buffer */ - old_eth = (struct ethhdr *)skb->data; - new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN); - memmove(new_eth, old_eth, 2 * ETH_ALEN); - /* Ethertype is already in its new place */ - skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN); -} - -#endif /* CONFIG_MLX5_ACCEL */ - -#endif /* __MLX5E_EN_ACCEL_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c deleted file mode 100644 index 09f5ce97af46..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#include <linux/mlx5/device.h> - -#include "accel/ipsec.h" -#include "mlx5_core.h" -#include "fpga/ipsec.h" -#include "accel/ipsec_offload.h" - -void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops; - int err = 0; - - ipsec_ops = (mlx5_ipsec_offload_ops(mdev)) ? - mlx5_ipsec_offload_ops(mdev) : - mlx5_fpga_ipsec_ops(mdev); - - if (!ipsec_ops || !ipsec_ops->init) { - mlx5_core_dbg(mdev, "IPsec ops is not supported\n"); - return; - } - - err = ipsec_ops->init(mdev); - if (err) { - mlx5_core_warn_once(mdev, "Failed to start IPsec device, err = %d\n", err); - return; - } - - mdev->ipsec_ops = ipsec_ops; -} - -void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->cleanup) - return; - - ipsec_ops->cleanup(mdev); -} - -u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->device_caps) - return 0; - - return ipsec_ops->device_caps(mdev); -} -EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps); - -unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->counters_count) - return -EOPNOTSUPP; - - return ipsec_ops->counters_count(mdev); -} - -int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int count) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->counters_read) - return -EOPNOTSUPP; - - return ipsec_ops->counters_read(mdev, counters, count); -} - -void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - u32 *sa_handle) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - __be32 saddr[4] = {}, daddr[4] = {}; - - if (!ipsec_ops || !ipsec_ops->create_hw_context) - return ERR_PTR(-EOPNOTSUPP); - - if (!xfrm->attrs.is_ipv6) { - saddr[3] = xfrm->attrs.saddr.a4; - daddr[3] = xfrm->attrs.daddr.a4; - } else { - memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr)); - memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr)); - } - - return 
ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, xfrm->attrs.spi, - xfrm->attrs.is_ipv6, sa_handle); -} - -void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->free_hw_context) - return; - - ipsec_ops->free_hw_context(context); -} - -struct mlx5_accel_esp_xfrm * -mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 flags) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - struct mlx5_accel_esp_xfrm *xfrm; - - if (!ipsec_ops || !ipsec_ops->esp_create_xfrm) - return ERR_PTR(-EOPNOTSUPP); - - xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, flags); - if (IS_ERR(xfrm)) - return xfrm; - - xfrm->mdev = mdev; - return xfrm; -} -EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm); - -void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm) - return; - - ipsec_ops->esp_destroy_xfrm(xfrm); -} -EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm); - -int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm) - return -EOPNOTSUPP; - - return ipsec_ops->esp_modify_xfrm(xfrm, attrs); -} -EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h deleted file mode 100644 index fbb9c5415d53..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
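/*
 * Context sketch, not part of the patch: the deleted accel/ipsec.c routed
 * every call through an optional mlx5_accel_ipsec_ops table so the FPGA and
 * ConnectX backends could coexist, which is why each wrapper NULL-checks both
 * the table and the slot. With the FPGA path removed, the series calls the
 * offload functions directly. A minimal, self-contained illustration of that
 * dispatch pattern follows; all names are hypothetical.
 */
#include <stdio.h>

struct backend_ops {
	int (*init)(void);          /* optional */
	unsigned int (*caps)(void); /* optional */
};

static unsigned int connectx_caps(void) { return 0x3; }

static const struct backend_ops connectx_ops = {
	.caps = connectx_caps,      /* .init intentionally left NULL */
};

/* Every wrapper must check both the table and the individual slot. */
static unsigned int device_caps(const struct backend_ops *ops)
{
	if (!ops || !ops->caps)
		return 0;
	return ops->caps();
}

int main(void)
{
	printf("caps=%#x\n", device_caps(&connectx_ops)); /* caps=0x3 */
	printf("caps=%#x\n", device_caps(NULL));          /* caps=0  */
	return 0;
}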
- * - */ - -#ifndef __MLX5_ACCEL_IPSEC_H__ -#define __MLX5_ACCEL_IPSEC_H__ - -#include <linux/mlx5/driver.h> -#include <linux/mlx5/accel.h> - -#ifdef CONFIG_MLX5_ACCEL - -#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \ - MLX5_ACCEL_IPSEC_CAP_DEVICE) - -unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev); -int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int count); - -void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - u32 *sa_handle); -void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context); - -void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev); -void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev); - -struct mlx5_accel_ipsec_ops { - u32 (*device_caps)(struct mlx5_core_dev *mdev); - unsigned int (*counters_count)(struct mlx5_core_dev *mdev); - int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count); - void* (*create_hw_context)(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - const __be32 saddr[4], const __be32 daddr[4], - const __be32 spi, bool is_ipv6, u32 *sa_handle); - void (*free_hw_context)(void *context); - int (*init)(struct mlx5_core_dev *mdev); - void (*cleanup)(struct mlx5_core_dev *mdev); - struct mlx5_accel_esp_xfrm* (*esp_create_xfrm)(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 flags); - int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs); - void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm); -}; - -#else - -#define MLX5_IPSEC_DEV(mdev) false - -static inline void * -mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - u32 *sa_handle) -{ - return NULL; -} - -static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) {} - -static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {} - -static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {} - -#endif /* CONFIG_MLX5_ACCEL */ - -#endif /* __MLX5_ACCEL_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h deleted file mode 100644 index 970c66d19c1d..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h +++ /dev/null @@ -1,38 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ -/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. 
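/*
 * Context sketch, not part of the patch: the #else branch of the deleted
 * header above is the usual kernel idiom of static-inline no-op stubs, which
 * lets call sites compile unconditionally when the feature is configured out.
 * A hypothetical, self-contained example of the same idiom:
 */
#include <stdio.h>

#define CONFIG_FEATURE_X 1 /* flip to 0 to exercise the stub path */

#if CONFIG_FEATURE_X
static int feature_x_init(void) { printf("feature up\n"); return 0; }
#else
/* Stub keeps callers free of #ifdefs; the compiler discards it entirely. */
static inline int feature_x_init(void) { return 0; }
#endif

int main(void)
{
	return feature_x_init();
}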
*/ - -#ifndef __MLX5_IPSEC_OFFLOAD_H__ -#define __MLX5_IPSEC_OFFLOAD_H__ - -#include <linux/mlx5/driver.h> -#include "accel/ipsec.h" - -#ifdef CONFIG_MLX5_IPSEC - -const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev); -static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev) -{ - if (!MLX5_CAP_GEN(mdev, ipsec_offload)) - return false; - - if (!MLX5_CAP_GEN(mdev, log_max_dek)) - return false; - - if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & - MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC)) - return false; - - return MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) && - MLX5_CAP_ETH(mdev, insert_trailer); -} - -#else -static inline const struct mlx5_accel_ipsec_ops * -mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { return NULL; } -static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev) -{ - return false; -} - -#endif /* CONFIG_MLX5_IPSEC */ -#endif /* __MLX5_IPSEC_OFFLOAD_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c deleted file mode 100644 index 6c2b86a26863..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
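/*
 * Context sketch, not part of the patch: after this series a single
 * mlx5_ipsec_device_caps() bitmask replaces the boolean helpers above, and
 * callers test individual MLX5_ACCEL_IPSEC_CAP_* bits, as the later xfrm
 * validation hunks show. A self-contained illustration of the bitmask style
 * (bit values hypothetical):
 */
#include <stdio.h>

#define CAP_DEVICE (1u << 0)
#define CAP_ESN    (1u << 1)
#define CAP_IPV6   (1u << 2)

static unsigned int device_caps(int hw_has_esn)
{
	unsigned int caps = CAP_DEVICE | CAP_IPV6;

	/* Optional features contribute their own bits, mirroring the driver. */
	if (hw_has_esn)
		caps |= CAP_ESN;
	return caps;
}

int main(void)
{
	unsigned int caps = device_caps(0);

	if (!(caps & CAP_ESN))
		printf("cannot offload ESN states\n");
	return 0;
}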
- * - */ - -#include <linux/mlx5/device.h> - -#include "accel/tls.h" -#include "mlx5_core.h" -#include "lib/mlx5.h" - -#ifdef CONFIG_MLX5_FPGA_TLS -#include "fpga/tls.h" - -int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid, - bool direction_sx) -{ - return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, - start_offload_tcp_sn, p_swid, - direction_sx); -} - -void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, - bool direction_sx) -{ - mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx); -} - -int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, - u32 seq, __be64 rcd_sn) -{ - return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn); -} - -bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) -{ - return mlx5_fpga_is_tls_device(mdev) || - mlx5_accel_is_ktls_device(mdev); -} - -u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) -{ - return mlx5_fpga_tls_device_caps(mdev); -} - -int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) -{ - return mlx5_fpga_tls_init(mdev); -} - -void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) -{ - mlx5_fpga_tls_cleanup(mdev); -} -#endif - -#ifdef CONFIG_MLX5_TLS -int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, - struct tls_crypto_info *crypto_info, - u32 *p_key_id) -{ - u32 sz_bytes; - void *key; - - switch (crypto_info->cipher_type) { - case TLS_CIPHER_AES_GCM_128: { - struct tls12_crypto_info_aes_gcm_128 *info = - (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; - - key = info->key; - sz_bytes = sizeof(info->key); - break; - } - case TLS_CIPHER_AES_GCM_256: { - struct tls12_crypto_info_aes_gcm_256 *info = - (struct tls12_crypto_info_aes_gcm_256 *)crypto_info; - - key = info->key; - sz_bytes = sizeof(info->key); - break; - } - default: - return -EINVAL; - } - - return mlx5_create_encryption_key(mdev, key, sz_bytes, - MLX5_ACCEL_OBJ_TLS_KEY, - p_key_id); -} - -void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) -{ - mlx5_destroy_encryption_key(mdev, key_id); -} -#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h deleted file mode 100644 index fd874f0c380a..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
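/*
 * Context sketch, not part of the patch: mlx5_ktls_create_key() above (moved
 * verbatim to en_accel/ktls.c later in this series) only needs the raw key
 * bytes and their length, extracted per cipher type. The same switch pattern,
 * self-contained and with hypothetical types:
 */
#include <stdio.h>
#include <stddef.h>

struct crypto_info { int cipher_type; };
struct aes_gcm_128 { struct crypto_info info; unsigned char key[16]; };
struct aes_gcm_256 { struct crypto_info info; unsigned char key[32]; };

enum { CIPHER_AES_GCM_128 = 1, CIPHER_AES_GCM_256 = 2 };

static int key_bytes(const struct crypto_info *ci, size_t *sz)
{
	switch (ci->cipher_type) {
	case CIPHER_AES_GCM_128:
		*sz = sizeof(((struct aes_gcm_128 *)0)->key);
		return 0;
	case CIPHER_AES_GCM_256:
		*sz = sizeof(((struct aes_gcm_256 *)0)->key);
		return 0;
	default:
		return -1; /* mirrors the driver's -EINVAL */
	}
}

int main(void)
{
	struct aes_gcm_256 info = { .info.cipher_type = CIPHER_AES_GCM_256 };
	size_t sz;

	if (!key_bytes(&info.info, &sz))
		printf("key is %zu bytes\n", sz); /* 32 */
	return 0;
}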
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#ifndef __MLX5_ACCEL_TLS_H__ -#define __MLX5_ACCEL_TLS_H__ - -#include <linux/mlx5/driver.h> -#include <linux/tls.h> - -#ifdef CONFIG_MLX5_TLS -int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, - struct tls_crypto_info *crypto_info, - u32 *p_key_id); -void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id); - -static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev) -{ - return MLX5_CAP_GEN(mdev, tls_tx); -} - -static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev) -{ - return MLX5_CAP_GEN(mdev, tls_rx); -} - -static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) -{ - if (!mlx5_accel_is_ktls_tx(mdev) && - !mlx5_accel_is_ktls_rx(mdev)) - return false; - - if (!MLX5_CAP_GEN(mdev, log_max_dek)) - return false; - - return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128); -} - -static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, - struct tls_crypto_info *crypto_info) -{ - switch (crypto_info->cipher_type) { - case TLS_CIPHER_AES_GCM_128: - if (crypto_info->version == TLS_1_2_VERSION) - return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128); - break; - } - - return false; -} -#else -static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev) -{ return false; } - -static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev) -{ return false; } - -static inline int -mlx5_ktls_create_key(struct mlx5_core_dev *mdev, - struct tls_crypto_info *crypto_info, - u32 *p_key_id) { return -ENOTSUPP; } -static inline void -mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) {} - -static inline bool -mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; } -static inline bool -mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, - struct tls_crypto_info *crypto_info) { return false; } -#endif - -enum { - MLX5_ACCEL_TLS_TX = BIT(0), - MLX5_ACCEL_TLS_RX = BIT(1), - MLX5_ACCEL_TLS_V12 = BIT(2), - MLX5_ACCEL_TLS_V13 = BIT(3), - MLX5_ACCEL_TLS_LRO = BIT(4), - MLX5_ACCEL_TLS_IPV6 = BIT(5), - MLX5_ACCEL_TLS_AES_GCM128 = BIT(30), - MLX5_ACCEL_TLS_AES_GCM256 = BIT(31), -}; - -struct mlx5_ifc_tls_flow_bits { - u8 src_port[0x10]; - u8 dst_port[0x10]; - union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; - union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; - u8 ipv6[0x1]; - u8 direction_sx[0x1]; - u8 reserved_at_2[0x1e]; -}; - -#ifdef CONFIG_MLX5_FPGA_TLS -int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid, - bool direction_sx); -void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, - bool direction_sx); -int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, - u32 seq, __be64 rcd_sn); -bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev); -u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev); -int mlx5_accel_tls_init(struct mlx5_core_dev *mdev); -void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev); - -#else - -static inline int -mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid, - bool direction_sx) { return -ENOTSUPP; } -static inline void mlx5_accel_tls_del_flow(struct 
mlx5_core_dev *mdev, u32 swid, - bool direction_sx) { } -static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, - u32 seq, __be64 rcd_sn) { return 0; } -static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) -{ - return mlx5_accel_is_ktls_device(mdev); -} -static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; } -static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; } -static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { } -#endif - -#endif /* __MLX5_ACCEL_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 057dde6f4417..e8789e6d7e7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -584,14 +584,6 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink) struct mlx5_core_dev *dev = devlink_priv(devlink); union devlink_param_value value; - if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS) - strcpy(value.vstr, "dmfs"); - else - strcpy(value.vstr, "smfs"); - devlink_param_driverinit_value_set(devlink, - MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE, - value); - value.vbool = MLX5_CAP_GEN(dev, roce); devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, @@ -602,18 +594,6 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink) devlink_param_driverinit_value_set(devlink, MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM, value); - - if (MLX5_ESWITCH_MANAGER(dev)) { - if (mlx5_esw_vport_match_metadata_supported(dev->priv.eswitch)) { - dev->priv.eswitch->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; - value.vbool = true; - } else { - value.vbool = false; - } - devlink_param_driverinit_value_set(devlink, - MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA, - value); - } #endif value.vu32 = MLX5_COMP_EQ_SIZE; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c index 7841ef6c193c..c5bb79a4fa57 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c @@ -259,6 +259,9 @@ const char *parse_fs_dst(struct trace_seq *p, case MLX5_FLOW_DESTINATION_TYPE_PORT: trace_seq_printf(p, "port\n"); break; + case MLX5_FLOW_DESTINATION_TYPE_NONE: + trace_seq_printf(p, "none\n"); + break; } trace_seq_putc(p, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 8653ac0fd865..b90902db7819 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -354,7 +354,6 @@ enum { MLX5E_RQ_STATE_AM, MLX5E_RQ_STATE_NO_CSUM_COMPLETE, MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */ - MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */ MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */ MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */ }; @@ -649,8 +648,8 @@ typedef struct sk_buff * (*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx); typedef struct sk_buff * -(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, - struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); +(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, + u32 cqe_bcnt); typedef bool (*mlx5e_fp_post_rx_wqes)(struct 
mlx5e_rq *rq); typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h index 9976de8b9047..b59aee75de94 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h @@ -40,13 +40,11 @@ struct mlx5e_dcbx_dp { }; void mlx5e_dcbnl_build_netdev(struct net_device *netdev); -void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev); void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv); void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv); void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv); #else static inline void mlx5e_dcbnl_build_netdev(struct net_device *netdev) {} -static inline void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev) {} static inline void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) {} static inline void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv) {} static inline void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv) {} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 08fd1370a8b0..1e8700957280 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -5,8 +5,7 @@ #include "en/txrx.h" #include "en/port.h" #include "en_accel/en_accel.h" -#include "accel/ipsec.h" -#include "fpga/ipsec.h" +#include "en_accel/ipsec_offload.h" static bool mlx5e_rx_is_xdp(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) @@ -207,7 +206,7 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE); u16 stop_room; - stop_room = mlx5e_tls_get_stop_room(mdev, params); + stop_room = mlx5e_ktls_get_stop_room(mdev, params); stop_room += mlx5e_stop_room_for_max_wqe(mdev); if (is_mpwqe) /* A MPWQE can take up to the maximum-sized WQE + all the normal @@ -327,9 +326,6 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, if (!mlx5e_check_fragmented_striding_rq_cap(mdev)) return false; - if (mlx5_fpga_is_ipsec_device(mdev)) - return false; - if (params->xdp_prog) { /* XSK params are not considered here. 
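/*
 * Context sketch, not part of the patch: mlx5e_calc_sq_stop_room() in the
 * params.c hunk above sums per-feature reservations so the send queue never
 * overflows mid-WQE; the series only swaps the TLS contribution to the kTLS
 * helper. A simplified, self-contained model of that accounting (the numbers
 * are hypothetical):
 */
#include <stdio.h>

static unsigned int calc_stop_room(int mpwqe, unsigned int max_wqe_bbs,
				   unsigned int tls_bbs)
{
	unsigned int stop_room = tls_bbs + max_wqe_bbs;

	/* An MPWQE may take the maximum-sized WQE plus a normal reservation. */
	if (mpwqe)
		stop_room += max_wqe_bbs;
	return stop_room;
}

int main(void)
{
	printf("%u\n", calc_stop_room(1, 16, 8)); /* 40 */
	return 0;
}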
If striding RQ is in use, * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will @@ -423,9 +419,6 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, int max_mtu; int i; - if (mlx5_fpga_is_ipsec_device(mdev)) - byte_count += MLX5E_METADATA_ETHER_LEN; - if (mlx5e_rx_is_linear_skb(params, xsk)) { int frag_stride; @@ -696,8 +689,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev, void *wq = MLX5_ADDR_OF(sqc, sqc, wq); bool allow_swp; - allow_swp = mlx5_geneve_tx_allowed(mdev) || - !!MLX5_IPSEC_DEV(mdev); + allow_swp = + mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev); mlx5e_build_sq_param_common(mdev, param); MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); MLX5_SET(sqc, sqc, allow_swp, allow_swp); @@ -804,7 +797,7 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev, static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev) { - if (mlx5e_accel_is_ktls_rx(mdev)) + if (mlx5e_is_ktls_rx(mdev)) return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; @@ -833,7 +826,7 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev, mlx5e_build_sq_param_common(mdev, param); param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */ - param->is_tls = mlx5e_accel_is_ktls_rx(mdev); + param->is_tls = mlx5e_is_ktls_rx(mdev); if (param->is_tls) param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */ MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c index 59988e24b704..b979826f3f6c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c @@ -100,7 +100,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS; dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec); - kfree(spec); + kvfree(spec); if (!dr_matcher) return ERR_PTR(-EINVAL); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c index fd4504518578..1cbd2eb9d04f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c @@ -93,6 +93,7 @@ sampler_termtbl_create(struct mlx5e_tc_psample *tc_psample) act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; dest.vport.num = esw->manager_vport; + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; tc_psample->termtbl_rule = mlx5_add_flow_rules(tc_psample->termtbl, NULL, &act, &dest, 1); if (IS_ERR(tc_psample->termtbl_rule)) { err = PTR_ERR(tc_psample->termtbl_rule); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index e49f51124c74..89aa0208b5d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -1812,7 +1812,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL); if (!ct_flow) { - kfree(ct_flow); return ERR_PTR(-ENOMEM); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c index 021da085e603..9a1553598a7c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c @@ -80,7 
+80,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, } struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, - struct mlx5_cqe64 *cqe, struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) { @@ -99,11 +98,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool); net_prefetch(xdp->data); - if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) { - rq->stats->wqe_err++; - return NULL; - } - prog = rcu_dereference(rq->xdp_prog); if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp))) return NULL; /* page/packet was consumed by XDP */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h index 7f88ccf67fdd..a8cfab4a393c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h @@ -15,7 +15,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, u32 head_offset, u32 page_idx); struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, - struct mlx5_cqe64 *cqe, struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c index 3ec0c17db010..4902ef74fedf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c @@ -23,7 +23,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) c = priv->channels.c[ix]; if (unlikely(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))) - return -ENXIO; + return -EINVAL; if (!napi_if_scheduled_mark_missed(&c->napi)) { /* To avoid WQE overrun, don't post a NOP if async_icosq is not diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index 62cde3e87c2e..04c0a5e1c89a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -37,8 +37,8 @@ #include <linux/skbuff.h> #include <linux/netdevice.h> #include "en_accel/ipsec_rxtx.h" -#include "en_accel/tls.h" -#include "en_accel/tls_rxtx.h" +#include "en_accel/ktls.h" +#include "en_accel/ktls_txrx.h" #include "en.h" #include "en/txrx.h" @@ -124,8 +124,9 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev, #ifdef CONFIG_MLX5_EN_TLS /* May send SKBs and WQEs. 
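/*
 * Context sketch, not part of the patch: mlx5e_accel_tx_begin() in the
 * en_accel.h hunk below chains optional offload hooks, each able to veto
 * transmission; the series merely renames the TLS hook to its kTLS
 * equivalent. A hypothetical, self-contained model of that early-exit chain:
 */
#include <stdio.h>
#include <stdbool.h>

struct pkt { bool tls_offloaded; bool bad; };

static bool tls_handle(struct pkt *p) { return !p->bad; }

static bool accel_tx_begin(struct pkt *p)
{
	if (p->tls_offloaded)
		if (!tls_handle(p))
			return false; /* caller drops the packet */
	return true;                  /* continue building the WQE */
}

int main(void)
{
	struct pkt p = { .tls_offloaded = true, .bad = false };

	printf("%s\n", accel_tx_begin(&p) ? "send" : "drop");
	return 0;
}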
*/ - if (mlx5e_tls_skb_offloaded(skb)) - if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls))) + if (mlx5e_ktls_skb_offloaded(skb)) + if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb, + &state->tls))) return false; #endif @@ -174,7 +175,7 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq, struct mlx5_wqe_inline_seg *inlseg) { #ifdef CONFIG_MLX5_EN_TLS - mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls); + mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls); #endif #ifdef CONFIG_MLX5_EN_IPSEC diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c index 4c4ee524176c..3ae6067c7e6b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c @@ -102,7 +102,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv, break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: - if (!sk->sk_ipv6only && + if (!ipv6_only_sock(sk) && ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) { accel_fs_tcp_set_ipv4_flow(spec, sk); ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 299e3f0fcb5c..c280a18ff002 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -226,8 +226,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) return -EINVAL; } if (x->props.flags & XFRM_STATE_ESN && - !(mlx5_accel_ipsec_device_caps(priv->mdev) & - MLX5_ACCEL_IPSEC_CAP_ESN)) { + !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_ESN)) { netdev_info(netdev, "Cannot offload ESN xfrm states\n"); return -EINVAL; } @@ -275,8 +274,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) return -EINVAL; } if (x->props.family == AF_INET6 && - !(mlx5_accel_ipsec_device_caps(priv->mdev) & - MLX5_ACCEL_IPSEC_CAP_IPV6)) { + !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_IPV6)) { netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n"); return -EINVAL; } @@ -286,9 +284,6 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv, struct mlx5e_ipsec_sa_entry *sa_entry) { - if (!mlx5_is_ipsec_device(priv->mdev)) - return 0; - return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs, sa_entry->ipsec_obj_id, &sa_entry->ipsec_rule); @@ -297,9 +292,6 @@ static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv, static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv, struct mlx5e_ipsec_sa_entry *sa_entry) { - if (!mlx5_is_ipsec_device(priv->mdev)) - return; - mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs, &sa_entry->ipsec_rule); } @@ -333,9 +325,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) /* create xfrm */ mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs); - sa_entry->xfrm = - mlx5_accel_esp_create_xfrm(priv->mdev, &attrs, - MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA); + sa_entry->xfrm = mlx5_accel_esp_create_xfrm(priv->mdev, &attrs); if (IS_ERR(sa_entry->xfrm)) { err = PTR_ERR(sa_entry->xfrm); goto err_sa_entry; @@ -414,7 +404,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv) { struct mlx5e_ipsec *ipsec = NULL; - if (!MLX5_IPSEC_DEV(priv->mdev)) { + if (!mlx5_ipsec_device_caps(priv->mdev)) { netdev_dbg(priv->netdev, "Not an IPSec offload device\n"); return 0; } @@ -425,10 
+415,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv) hash_init(ipsec->sadb_rx); spin_lock_init(&ipsec->sadb_rx_lock); - ida_init(&ipsec->halloc); ipsec->en_priv = priv; - ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) & - MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER); ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0, priv->netdev->name); if (!ipsec->wq) { @@ -452,7 +439,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv) mlx5e_accel_ipsec_fs_cleanup(priv); destroy_workqueue(ipsec->wq); - ida_destroy(&ipsec->halloc); kfree(ipsec); priv->ipsec = NULL; } @@ -531,7 +517,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) struct mlx5_core_dev *mdev = priv->mdev; struct net_device *netdev = priv->netdev; - if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) || + if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) || !MLX5_CAP_ETH(mdev, swp)) { mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n"); return; @@ -550,15 +536,13 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) netdev->features |= NETIF_F_HW_ESP_TX_CSUM; netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM; - if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) || + if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) || !MLX5_CAP_ETH(mdev, swp_lso)) { mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n"); return; } - if (mlx5_is_ipsec_device(mdev)) - netdev->gso_partial_features |= NETIF_F_GSO_ESP; - + netdev->gso_partial_features |= NETIF_F_GSO_ESP; mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n"); netdev->features |= NETIF_F_GSO_ESP; netdev->hw_features |= NETIF_F_GSO_ESP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 6164c7f59efb..a0e9dade09e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -40,7 +40,7 @@ #include <net/xfrm.h> #include <linux/idr.h> -#include "accel/ipsec.h" +#include "ipsec_offload.h" #define MLX5E_IPSEC_SADB_RX_BITS 10 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L @@ -55,24 +55,6 @@ struct mlx5e_ipsec_sw_stats { atomic64_t ipsec_tx_drop_no_state; atomic64_t ipsec_tx_drop_not_ip; atomic64_t ipsec_tx_drop_trailer; - atomic64_t ipsec_tx_drop_metadata; -}; - -struct mlx5e_ipsec_stats { - u64 ipsec_dec_in_packets; - u64 ipsec_dec_out_packets; - u64 ipsec_dec_bypass_packets; - u64 ipsec_enc_in_packets; - u64 ipsec_enc_out_packets; - u64 ipsec_enc_bypass_packets; - u64 ipsec_dec_drop_packets; - u64 ipsec_dec_auth_fail_packets; - u64 ipsec_enc_drop_packets; - u64 ipsec_add_sa_success; - u64 ipsec_add_sa_fail; - u64 ipsec_del_sa_success; - u64 ipsec_del_sa_fail; - u64 ipsec_cmd_drop; }; struct mlx5e_accel_fs_esp; @@ -81,11 +63,8 @@ struct mlx5e_ipsec_tx; struct mlx5e_ipsec { struct mlx5e_priv *en_priv; DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS); - bool no_trailer; - spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */ - struct ida halloc; + spinlock_t sadb_rx_lock; /* Protects sadb_rx */ struct mlx5e_ipsec_sw_stats sw_stats; - struct mlx5e_ipsec_stats stats; struct workqueue_struct *wq; struct mlx5e_accel_fs_esp *rx_fs; struct mlx5e_ipsec_tx *tx_fs; @@ -116,7 +95,6 @@ struct mlx5e_ipsec_sa_entry { struct mlx5e_ipsec_rule ipsec_rule; }; -void mlx5e_ipsec_build_inverse_table(void); int mlx5e_ipsec_init(struct mlx5e_priv *priv); void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv); void mlx5e_ipsec_build_netdev(struct 
mlx5e_priv *priv); @@ -125,11 +103,6 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev, unsigned int handle); #else - -static inline void mlx5e_ipsec_build_inverse_table(void) -{ -} - static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv) { return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 17da23dff0ed..66b529e36ea1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -2,7 +2,7 @@ /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ #include <linux/netdevice.h> -#include "accel/ipsec_offload.h" +#include "ipsec_offload.h" #include "ipsec_fs.h" #include "fs_core.h" @@ -700,9 +700,6 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) { int err; - if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec) - return -EOPNOTSUPP; - err = fs_init_tx(priv); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h index 3389b3bb3ef8..b70953979709 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h @@ -6,10 +6,9 @@ #include "en.h" #include "ipsec.h" -#include "accel/ipsec_offload.h" +#include "ipsec_offload.h" #include "en/fs.h" -#ifdef CONFIG_MLX5_EN_IPSEC void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv); int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv); int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, @@ -19,8 +18,4 @@ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, struct mlx5_accel_esp_xfrm_attrs *attrs, struct mlx5e_ipsec_rule *ipsec_rule); -#else -static inline void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv) {} -static inline int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) { return 0; } -#endif #endif /* __MLX5_IPSEC_STEERING_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index d6667d38e1de..37c9880719cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -1,14 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB -/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ +/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. 
*/ #include "mlx5_core.h" #include "ipsec_offload.h" #include "lib/mlx5.h" #include "en_accel/ipsec_fs.h" -#define MLX5_IPSEC_DEV_BASIC_CAPS (MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | \ - MLX5_ACCEL_IPSEC_CAP_LSO) - struct mlx5_ipsec_sa_ctx { struct rhash_head hash; u32 enc_key_id; @@ -25,25 +22,37 @@ struct mlx5_ipsec_esp_xfrm { struct mlx5_accel_esp_xfrm accel_xfrm; }; -static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev) +u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) { - u32 caps = MLX5_IPSEC_DEV_BASIC_CAPS; + u32 caps; + + if (!MLX5_CAP_GEN(mdev, ipsec_offload)) + return 0; + + if (!MLX5_CAP_GEN(mdev, log_max_dek)) + return 0; + + if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC)) + return 0; - if (!mlx5_is_ipsec_device(mdev)) + if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) || + !MLX5_CAP_ETH(mdev, insert_trailer)) return 0; if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) || !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt)) return 0; + caps = MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | + MLX5_ACCEL_IPSEC_CAP_LSO; + if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) && MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt)) caps |= MLX5_ACCEL_IPSEC_CAP_ESP; - if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) { + if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) caps |= MLX5_ACCEL_IPSEC_CAP_ESN; - caps |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN; - } /* We can accommodate up to 2^24 different IPsec objects * because we use up to 24 bit in flow table metadata @@ -52,6 +61,7 @@ static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev) WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24); return caps; } +EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps); static int mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, @@ -94,8 +104,7 @@ mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, static struct mlx5_accel_esp_xfrm * mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 flags) + const struct mlx5_accel_esp_xfrm_attrs *attrs) { struct mlx5_ipsec_esp_xfrm *mxfrm; int err = 0; @@ -274,11 +283,6 @@ static void mlx5_ipsec_offload_delete_sa_ctx(void *context) mutex_unlock(&mxfrm->lock); } -static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev) -{ - return 0; -} - static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev, struct mlx5_ipsec_obj_attrs *attrs, u32 ipsec_id) @@ -366,20 +370,51 @@ change_sw_xfrm_attrs: return err; } -static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = { - .device_caps = mlx5_ipsec_offload_device_caps, - .create_hw_context = mlx5_ipsec_offload_create_sa_ctx, - .free_hw_context = mlx5_ipsec_offload_delete_sa_ctx, - .init = mlx5_ipsec_offload_init, - .esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm, - .esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm, - .esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm, -}; +void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + u32 *sa_handle) +{ + __be32 saddr[4] = {}, daddr[4] = {}; + + if (!xfrm->attrs.is_ipv6) { + saddr[3] = xfrm->attrs.saddr.a4; + daddr[3] = xfrm->attrs.daddr.a4; + } else { + memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr)); + memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr)); + } + + return mlx5_ipsec_offload_create_sa_ctx(mdev, xfrm, saddr, daddr, + xfrm->attrs.spi, + xfrm->attrs.is_ipv6, sa_handle); +} + +void 
mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) +{ + mlx5_ipsec_offload_delete_sa_ctx(context); +} -const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) +struct mlx5_accel_esp_xfrm * +mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs) { - if (!mlx5_ipsec_offload_device_caps(mdev)) - return NULL; + struct mlx5_accel_esp_xfrm *xfrm; - return &ipsec_offload_ops; + xfrm = mlx5_ipsec_offload_esp_create_xfrm(mdev, attrs); + if (IS_ERR(xfrm)) + return xfrm; + + xfrm->mdev = mdev; + return xfrm; +} + +void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) +{ + mlx5_ipsec_offload_esp_destroy_xfrm(xfrm); +} + +int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + return mlx5_ipsec_offload_esp_modify_xfrm(xfrm, attrs); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h new file mode 100644 index 000000000000..7dac104e6ef1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ + +#ifndef __MLX5_IPSEC_OFFLOAD_H__ +#define __MLX5_IPSEC_OFFLOAD_H__ + +#include <linux/mlx5/driver.h> +#include <linux/mlx5/accel.h> + +void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + u32 *sa_handle); +void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context); +#endif /* __MLX5_IPSEC_OFFLOAD_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index b56fea142c24..9b65c765cbd9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -34,78 +34,16 @@ #include <crypto/aead.h> #include <net/xfrm.h> #include <net/esp.h> -#include "accel/ipsec_offload.h" +#include "ipsec_offload.h" #include "en_accel/ipsec_rxtx.h" #include "en_accel/ipsec.h" -#include "accel/accel.h" #include "en.h" enum { - MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11, - MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12, - MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17, -}; - -struct mlx5e_ipsec_rx_metadata { - unsigned char nexthdr; - __be32 sa_handle; -} __packed; - -enum { MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8, MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9, }; -struct mlx5e_ipsec_tx_metadata { - __be16 mss_inv; /* 1/MSS in 16bit fixed point, only for LSO */ - __be16 seq; /* LSBs of the first TCP seq, only for LSO */ - u8 esp_next_proto; /* Next protocol of ESP */ -} __packed; - -struct mlx5e_ipsec_metadata { - unsigned char syndrome; - union { - unsigned char raw[5]; - /* from FPGA to host, on successful decrypt */ - struct mlx5e_ipsec_rx_metadata rx; - /* from host to FPGA */ - struct mlx5e_ipsec_tx_metadata tx; - } __packed content; - /* packet type ID field */ - __be16 ethertype; -} __packed; - -#define MAX_LSO_MSS 2048 - -/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */ -static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS]; - -static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb) -{ - return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size]; -} - -static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb) -{ - 
struct mlx5e_ipsec_metadata *mdata; - struct ethhdr *eth; - - if (unlikely(skb_cow_head(skb, sizeof(*mdata)))) - return ERR_PTR(-ENOMEM); - - eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata)); - skb->mac_header -= sizeof(*mdata); - mdata = (struct mlx5e_ipsec_metadata *)(eth + 1); - - memmove(skb->data, skb->data + sizeof(*mdata), - 2 * ETH_ALEN); - - eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE); - - memset(mdata->content.raw, 0, sizeof(mdata->content.raw)); - return mdata; -} - static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x) { unsigned int alen = crypto_aead_authsize(x->data); @@ -244,40 +182,6 @@ void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x, skb_store_bits(skb, iv_offset, &seqno, 8); } -static void mlx5e_ipsec_set_metadata(struct sk_buff *skb, - struct mlx5e_ipsec_metadata *mdata, - struct xfrm_offload *xo) -{ - struct ip_esp_hdr *esph; - struct tcphdr *tcph; - - if (skb_is_gso(skb)) { - /* Add LSO metadata indication */ - esph = ip_esp_hdr(skb); - tcph = inner_tcp_hdr(skb); - netdev_dbg(skb->dev, " Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n", - skb->network_header, - skb->transport_header, - skb->inner_network_header, - skb->inner_transport_header); - netdev_dbg(skb->dev, " Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n", - skb->len, skb_shinfo(skb)->gso_size, - ntohs(tcph->source), ntohs(tcph->dest), - ntohl(tcph->seq), ntohl(esph->seq_no)); - mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP; - mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb); - mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF); - } else { - mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD; - } - mdata->content.tx.esp_next_proto = xo->proto; - - netdev_dbg(skb->dev, " TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n", - mdata->syndrome, mdata->content.tx.esp_next_proto, - ntohs(mdata->content.tx.mss_inv), - ntohs(mdata->content.tx.seq)); -} - void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe, struct mlx5e_accel_tx_ipsec_state *ipsec_st, struct mlx5_wqe_inline_seg *inlseg) @@ -298,16 +202,14 @@ static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv, ipsec_st->x = x; ipsec_st->xo = xo; - if (mlx5_is_ipsec_device(priv->mdev)) { - aead = x->data; - alen = crypto_aead_authsize(aead); - blksize = ALIGN(crypto_aead_blocksize(aead), 4); - clen = ALIGN(skb->len + 2, blksize); - plen = max_t(u32, clen - skb->len, 4); - tailen = plen + alen; - ipsec_st->plen = plen; - ipsec_st->tailen = tailen; - } + aead = x->data; + alen = crypto_aead_authsize(aead); + blksize = ALIGN(crypto_aead_blocksize(aead), 4); + clen = ALIGN(skb->len + 2, blksize); + plen = max_t(u32, clen - skb->len, 4); + tailen = plen + alen; + ipsec_st->plen = plen; + ipsec_st->tailen = tailen; return 0; } @@ -340,19 +242,17 @@ void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb, ((struct iphdr *)skb_network_header(skb))->protocol : ((struct ipv6hdr *)skb_network_header(skb))->nexthdr; - if (mlx5_is_ipsec_device(priv->mdev)) { - eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC); - eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER); - encap = x->encap; - if (!encap) { - eseg->trailer |= (l3_proto == IPPROTO_ESP) ? - cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) : - cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC); - } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) { - eseg->trailer |= (l3_proto == IPPROTO_ESP) ? 
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) : - cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC); - } + eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC); + eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER); + encap = x->encap; + if (!encap) { + eseg->trailer |= (l3_proto == IPPROTO_ESP) ? + cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) : + cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC); + } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) { + eseg->trailer |= (l3_proto == IPPROTO_ESP) ? + cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) : + cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC); } } @@ -363,7 +263,6 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev, struct mlx5e_priv *priv = netdev_priv(netdev); struct xfrm_offload *xo = xfrm_offload(skb); struct mlx5e_ipsec_sa_entry *sa_entry; - struct mlx5e_ipsec_metadata *mdata; struct xfrm_state *x; struct sec_path *sp; @@ -392,19 +291,8 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev, goto drop; } - if (MLX5_CAP_GEN(priv->mdev, fpga)) { - mdata = mlx5e_ipsec_add_metadata(skb); - if (IS_ERR(mdata)) { - atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata); - goto drop; - } - } - sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; sa_entry->set_iv_op(skb, x, xo); - if (MLX5_CAP_GEN(priv->mdev, fpga)) - mlx5e_ipsec_set_metadata(skb, mdata, xo); - mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st); return true; @@ -414,79 +302,6 @@ drop: return false; } -static inline struct xfrm_state * -mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb, - struct mlx5e_ipsec_metadata *mdata) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - struct xfrm_offload *xo; - struct xfrm_state *xs; - struct sec_path *sp; - u32 sa_handle; - - sp = secpath_set(skb); - if (unlikely(!sp)) { - atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc); - return NULL; - } - - sa_handle = be32_to_cpu(mdata->content.rx.sa_handle); - xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle); - if (unlikely(!xs)) { - atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss); - return NULL; - } - - sp = skb_sec_path(skb); - sp->xvec[sp->len++] = xs; - sp->olen++; - - xo = xfrm_offload(skb); - xo->flags = CRYPTO_DONE; - switch (mdata->syndrome) { - case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED: - xo->status = CRYPTO_SUCCESS; - if (likely(priv->ipsec->no_trailer)) { - xo->flags |= XFRM_ESP_NO_TRAILER; - xo->proto = mdata->content.rx.nexthdr; - } - break; - case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED: - xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED; - break; - case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO: - xo->status = CRYPTO_INVALID_PROTOCOL; - break; - default: - atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome); - return NULL; - } - return xs; -} - -struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, - struct sk_buff *skb, u32 *cqe_bcnt) -{ - struct mlx5e_ipsec_metadata *mdata; - struct xfrm_state *xs; - - if (!is_metadata_hdr_valid(skb)) - return skb; - - /* Use the metadata */ - mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN); - xs = mlx5e_ipsec_build_sp(netdev, skb, mdata); - if (unlikely(!xs)) { - kfree_skb(skb); - return NULL; - } - - remove_metadata_hdr(skb); - *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; - - return skb; -} - enum { MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED, MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED, @@ -528,8 +343,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, switch 
(MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) { case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED: xo->status = CRYPTO_SUCCESS; - if (WARN_ON_ONCE(priv->ipsec->no_trailer)) - xo->flags |= XFRM_ESP_NO_TRAILER; break; case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED: xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED; @@ -541,21 +354,3 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome); } } - -void mlx5e_ipsec_build_inverse_table(void) -{ - u16 mss_inv; - u32 mss; - - /* Calculate 1/x inverse table for use in GSO data path. - * Using this table, we provide the IPSec accelerator with the value of - * 1/gso_size so that it can infer the position of each segment inside - * the GSO, and increment the ESP sequence number, and generate the IV. - * The HW needs this value in Q0.16 fixed-point number format - */ - mlx5e_ipsec_inverse_table[1] = htons(0xFFFF); - for (mss = 2; mss < MAX_LSO_MSS; mss++) { - mss_inv = div_u64(1ULL << 32, mss) >> 16; - mlx5e_ipsec_inverse_table[mss] = htons(mss_inv); - } -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index 428881e0adcb..0ae4e12ce528 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -53,9 +53,6 @@ struct mlx5e_accel_tx_ipsec_state { #ifdef CONFIG_MLX5_EN_IPSEC -struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, - struct sk_buff *skb, u32 *cqe_bcnt); - void mlx5e_ipsec_inverse_table_init(void); void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x, struct xfrm_offload *xo); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c index 5cb936541b9e..3aace1c2a763 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c @@ -35,27 +35,9 @@ #include <net/sock.h> #include "en.h" -#include "accel/ipsec.h" +#include "ipsec_offload.h" #include "fpga/sdk.h" #include "en_accel/ipsec.h" -#include "fpga/ipsec.h" - -static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = { - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_out_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_bypass_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_in_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_out_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_bypass_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_drop_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_auth_fail_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_drop_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_success) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_fail) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_success) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_fail) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_cmd_drop) }, -}; static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) }, @@ -65,13 +47,11 @@ static const struct counter_desc 
mlx5e_ipsec_sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) }, { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) }, { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_trailer) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_metadata) }, }; #define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \ atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset)) -#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc) #define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc) static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw) @@ -103,45 +83,4 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw) return idx; } -static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw) -{ - return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0; -} - -static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw) -{ - int ret = 0; - - if (priv->ipsec) - ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats, - NUM_IPSEC_HW_COUNTERS); - if (ret) - memset(&priv->ipsec->stats, 0, sizeof(priv->ipsec->stats)); -} - -static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw) -{ - unsigned int i; - - if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) - for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - mlx5e_ipsec_hw_stats_desc[i].format); - - return idx; -} - -static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw) -{ - int i; - - if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) - for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats, - mlx5e_ipsec_hw_stats_desc, - i); - return idx; -} - MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0); -MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index d93aadbf10da..814f2a56f633 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -2,11 +2,49 @@ // Copyright (c) 2019 Mellanox Technologies. 
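/*
 * Context sketch, not part of the patch: the deleted FPGA path above
 * precomputed 1/mss as a Q0.16 fixed-point table (see the removed
 * mlx5e_ipsec_build_inverse_table) so hardware could locate GSO segment
 * boundaries by multiplication instead of division. The arithmetic,
 * self-contained:
 */
#include <stdio.h>
#include <stdint.h>

/* Q0.16 reciprocal: floor(2^32 / x) >> 16 equals floor(2^16 / x) for x > 1. */
static uint16_t q016_inverse(uint32_t x)
{
	if (x == 1)
		return 0xFFFF; /* 1.0 is not representable in Q0.16; saturate */
	return (uint16_t)(((1ULL << 32) / x) >> 16);
}

int main(void)
{
	/* 1/1460 in Q0.16 is 44, since 44 / 65536 ~= 0.000671 ~= 1/1490. */
	printf("inv(1460) = %u\n", q016_inverse(1460));
	return 0;
}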
#include "en.h" -#include "en_accel/tls.h" +#include "lib/mlx5.h" #include "en_accel/ktls.h" #include "en_accel/ktls_utils.h" #include "en_accel/fs_tcp.h" +int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, + struct tls_crypto_info *crypto_info, + u32 *p_key_id) +{ + u32 sz_bytes; + void *key; + + switch (crypto_info->cipher_type) { + case TLS_CIPHER_AES_GCM_128: { + struct tls12_crypto_info_aes_gcm_128 *info = + (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; + + key = info->key; + sz_bytes = sizeof(info->key); + break; + } + case TLS_CIPHER_AES_GCM_256: { + struct tls12_crypto_info_aes_gcm_256 *info = + (struct tls12_crypto_info_aes_gcm_256 *)crypto_info; + + key = info->key; + sz_bytes = sizeof(info->key); + break; + } + default: + return -EINVAL; + } + + return mlx5_create_encryption_key(mdev, key, sz_bytes, + MLX5_ACCEL_OBJ_TLS_KEY, + p_key_id); +} + +void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) +{ + mlx5_destroy_encryption_key(mdev, key_id); +} + static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk, enum tls_offload_ctx_dir direction, struct tls_crypto_info *crypto_info, @@ -59,15 +97,15 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; - if (!mlx5e_accel_is_ktls_tx(mdev) && !mlx5e_accel_is_ktls_rx(mdev)) + if (!mlx5e_is_ktls_tx(mdev) && !mlx5e_is_ktls_rx(mdev)) return; - if (mlx5e_accel_is_ktls_tx(mdev)) { + if (mlx5e_is_ktls_tx(mdev)) { netdev->hw_features |= NETIF_F_HW_TLS_TX; netdev->features |= NETIF_F_HW_TLS_TX; } - if (mlx5e_accel_is_ktls_rx(mdev)) + if (mlx5e_is_ktls_rx(mdev)) netdev->hw_features |= NETIF_F_HW_TLS_RX; netdev->tlsdev_ops = &mlx5e_ktls_ops; @@ -92,7 +130,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv) { int err; - if (!mlx5e_accel_is_ktls_rx(priv->mdev)) + if (!mlx5e_is_ktls_rx(priv->mdev)) return 0; priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx"); @@ -112,7 +150,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv) void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv) { - if (!mlx5e_accel_is_ktls_rx(priv->mdev)) + if (!mlx5e_is_ktls_rx(priv->mdev)) return; if (priv->netdev->features & NETIF_F_HW_TLS_RX) @@ -120,3 +158,24 @@ void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv) destroy_workqueue(priv->tls->rx_wq); } + +int mlx5e_ktls_init(struct mlx5e_priv *priv) +{ + struct mlx5e_tls *tls; + + if (!mlx5e_is_ktls_device(priv->mdev)) + return 0; + + tls = kzalloc(sizeof(*tls), GFP_KERNEL); + if (!tls) + return -ENOMEM; + + priv->tls = tls; + return 0; +} + +void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) +{ + kfree(priv->tls); + priv->tls = NULL; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index 5833deb2354c..d016624fbc9d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -4,9 +4,42 @@ #ifndef __MLX5E_KTLS_H__ #define __MLX5E_KTLS_H__ +#include <linux/tls.h> +#include <net/tls.h> #include "en.h" #ifdef CONFIG_MLX5_EN_TLS +int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, + struct tls_crypto_info *crypto_info, + u32 *p_key_id); +void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id); + +static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev) +{ + if (is_kdump_kernel()) + return false; + + if (!MLX5_CAP_GEN(mdev, tls_tx) && !MLX5_CAP_GEN(mdev, tls_rx)) + return false; + + if (!MLX5_CAP_GEN(mdev, log_max_dek)) + return 
false; + + return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128); +} + +static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, + struct tls_crypto_info *crypto_info) +{ + switch (crypto_info->cipher_type) { + case TLS_CIPHER_AES_GCM_128: + if (crypto_info->version == TLS_1_2_VERSION) + return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128); + break; + } + + return false; +} void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv); int mlx5e_ktls_init_rx(struct mlx5e_priv *priv); @@ -16,26 +49,36 @@ struct mlx5e_ktls_resync_resp * mlx5e_ktls_rx_resync_create_resp_list(void); void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list); -static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) +static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev) { - return !is_kdump_kernel() && - mlx5_accel_is_ktls_tx(mdev); + return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx); } -static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) +static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev) { - return !is_kdump_kernel() && - mlx5_accel_is_ktls_rx(mdev); + return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_rx); } -static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) -{ - return !is_kdump_kernel() && - mlx5_accel_is_ktls_device(mdev); -} +struct mlx5e_tls_sw_stats { + atomic64_t tx_tls_ctx; + atomic64_t tx_tls_del; + atomic64_t rx_tls_ctx; + atomic64_t rx_tls_del; +}; -#else +struct mlx5e_tls { + struct mlx5e_tls_sw_stats sw_stats; + struct workqueue_struct *rx_wq; +}; +int mlx5e_ktls_init(struct mlx5e_priv *priv); +void mlx5e_ktls_cleanup(struct mlx5e_priv *priv); + +int mlx5e_ktls_get_count(struct mlx5e_priv *priv); +int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data); +int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data); + +#else static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) { } @@ -64,10 +107,23 @@ mlx5e_ktls_rx_resync_create_resp_list(void) static inline void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {} -static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) { return false; } -static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) { return false; } -static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; } +static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev) +{ + return false; +} + +static inline int mlx5e_ktls_init(struct mlx5e_priv *priv) { return 0; } +static inline void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { } +static inline int mlx5e_ktls_get_count(struct mlx5e_priv *priv) { return 0; } +static inline int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data) +{ + return 0; +} +static inline int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data) +{ + return 0; +} #endif #endif /* __MLX5E_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index 96064a2033f7..0bb0633b7542 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -3,7 +3,7 @@ #include <net/inet6_hashtables.h> #include "en_accel/en_accel.h" -#include "en_accel/tls.h" +#include "en_accel/ktls.h" #include "en_accel/ktls_txrx.h" #include "en_accel/ktls_utils.h" #include "en_accel/fs_tcp.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c index 56e7b2aee85f..2ab46c4247ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c @@ -36,14 +36,7 @@ #include "en.h" #include "fpga/sdk.h" -#include "en_accel/tls.h" - -static const struct counter_desc mlx5e_tls_sw_stats_desc[] = { - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_metadata) }, - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_resync_alloc) }, - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_no_sync_data) }, - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) }, -}; +#include "en_accel/ktls.h" static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) }, @@ -55,51 +48,43 @@ static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = { #define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \ atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset)) -static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv) -{ - if (!priv->tls) - return NULL; - if (mlx5e_accel_is_ktls_device(priv->mdev)) - return mlx5e_ktls_sw_stats_desc; - return mlx5e_tls_sw_stats_desc; -} - -int mlx5e_tls_get_count(struct mlx5e_priv *priv) +int mlx5e_ktls_get_count(struct mlx5e_priv *priv) { if (!priv->tls) return 0; - if (mlx5e_accel_is_ktls_device(priv->mdev)) - return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc); - return ARRAY_SIZE(mlx5e_tls_sw_stats_desc); + + return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc); } -int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) +int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { - const struct counter_desc *stats_desc; unsigned int i, n, idx = 0; - stats_desc = get_tls_atomic_stats(priv); - n = mlx5e_tls_get_count(priv); + if (!priv->tls) + return 0; + + n = mlx5e_ktls_get_count(priv); for (i = 0; i < n; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, - stats_desc[i].format); + mlx5e_ktls_sw_stats_desc[i].format); return n; } -int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) +int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data) { - const struct counter_desc *stats_desc; unsigned int i, n, idx = 0; - stats_desc = get_tls_atomic_stats(priv); - n = mlx5e_tls_get_count(priv); + if (!priv->tls) + return 0; + + n = mlx5e_ktls_get_count(priv); for (i = 0; i < n; i++) - data[idx++] = - MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats, - stats_desc, i); + data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats, + mlx5e_ktls_sw_stats_desc, + i); return n; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index aaf11c66bf4c..4b6f0d1ea59a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB // Copyright (c) 2019 Mellanox Technologies. 
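Both stats files in this refactor rely on the same descriptor pattern: MLX5E_DECLARE_STAT pairs an ethtool format string with the offsetof() of an atomic64_t inside the stats struct, and MLX5E_READ_CTR_ATOMIC64 reads each counter back through that recorded offset. A reduced sketch of the idea with hypothetical demo_* names (the real counter_desc stores a fixed-size format buffer; this assumes the macro expands to a name/offset pair as its uses above suggest):

    #include <linux/atomic.h>
    #include <linux/stddef.h>
    #include <linux/types.h>

    struct demo_counter_desc {
            const char *format;  /* ethtool string */
            size_t offset;       /* byte offset of the atomic64 in the stats struct */
    };

    struct demo_tls_sw_stats {
            atomic64_t tx_tls_ctx;
            atomic64_t rx_tls_ctx;
    };

    #define DEMO_STAT(type, fld) { .format = #fld, .offset = offsetof(type, fld) }

    static const struct demo_counter_desc demo_desc[] = {
            DEMO_STAT(struct demo_tls_sw_stats, tx_tls_ctx),
            DEMO_STAT(struct demo_tls_sw_stats, rx_tls_ctx),
    };

    /* Same idea as MLX5E_READ_CTR_ATOMIC64: one generic loop can emit
     * every counter by walking the descriptor table.
     */
    static u64 demo_read_ctr(struct demo_tls_sw_stats *stats, int i)
    {
            return atomic64_read((atomic64_t *)((char *)stats + demo_desc[i].offset));
    }

This is why dropping the FPGA-only mlx5e_tls_sw_stats_desc table above shrinks the get_count/get_strings/get_stats trio to straight loops over the single remaining kTLS table.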
-#include "en_accel/tls.h" +#include "en_accel/ktls.h" #include "en_accel/ktls_txrx.h" #include "en_accel/ktls_utils.h" @@ -27,7 +27,7 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *pa { u16 num_dumps, stop_room = 0; - if (!mlx5e_accel_is_ktls_tx(mdev)) + if (!mlx5e_is_ktls_tx(mdev)) return 0; num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE); @@ -448,14 +448,26 @@ err_out: return MLX5E_KTLS_SYNC_FAIL; } -bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq, - struct sk_buff *skb, int datalen, +bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, + struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state) { struct mlx5e_ktls_offload_context_tx *priv_tx; struct mlx5e_sq_stats *stats = sq->stats; + struct tls_context *tls_ctx; + int datalen; u32 seq; + datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (!datalen) + return true; + + mlx5e_tx_mpwqe_ensure_complete(sq); + + tls_ctx = tls_get_ctx(skb->sk); + if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) + goto err_out; + priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx); if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h index 08c9d5134479..2dd78dd4ad65 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h @@ -16,8 +16,8 @@ struct mlx5e_accel_tx_tls_state { u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params); -bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq, - struct sk_buff *skb, int datalen, +bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, + struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state); void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, struct mlx5_cqe64 *cqe, u32 *cqe_bcnt); @@ -48,6 +48,18 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget) { return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state); } + +static inline bool mlx5e_ktls_skb_offloaded(struct sk_buff *skb) +{ + return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk); +} + +static inline void +mlx5e_ktls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg, + struct mlx5e_accel_tx_tls_state *state) +{ + cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8); +} #else static inline bool mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq, @@ -69,6 +81,18 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget) return false; } +static inline u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + return 0; +} + +static inline void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, + struct sk_buff *skb, + struct mlx5_cqe64 *cqe, + u32 *cqe_bcnt) +{ +} #endif /* CONFIG_MLX5_EN_TLS */ #endif /* __MLX5E_TLS_TXRX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h index e5c180f2403b..0dc715c4c10d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h @@ -6,7 +6,6 @@ #include <net/tls.h> #include "en.h" -#include "accel/tls.h" enum { MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0, 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c deleted file mode 100644 index b8fc863aa68d..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#include <linux/netdevice.h> -#include <net/ipv6.h> -#include "en_accel/tls.h" -#include "accel/tls.h" - -static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk) -{ - struct inet_sock *inet = inet_sk(sk); - - MLX5_SET(tls_flow, flow, ipv6, 0); - memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), - &inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4)); - memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4), - &inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4)); -} - -#if IS_ENABLED(CONFIG_IPV6) -static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk) -{ - struct ipv6_pinfo *np = inet6_sk(sk); - - MLX5_SET(tls_flow, flow, ipv6, 1); - memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); - memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6), - &np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); -} -#endif - -static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk) -{ - struct inet_sock *inet = inet_sk(sk); - - memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport, - MLX5_FLD_SZ_BYTES(tls_flow, src_port)); - memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport, - MLX5_FLD_SZ_BYTES(tls_flow, dst_port)); -} - -static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps) -{ - switch (sk->sk_family) { - case AF_INET: - mlx5e_tls_set_ipv4_flow(flow, sk); - break; -#if IS_ENABLED(CONFIG_IPV6) - case AF_INET6: - if (!sk->sk_ipv6only && - ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) { - mlx5e_tls_set_ipv4_flow(flow, sk); - break; - } - if (!(caps & MLX5_ACCEL_TLS_IPV6)) - goto error_out; - - mlx5e_tls_set_ipv6_flow(flow, sk); - break; -#endif - default: - goto error_out; - } - - mlx5e_tls_set_flow_tcp_ports(flow, sk); - return 
0; -error_out: - return -EINVAL; -} - -static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk, - enum tls_offload_ctx_dir direction, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - struct tls_context *tls_ctx = tls_get_ctx(sk); - struct mlx5_core_dev *mdev = priv->mdev; - u32 caps = mlx5_accel_tls_device_caps(mdev); - int ret = -ENOMEM; - void *flow; - u32 swid; - - flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL); - if (!flow) - return ret; - - ret = mlx5e_tls_set_flow(flow, sk, caps); - if (ret) - goto free_flow; - - ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info, - start_offload_tcp_sn, &swid, - direction == TLS_OFFLOAD_CTX_DIR_TX); - if (ret < 0) - goto free_flow; - - if (direction == TLS_OFFLOAD_CTX_DIR_TX) { - struct mlx5e_tls_offload_context_tx *tx_ctx = - mlx5e_get_tls_tx_context(tls_ctx); - - tx_ctx->swid = htonl(swid); - tx_ctx->expected_seq = start_offload_tcp_sn; - } else { - struct mlx5e_tls_offload_context_rx *rx_ctx = - mlx5e_get_tls_rx_context(tls_ctx); - - rx_ctx->handle = htonl(swid); - } - - return 0; -free_flow: - kfree(flow); - return ret; -} - -static void mlx5e_tls_del(struct net_device *netdev, - struct tls_context *tls_ctx, - enum tls_offload_ctx_dir direction) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - unsigned int handle; - - handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ? - mlx5e_get_tls_tx_context(tls_ctx)->swid : - mlx5e_get_tls_rx_context(tls_ctx)->handle); - - mlx5_accel_tls_del_flow(priv->mdev, handle, - direction == TLS_OFFLOAD_CTX_DIR_TX); -} - -static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk, - u32 seq, u8 *rcd_sn_data, - enum tls_offload_ctx_dir direction) -{ - struct tls_context *tls_ctx = tls_get_ctx(sk); - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_tls_offload_context_rx *rx_ctx; - __be64 rcd_sn = *(__be64 *)rcd_sn_data; - - if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX)) - return -EINVAL; - rx_ctx = mlx5e_get_tls_rx_context(tls_ctx); - - netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq, - be64_to_cpu(rcd_sn)); - mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn); - atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply); - - return 0; -} - -static const struct tlsdev_ops mlx5e_tls_ops = { - .tls_dev_add = mlx5e_tls_add, - .tls_dev_del = mlx5e_tls_del, - .tls_dev_resync = mlx5e_tls_resync, -}; - -void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) -{ - struct net_device *netdev = priv->netdev; - u32 caps; - - if (mlx5e_accel_is_ktls_device(priv->mdev)) { - mlx5e_ktls_build_netdev(priv); - return; - } - - /* FPGA */ - if (!mlx5e_accel_is_tls_device(priv->mdev)) - return; - - caps = mlx5_accel_tls_device_caps(priv->mdev); - if (caps & MLX5_ACCEL_TLS_TX) { - netdev->features |= NETIF_F_HW_TLS_TX; - netdev->hw_features |= NETIF_F_HW_TLS_TX; - } - - if (caps & MLX5_ACCEL_TLS_RX) { - netdev->features |= NETIF_F_HW_TLS_RX; - netdev->hw_features |= NETIF_F_HW_TLS_RX; - } - - if (!(caps & MLX5_ACCEL_TLS_LRO)) { - netdev->features &= ~NETIF_F_LRO; - netdev->hw_features &= ~NETIF_F_LRO; - } - - netdev->tlsdev_ops = &mlx5e_tls_ops; -} - -int mlx5e_tls_init(struct mlx5e_priv *priv) -{ - struct mlx5e_tls *tls; - - if (!mlx5e_accel_is_tls_device(priv->mdev)) - return 0; - - tls = kzalloc(sizeof(*tls), GFP_KERNEL); - if (!tls) - return -ENOMEM; - - priv->tls = tls; - return 0; -} - -void mlx5e_tls_cleanup(struct mlx5e_priv *priv) -{ - struct mlx5e_tls *tls = priv->tls; - - if (!tls) 
- return; - - kfree(tls); - priv->tls = NULL; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h deleted file mode 100644 index 62ecf14bf86a..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ -#ifndef __MLX5E_TLS_H__ -#define __MLX5E_TLS_H__ - -#include "accel/tls.h" -#include "en_accel/ktls.h" - -#ifdef CONFIG_MLX5_EN_TLS -#include <net/tls.h> -#include "en.h" - -struct mlx5e_tls_sw_stats { - atomic64_t tx_tls_ctx; - atomic64_t tx_tls_del; - atomic64_t tx_tls_drop_metadata; - atomic64_t tx_tls_drop_resync_alloc; - atomic64_t tx_tls_drop_no_sync_data; - atomic64_t tx_tls_drop_bypass_required; - atomic64_t rx_tls_ctx; - atomic64_t rx_tls_del; - atomic64_t rx_tls_drop_resync_request; - atomic64_t rx_tls_resync_request; - atomic64_t rx_tls_resync_reply; - atomic64_t rx_tls_auth_fail; -}; - -struct mlx5e_tls { - struct mlx5e_tls_sw_stats sw_stats; - struct workqueue_struct *rx_wq; -}; - -struct mlx5e_tls_offload_context_tx { - struct tls_offload_context_tx base; - u32 expected_seq; - __be32 swid; -}; - -static inline struct mlx5e_tls_offload_context_tx * -mlx5e_get_tls_tx_context(struct tls_context *tls_ctx) -{ - BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) > - TLS_OFFLOAD_CONTEXT_SIZE_TX); - return container_of(tls_offload_ctx_tx(tls_ctx), - struct mlx5e_tls_offload_context_tx, - base); -} - -struct mlx5e_tls_offload_context_rx { - struct tls_offload_context_rx base; - __be32 handle; -}; - -static inline struct mlx5e_tls_offload_context_rx * -mlx5e_get_tls_rx_context(struct tls_context *tls_ctx) -{ - BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) > - TLS_OFFLOAD_CONTEXT_SIZE_RX); - return container_of(tls_offload_ctx_rx(tls_ctx), - struct mlx5e_tls_offload_context_rx, - base); -} - -static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv) -{ - return priv->tls; -} - -void mlx5e_tls_build_netdev(struct mlx5e_priv *priv); -int mlx5e_tls_init(struct mlx5e_priv *priv); -void mlx5e_tls_cleanup(struct mlx5e_priv *priv); - -int mlx5e_tls_get_count(struct 
mlx5e_priv *priv); -int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data); -int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data); - -static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev) -{ - return !is_kdump_kernel() && - mlx5_accel_is_tls_device(mdev); -} - -#else - -static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) -{ - if (!is_kdump_kernel() && - mlx5_accel_is_ktls_device(priv->mdev)) - mlx5e_ktls_build_netdev(priv); -} - -static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv) { return false; } -static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; } -static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { } -static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; } -static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; } -static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; } -static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; } - -#endif - -#endif /* __MLX5E_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c deleted file mode 100644 index a05580cea481..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ /dev/null @@ -1,390 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - */ - -#include "en_accel/tls.h" -#include "en_accel/tls_rxtx.h" -#include "accel/accel.h" - -#include <net/inet6_hashtables.h> -#include <linux/ipv6.h> - -#define SYNDROM_DECRYPTED 0x30 -#define SYNDROM_RESYNC_REQUEST 0x31 -#define SYNDROM_AUTH_FAILED 0x32 - -#define SYNDROME_OFFLOAD_REQUIRED 32 -#define SYNDROME_SYNC 33 - -struct sync_info { - u64 rcd_sn; - s32 sync_len; - int nr_frags; - skb_frag_t frags[MAX_SKB_FRAGS]; -}; - -struct recv_metadata_content { - u8 syndrome; - u8 reserved; - __be32 sync_seq; -} __packed; - -struct send_metadata_content { - /* One byte of syndrome followed by 3 bytes of swid */ - __be32 syndrome_swid; - __be16 first_seq; -} __packed; - -struct mlx5e_tls_metadata { - union { - /* from fpga to host */ - struct recv_metadata_content recv; - /* from host to fpga */ - struct send_metadata_content send; - unsigned char raw[6]; - } __packed content; - /* packet type ID field */ - __be16 ethertype; -} __packed; - -static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid) -{ - struct mlx5e_tls_metadata *pet; - struct ethhdr *eth; - - if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata))) - return -ENOMEM; - - eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata)); - skb->mac_header -= sizeof(struct mlx5e_tls_metadata); - pet = (struct mlx5e_tls_metadata *)(eth + 1); - - memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata), - 2 * ETH_ALEN); - - eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE); - pet->content.send.syndrome_swid = - htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid; - - return 0; -} - -static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context, - u32 tcp_seq, struct sync_info *info) -{ - int remaining, i = 0, ret = -EINVAL; - struct tls_record_info *record; - unsigned long flags; - s32 sync_size; - - spin_lock_irqsave(&context->base.lock, flags); - record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn); - - if (unlikely(!record)) - goto out; - - sync_size = tcp_seq - tls_record_start_seq(record); - info->sync_len = sync_size; - if (unlikely(sync_size < 0)) { - if (tls_record_is_start_marker(record)) - goto done; - - goto out; - } - - remaining = sync_size; - while (remaining > 0) { - info->frags[i] = record->frags[i]; - __skb_frag_ref(&info->frags[i]); - remaining -= skb_frag_size(&info->frags[i]); - - if (remaining < 0) - skb_frag_size_add(&info->frags[i], remaining); - - i++; - } - info->nr_frags = i; -done: - ret = 0; -out: - spin_unlock_irqrestore(&context->base.lock, flags); - return ret; -} - -static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb, - struct sk_buff *nskb, u32 tcp_seq, - int headln, __be64 rcd_sn) -{ - struct mlx5e_tls_metadata *pet; - u8 syndrome = SYNDROME_SYNC; - struct iphdr *iph; - struct tcphdr *th; - int data_len, mss; - - nskb->dev = skb->dev; - skb_reset_mac_header(nskb); - skb_set_network_header(nskb, skb_network_offset(skb)); - skb_set_transport_header(nskb, skb_transport_offset(skb)); - memcpy(nskb->data, skb->data, headln); - memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn)); - - iph = ip_hdr(nskb); - iph->tot_len = htons(nskb->len - skb_network_offset(nskb)); - th = tcp_hdr(nskb); - data_len = nskb->len - headln; - tcp_seq -= data_len; - th->seq = htonl(tcp_seq); - - mss = nskb->dev->mtu - (headln - skb_network_offset(nskb)); - skb_shinfo(nskb)->gso_size = 0; - if (data_len > mss) { - skb_shinfo(nskb)->gso_size = mss; - skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss); - } - skb_shinfo(nskb)->gso_type = 
skb_shinfo(skb)->gso_type; - - pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr)); - memcpy(pet, &syndrome, sizeof(syndrome)); - pet->content.send.first_seq = htons(tcp_seq); - - /* MLX5 devices don't care about the checksum partial start, offset - * and pseudo header - */ - nskb->ip_summed = CHECKSUM_PARTIAL; - - nskb->queue_mapping = skb->queue_mapping; -} - -static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context, - struct mlx5e_txqsq *sq, struct sk_buff *skb, - struct mlx5e_tls *tls) -{ - u32 tcp_seq = ntohl(tcp_hdr(skb)->seq); - struct sync_info info; - struct sk_buff *nskb; - int linear_len = 0; - int headln; - int i; - - sq->stats->tls_ooo++; - - if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) { - /* We might get here if a retransmission reaches the driver - * after the relevant record is acked. - * It should be safe to drop the packet in this case - */ - atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data); - goto err_out; - } - - if (unlikely(info.sync_len < 0)) { - u32 payload; - - headln = skb_transport_offset(skb) + tcp_hdrlen(skb); - payload = skb->len - headln; - if (likely(payload <= -info.sync_len)) - /* SKB payload doesn't require offload - */ - return true; - - atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required); - goto err_out; - } - - if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) { - atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata); - goto err_out; - } - - headln = skb_transport_offset(skb) + tcp_hdrlen(skb); - linear_len += headln + sizeof(info.rcd_sn); - nskb = alloc_skb(linear_len, GFP_ATOMIC); - if (unlikely(!nskb)) { - atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc); - goto err_out; - } - - context->expected_seq = tcp_seq + skb->len - headln; - skb_put(nskb, linear_len); - for (i = 0; i < info.nr_frags; i++) - skb_shinfo(nskb)->frags[i] = info.frags[i]; - - skb_shinfo(nskb)->nr_frags = info.nr_frags; - nskb->data_len = info.sync_len; - nskb->len += info.sync_len; - sq->stats->tls_resync_bytes += nskb->len; - mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln, - cpu_to_be64(info.rcd_sn)); - mlx5e_sq_xmit_simple(sq, nskb, true); - - return true; - -err_out: - dev_kfree_skb_any(skb); - return false; -} - -bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, - struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_tls_offload_context_tx *context; - struct tls_context *tls_ctx; - u32 expected_seq; - int datalen; - u32 skb_seq; - - datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); - if (!datalen) - return true; - - mlx5e_tx_mpwqe_ensure_complete(sq); - - tls_ctx = tls_get_ctx(skb->sk); - if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) - goto err_out; - - if (mlx5e_accel_is_ktls_tx(sq->mdev)) - return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state); - - /* FPGA */ - skb_seq = ntohl(tcp_hdr(skb)->seq); - context = mlx5e_get_tls_tx_context(tls_ctx); - expected_seq = context->expected_seq; - - if (unlikely(expected_seq != skb_seq)) - return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls); - - if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) { - atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata); - dev_kfree_skb_any(skb); - return false; - } - - context->expected_seq = skb_seq + datalen; - return true; - -err_out: - dev_kfree_skb_any(skb); - return false; -} - -static int tls_update_resync_sn(struct net_device *netdev, - struct sk_buff *skb, - struct 
mlx5e_tls_metadata *mdata) -{ - struct sock *sk = NULL; - struct iphdr *iph; - struct tcphdr *th; - __be32 seq; - - if (mdata->ethertype != htons(ETH_P_IP)) - return -EINVAL; - - iph = (struct iphdr *)(mdata + 1); - - th = ((void *)iph) + iph->ihl * 4; - - if (iph->version == 4) { - sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo, - iph->saddr, th->source, iph->daddr, - th->dest, netdev->ifindex); -#if IS_ENABLED(CONFIG_IPV6) - } else { - struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph; - - sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo, - &ipv6h->saddr, th->source, - &ipv6h->daddr, ntohs(th->dest), - netdev->ifindex, 0); -#endif - } - if (!sk || sk->sk_state == TCP_TIME_WAIT) { - struct mlx5e_priv *priv = netdev_priv(netdev); - - atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request); - goto out; - } - - skb->sk = sk; - skb->destructor = sock_edemux; - - memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq)); - tls_offload_rx_resync_request(sk, seq); -out: - return 0; -} - -/* FPGA tls rx handler */ -void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb, - u32 *cqe_bcnt) -{ - struct mlx5e_tls_metadata *mdata; - struct mlx5e_priv *priv; - - /* Use the metadata */ - mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN); - switch (mdata->content.recv.syndrome) { - case SYNDROM_DECRYPTED: - skb->decrypted = 1; - break; - case SYNDROM_RESYNC_REQUEST: - tls_update_resync_sn(rq->netdev, skb, mdata); - priv = netdev_priv(rq->netdev); - atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request); - break; - case SYNDROM_AUTH_FAILED: - /* Authentication failure will be observed and verified by kTLS */ - priv = netdev_priv(rq->netdev); - atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail); - break; - default: - /* Bypass the metadata header to others */ - return; - } - - remove_metadata_hdr(skb); - *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; -} - -u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) -{ - if (!mlx5e_accel_is_tls_device(mdev)) - return 0; - - if (mlx5e_accel_is_ktls_device(mdev)) - return mlx5e_ktls_get_stop_room(mdev, params); - - /* FPGA */ - /* Resync SKB. */ - return mlx5e_stop_room_for_max_wqe(mdev); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h deleted file mode 100644 index 0ca0a023fb8d..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#ifndef __MLX5E_TLS_RXTX_H__ -#define __MLX5E_TLS_RXTX_H__ - -#include "accel/accel.h" -#include "en_accel/ktls_txrx.h" - -#ifdef CONFIG_MLX5_EN_TLS - -#include <linux/skbuff.h> -#include "en.h" -#include "en/txrx.h" - -u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params); - -bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, - struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state); - -static inline bool mlx5e_tls_skb_offloaded(struct sk_buff *skb) -{ - return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk); -} - -static inline void -mlx5e_tls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg, - struct mlx5e_accel_tx_tls_state *state) -{ - cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8); -} - -void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb, - u32 *cqe_bcnt); - -static inline void -mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, - struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) -{ - if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */ - return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt); - - if (unlikely(test_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state) && is_metadata_hdr_valid(skb))) - return mlx5e_tls_handle_rx_skb_metadata(rq, skb, cqe_bcnt); -} - -#else - -static inline bool -mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; } -static inline void -mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, - struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {} -static inline u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) -{ - return 0; -} - -#endif /* CONFIG_MLX5_EN_TLS */ - -#endif /* __MLX5E_TLS_RXTX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index d659fe07d464..6435517c0bee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1026,15 +1026,6 @@ void mlx5e_dcbnl_build_netdev(struct net_device *netdev) netdev->dcbnl_ops = &mlx5e_dcbnl_ops; } -void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5_core_dev *mdev = priv->mdev; - - if (MLX5_CAP_GEN(mdev, qos)) - netdev->dcbnl_ops = &mlx5e_dcbnl_ops; -} - static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv, enum mlx5_dcbx_oper_mode *mode) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2f1dedc721d1..12b72a0bcb1a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -47,9 +47,8 @@ #include "en_rep.h" #include "en_accel/ipsec.h" #include "en_accel/en_accel.h" -#include "en_accel/tls.h" -#include "accel/ipsec.h" -#include "accel/tls.h" +#include "en_accel/ktls.h" +#include "en_accel/ipsec_offload.h" #include "lib/vxlan.h" #include "lib/clock.h" #include "en/port.h" @@ -68,7 
+67,6 @@ #include "en/ptp.h" #include "qos.h" #include "en/trap.h" -#include "fpga/ipsec.h" bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) { @@ -1036,9 +1034,6 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param, if (err) goto err_destroy_rq; - if (mlx5e_is_tls_on(rq->priv) && !mlx5e_accel_is_ktls_device(mdev)) - __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */ - if (MLX5_CAP_ETH(mdev, cqe_checksum_full)) __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state); @@ -1334,7 +1329,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert)) set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state); - if (MLX5_IPSEC_DEV(c->priv->mdev)) + if (mlx5_ipsec_device_caps(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); if (param->is_mpw) set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state); @@ -4471,12 +4466,6 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) return -EINVAL; } - if (mlx5_fpga_is_ipsec_device(priv->mdev)) { - netdev_warn(netdev, - "XDP is not available on Innova cards with IPsec support\n"); - return -EINVAL; - } - new_params = priv->channels.params; new_params.xdp_prog = prog; @@ -4934,7 +4923,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_set_netdev_dev_addr(netdev); mlx5e_ipsec_build_netdev(priv); - mlx5e_tls_build_netdev(priv); + mlx5e_ktls_build_netdev(priv); } void mlx5e_create_q_counters(struct mlx5e_priv *priv) @@ -4996,7 +4985,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, if (err) mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err); - err = mlx5e_tls_init(priv); + err = mlx5e_ktls_init(priv); if (err) mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); @@ -5007,7 +4996,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { mlx5e_health_destroy_reporters(priv); - mlx5e_tls_cleanup(priv); + mlx5e_ktls_cleanup(priv); mlx5e_ipsec_cleanup(priv); mlx5e_fs_cleanup(priv); } @@ -5704,7 +5693,6 @@ int mlx5e_init(void) { int ret; - mlx5e_ipsec_build_inverse_table(); mlx5e_build_ptys2ethtool_map(); ret = auxiliary_driver_register(&mlx5e_driver); if (ret) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 6b7e7ea6ded2..47f7b4c034cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1112,7 +1112,6 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = { &MLX5E_STATS_GRP(per_port_buff_congest), #ifdef CONFIG_MLX5_EN_IPSEC &MLX5E_STATS_GRP(ipsec_sw), - &MLX5E_STATS_GRP(ipsec_hw), #endif &MLX5E_STATS_GRP(ptp), }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 56bb58704bf9..2dea9e4649a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -48,10 +48,9 @@ #include "en_rep.h" #include "en/rep/tc.h" #include "ipoib/ipoib.h" -#include "accel/ipsec.h" -#include "fpga/ipsec.h" +#include "en_accel/ipsec_offload.h" #include "en_accel/ipsec_rxtx.h" -#include "en_accel/tls_rxtx.h" +#include "en_accel/ktls_txrx.h" #include "en/xdp.h" #include "en/xsk/rx.h" #include "en/health.h" @@ -1416,7 +1415,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, skb->mac_len = ETH_HLEN; - mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt); + 
if (unlikely(get_cqe_tls_offload(cqe))) + mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt); if (unlikely(mlx5_ipsec_is_rx_flow(cqe))) mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe); @@ -1521,8 +1521,8 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom, } static struct sk_buff * -mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, - struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) +mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, + u32 cqe_bcnt) { struct mlx5e_dma_info *di = wi->di; u16 rx_headroom = rq->buff.headroom; @@ -1565,8 +1565,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, } static struct sk_buff * -mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, - struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) +mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, + u32 cqe_bcnt) { struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; struct mlx5e_wqe_frag_info *head_wi = wi; @@ -1709,7 +1709,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, mlx5e_skb_from_cqe_linear, mlx5e_skb_from_cqe_nonlinear, - rq, cqe, wi, cqe_bcnt); + rq, wi, cqe_bcnt); if (!skb) { /* probably for XDP */ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { @@ -1762,7 +1762,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, mlx5e_skb_from_cqe_linear, mlx5e_skb_from_cqe_nonlinear, - rq, cqe, wi, cqe_bcnt); + rq, wi, cqe_bcnt); if (!skb) { /* probably for XDP */ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { @@ -2361,7 +2361,7 @@ static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, mlx5e_skb_from_cqe_linear, mlx5e_skb_from_cqe_nonlinear, - rq, cqe, wi, cqe_bcnt); + rq, wi, cqe_bcnt); if (!skb) goto wq_free_wqe; @@ -2383,46 +2383,6 @@ const struct mlx5e_rx_handlers mlx5i_rx_handlers = { }; #endif /* CONFIG_MLX5_CORE_IPOIB */ -#ifdef CONFIG_MLX5_EN_IPSEC - -static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) -{ - struct mlx5_wq_cyc *wq = &rq->wqe.wq; - struct mlx5e_wqe_frag_info *wi; - struct sk_buff *skb; - u32 cqe_bcnt; - u16 ci; - - ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); - wi = get_frag(rq, ci); - cqe_bcnt = be32_to_cpu(cqe->byte_cnt); - - if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { - rq->stats->wqe_err++; - goto wq_free_wqe; - } - - skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, - mlx5e_skb_from_cqe_linear, - mlx5e_skb_from_cqe_nonlinear, - rq, cqe, wi, cqe_bcnt); - if (unlikely(!skb)) /* a DROP, save the page-reuse checks */ - goto wq_free_wqe; - - skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt); - if (unlikely(!skb)) - goto wq_free_wqe; - - mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); - napi_gro_receive(rq->cq.napi, skb); - -wq_free_wqe: - mlx5e_free_rx_wqe(rq, wi, true); - mlx5_wq_cyc_pop(wq); -} - -#endif /* CONFIG_MLX5_EN_IPSEC */ - int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk) { struct net_device *netdev = rq->netdev; @@ -2439,10 +2399,6 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool rq->post_wqes = mlx5e_post_rx_mpwqes; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; - if (mlx5_fpga_is_ipsec_device(mdev)) { - netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n"); 
- return -EINVAL; - } if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) { rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo; if (!rq->handle_rx_cqe) { @@ -2466,14 +2422,7 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool mlx5e_skb_from_cqe_nonlinear; rq->post_wqes = mlx5e_post_rx_wqes; rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; - -#ifdef CONFIG_MLX5_EN_IPSEC - if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) && - priv->ipsec) - rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe; - else -#endif - rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe; + rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe; if (!rq->handle_rx_cqe) { netdev_err(netdev, "RX handler of RQ is not set\n"); return -EINVAL; @@ -2504,7 +2453,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe goto free_wqe; } - skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt); + skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe_bcnt); if (!skb) goto free_wqe; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index bdc870f9c2f3..57fa0489eeb8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -32,7 +32,7 @@ #include "lib/mlx5.h" #include "en.h" -#include "en_accel/tls.h" +#include "en_accel/ktls.h" #include "en_accel/en_accel.h" #include "en/ptp.h" #include "en/port.h" @@ -1900,17 +1900,17 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; } static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls) { - return mlx5e_tls_get_count(priv); + return mlx5e_ktls_get_count(priv); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls) { - return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN); + return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls) { - return idx + mlx5e_tls_get_stats(priv, data + idx); + return idx + mlx5e_ktls_get_stats(priv, data + idx); } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; } @@ -2443,7 +2443,6 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { &MLX5E_STATS_GRP(pme), #ifdef CONFIG_MLX5_EN_IPSEC &MLX5E_STATS_GRP(ipsec_sw), - &MLX5E_STATS_GRP(ipsec_hw), #endif &MLX5E_STATS_GRP(tls), &MLX5E_STATS_GRP(channels), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index a7a025d15c14..e48b15b55b6f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -482,7 +482,6 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio); extern MLX5E_DECLARE_STATS_GRP(pme); extern MLX5E_DECLARE_STATS_GRP(channels); extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest); -extern MLX5E_DECLARE_STATS_GRP(ipsec_hw); extern MLX5E_DECLARE_STATS_GRP(ipsec_sw); extern MLX5E_DECLARE_STATS_GRP(ptp); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 458ec0bca1b8..25f2d2717aaa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1582,6 +1582,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; else esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; + if (MLX5_ESWITCH_MANAGER(dev) && + mlx5_esw_vport_match_metadata_supported(esw)) + esw->flags |= 
MLX5_ESWITCH_VPORT_MATCH_METADATA; dev->priv.eswitch = esw; BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h index 2a984e82ae16..750c32050165 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h @@ -57,9 +57,6 @@ struct mlx5_fpga_device { u32 mkey; struct mlx5_uars_page *uar; } conn_res; - - struct mlx5_fpga_ipsec *ipsec; - struct mlx5_fpga_tls *tls; }; #define mlx5_fpga_dbg(__adev, format, ...) \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c deleted file mode 100644 index 8ec148010d62..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ /dev/null @@ -1,1582 +0,0 @@ -/* - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - */ - -#include <linux/rhashtable.h> -#include <linux/mlx5/driver.h> -#include <linux/mlx5/fs_helpers.h> -#include <linux/mlx5/fs.h> -#include <linux/rbtree.h> - -#include "mlx5_core.h" -#include "fs_cmd.h" -#include "fpga/ipsec.h" -#include "fpga/sdk.h" -#include "fpga/core.h" - -enum mlx5_fpga_ipsec_cmd_status { - MLX5_FPGA_IPSEC_CMD_PENDING, - MLX5_FPGA_IPSEC_CMD_SEND_FAIL, - MLX5_FPGA_IPSEC_CMD_COMPLETE, -}; - -struct mlx5_fpga_ipsec_cmd_context { - struct mlx5_fpga_dma_buf buf; - enum mlx5_fpga_ipsec_cmd_status status; - struct mlx5_ifc_fpga_ipsec_cmd_resp resp; - int status_code; - struct completion complete; - struct mlx5_fpga_device *dev; - struct list_head list; /* Item in pending_cmds */ - u8 command[]; -}; - -struct mlx5_fpga_esp_xfrm; - -struct mlx5_fpga_ipsec_sa_ctx { - struct rhash_head hash; - struct mlx5_ifc_fpga_ipsec_sa hw_sa; - u32 sa_handle; - struct mlx5_core_dev *dev; - struct mlx5_fpga_esp_xfrm *fpga_xfrm; -}; - -struct mlx5_fpga_esp_xfrm { - unsigned int num_rules; - struct mlx5_fpga_ipsec_sa_ctx *sa_ctx; - struct mutex lock; /* xfrm lock */ - struct mlx5_accel_esp_xfrm accel_xfrm; -}; - -struct mlx5_fpga_ipsec_rule { - struct rb_node node; - struct fs_fte *fte; - struct mlx5_fpga_ipsec_sa_ctx *ctx; -}; - -static const struct rhashtable_params rhash_sa = { - /* Keep out "cmd" field from the key as it's - * value is not constant during the lifetime - * of the key object. - */ - .key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) - - sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), - .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) + - sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), - .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), - .automatic_shrinking = true, - .min_size = 1, -}; - -struct mlx5_fpga_ipsec { - struct mlx5_fpga_device *fdev; - struct list_head pending_cmds; - spinlock_t pending_cmds_lock; /* Protects pending_cmds */ - u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)]; - struct mlx5_fpga_conn *conn; - - struct notifier_block fs_notifier_ingress_bypass; - struct notifier_block fs_notifier_egress; - - /* Map hardware SA --> SA context - * (mlx5_fpga_ipsec_sa) (mlx5_fpga_ipsec_sa_ctx) - * We will use this hash to avoid SAs duplication in fpga which - * aren't allowed - */ - struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */ - struct mutex sa_hash_lock; - - /* Tree holding all rules for this fpga device - * Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id) - */ - struct rb_root rules_rb; - struct mutex rules_rb_lock; /* rules lock */ - - struct ida halloc; -}; - -bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) -{ - if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga)) - return false; - - if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) != - MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX) - return false; - - if (MLX5_CAP_FPGA(mdev, sandbox_product_id) != - MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC) - return false; - - return true; -} - -static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn, - struct mlx5_fpga_device *fdev, - struct mlx5_fpga_dma_buf *buf, - u8 status) -{ - struct mlx5_fpga_ipsec_cmd_context *context; - - if (status) { - context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context, - buf); - mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n", - status); - context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL; - complete(&context->complete); - } -} - -static inline -int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome) -{ - switch (syndrome) { - 
case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS: - return 0; - case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE: - return -EEXIST; - case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST: - return -EINVAL; - case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE: - return -EIO; - } - return -EIO; -} - -static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf) -{ - struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data; - struct mlx5_fpga_ipsec_cmd_context *context; - enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome; - struct mlx5_fpga_device *fdev = cb_arg; - unsigned long flags; - - if (buf->sg[0].size < sizeof(*resp)) { - mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n", - buf->sg[0].size, sizeof(*resp)); - return; - } - - mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n", - ntohl(resp->syndrome)); - - spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); - context = list_first_entry_or_null(&fdev->ipsec->pending_cmds, - struct mlx5_fpga_ipsec_cmd_context, - list); - if (context) - list_del(&context->list); - spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); - - if (!context) { - mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n"); - return; - } - mlx5_fpga_dbg(fdev, "Handling response for %p\n", context); - - syndrome = ntohl(resp->syndrome); - context->status_code = syndrome_to_errno(syndrome); - context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE; - memcpy(&context->resp, resp, sizeof(*resp)); - - if (context->status_code) - mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n", - syndrome); - - complete(&context->complete); -} - -static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev, - const void *cmd, int cmd_size) -{ - struct mlx5_fpga_ipsec_cmd_context *context; - struct mlx5_fpga_device *fdev = mdev->fpga; - unsigned long flags; - int res; - - if (!fdev || !fdev->ipsec) - return ERR_PTR(-EOPNOTSUPP); - - if (cmd_size & 3) - return ERR_PTR(-EINVAL); - - context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC); - if (!context) - return ERR_PTR(-ENOMEM); - - context->status = MLX5_FPGA_IPSEC_CMD_PENDING; - context->dev = fdev; - context->buf.complete = mlx5_fpga_ipsec_send_complete; - init_completion(&context->complete); - memcpy(&context->command, cmd, cmd_size); - context->buf.sg[0].size = cmd_size; - context->buf.sg[0].data = &context->command; - - spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); - res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf); - if (!res) - list_add_tail(&context->list, &fdev->ipsec->pending_cmds); - spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); - - if (res) { - mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res); - kfree(context); - return ERR_PTR(res); - } - - /* Context should be freed by the caller after completion. 
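mlx5_fpga_ipsec_cmd_exec() above posts the message and queues the context inside a single critical section, which is what lets mlx5_fpga_ipsec_recv() match each response to list_first_entry() of pending_cmds: list order and wire order cannot diverge. The pattern, reduced to hypothetical demo_* helpers (demo_hw_post() stands in for mlx5_fpga_sbu_conn_sendmsg()):

#include <linux/completion.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_cmd_ctx {
        struct list_head list;
        struct completion done;
        int status;
};

static int demo_hw_post(struct demo_cmd_ctx *ctx); /* device send stand-in */

/* Enqueue and send under one lock so FIFO order matches wire order */
static int demo_cmd_post(struct list_head *pending, spinlock_t *lock,
                         struct demo_cmd_ctx *ctx)
{
        unsigned long flags;
        int err;

        init_completion(&ctx->done);

        spin_lock_irqsave(lock, flags);
        err = demo_hw_post(ctx);
        if (!err)
                list_add_tail(&ctx->list, pending);
        spin_unlock_irqrestore(lock, flags);

        return err;
}

/* Response path: the oldest pending command owns the reply */
static struct demo_cmd_ctx *demo_cmd_pop(struct list_head *pending,
                                         spinlock_t *lock)
{
        struct demo_cmd_ctx *ctx;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        ctx = list_first_entry_or_null(pending, struct demo_cmd_ctx, list);
        if (ctx)
                list_del(&ctx->list);
        spin_unlock_irqrestore(lock, flags);

        return ctx;
}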
*/ - return context; -} - -static int mlx5_fpga_ipsec_cmd_wait(void *ctx) -{ - struct mlx5_fpga_ipsec_cmd_context *context = ctx; - unsigned long timeout = - msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC); - int res; - - res = wait_for_completion_timeout(&context->complete, timeout); - if (!res) { - mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n"); - return -ETIMEDOUT; - } - - if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE) - res = context->status_code; - else - res = -EIO; - - return res; -} - -static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec) -{ - if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command)) - return true; - return false; -} - -static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev, - struct mlx5_ifc_fpga_ipsec_sa *hw_sa, - int opcode) -{ - struct mlx5_core_dev *dev = fdev->mdev; - struct mlx5_ifc_fpga_ipsec_sa *sa; - struct mlx5_fpga_ipsec_cmd_context *cmd_context; - size_t sa_cmd_size; - int err; - - hw_sa->ipsec_sa_v1.cmd = htonl(opcode); - if (is_v2_sadb_supported(fdev->ipsec)) - sa_cmd_size = sizeof(*hw_sa); - else - sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1); - - cmd_context = (struct mlx5_fpga_ipsec_cmd_context *) - mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size); - if (IS_ERR(cmd_context)) - return PTR_ERR(cmd_context); - - err = mlx5_fpga_ipsec_cmd_wait(cmd_context); - if (err) - goto out; - - sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command; - if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) { - mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n", - ntohl(sa->ipsec_sa_v1.sw_sa_handle), - ntohl(cmd_context->resp.sw_sa_handle)); - err = -EIO; - } - -out: - kfree(cmd_context); - return err; -} - -u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) -{ - struct mlx5_fpga_device *fdev = mdev->fpga; - u32 ret = 0; - - if (mlx5_fpga_is_ipsec_device(mdev)) { - ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE; - ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA; - } else { - return ret; - } - - if (!fdev->ipsec) - return ret; - - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp)) - ret |= MLX5_ACCEL_IPSEC_CAP_ESP; - - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6)) - ret |= MLX5_ACCEL_IPSEC_CAP_IPV6; - - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso)) - ret |= MLX5_ACCEL_IPSEC_CAP_LSO; - - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer)) - ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER; - - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) { - ret |= MLX5_ACCEL_IPSEC_CAP_ESN; - ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN; - } - - return ret; -} - -static unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev) -{ - struct mlx5_fpga_device *fdev = mdev->fpga; - - if (!fdev || !fdev->ipsec) - return 0; - - return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, - number_of_ipsec_counters); -} - -static int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int counters_count) -{ - struct mlx5_fpga_device *fdev = mdev->fpga; - unsigned int i; - __be32 *data; - u32 count; - u64 addr; - int ret; - - if (!fdev || !fdev->ipsec) - return 0; - - addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, - ipsec_counters_addr_low) + - ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, - ipsec_counters_addr_high) << 32); - - count = mlx5_fpga_ipsec_counters_count(mdev); - - data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL); - if (!data) { - ret = -ENOMEM; - goto out; - 
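mlx5_fpga_ipsec_cmd_wait() above leans on the slightly counter-intuitive return convention of wait_for_completion_timeout(): zero means timeout, any non-zero value is the remaining jiffies. A condensed sketch, reusing the demo_cmd_ctx from the earlier fragment:

#include <linux/jiffies.h>

/* 0 on success, -ETIMEDOUT if the device never answered,
 * otherwise the status the response carried */
static int demo_cmd_wait(struct demo_cmd_ctx *ctx, unsigned int timeout_ms)
{
        unsigned long left;

        left = wait_for_completion_timeout(&ctx->done,
                                           msecs_to_jiffies(timeout_ms));
        if (!left)
                return -ETIMEDOUT;

        return ctx->status;
}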
} - - ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data, - MLX5_FPGA_ACCESS_TYPE_DONTCARE); - if (ret < 0) { - mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n", - ret); - goto out; - } - ret = 0; - - if (count > counters_count) - count = counters_count; - - /* Each counter is low word, then high. But each word is big-endian */ - for (i = 0; i < count; i++) - counters[i] = (u64)ntohl(data[i * 2]) | - ((u64)ntohl(data[i * 2 + 1]) << 32); - -out: - kfree(data); - return ret; -} - -static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags) -{ - struct mlx5_fpga_ipsec_cmd_context *context; - struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0}; - int err; - - cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP); - cmd.flags = htonl(flags); - context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd)); - if (IS_ERR(context)) - return PTR_ERR(context); - - err = mlx5_fpga_ipsec_cmd_wait(context); - if (err) - goto out; - - if ((context->resp.flags & cmd.flags) != cmd.flags) { - mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n", - cmd.flags, - context->resp.flags); - err = -EIO; - } - -out: - kfree(context); - return err; -} - -static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev) -{ - u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev); - u32 flags = 0; - - if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER) - flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER; - - return mlx5_fpga_ipsec_set_caps(mdev, flags); -} - -static void -mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs, - struct mlx5_ifc_fpga_ipsec_sa *hw_sa) -{ - const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm; - - /* key */ - memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key, - aes_gcm->key_len / 8); - /* Duplicate 128 bit key twice according to HW layout */ - if (aes_gcm->key_len == 128) - memcpy(&hw_sa->ipsec_sa_v1.key_enc[16], - aes_gcm->aes_key, aes_gcm->key_len / 8); - - /* salt and seq_iv */ - memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv, - sizeof(aes_gcm->seq_iv)); - memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt, - sizeof(aes_gcm->salt)); - - /* esn */ - if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) { - hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN; - hw_sa->ipsec_sa_v1.flags |= - (xfrm_attrs->flags & - MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ? - MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0; - hw_sa->esn = htonl(xfrm_attrs->esn); - } else { - hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN; - hw_sa->ipsec_sa_v1.flags &= - ~(xfrm_attrs->flags & - MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ? 
- MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0; - hw_sa->esn = 0; - } - - /* rx handle */ - hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle); - - /* enc mode */ - switch (aes_gcm->key_len) { - case 128: - hw_sa->ipsec_sa_v1.enc_mode = - MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128; - break; - case 256: - hw_sa->ipsec_sa_v1.enc_mode = - MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128; - break; - } - - /* flags */ - hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID | - MLX5_FPGA_IPSEC_SA_SPI_EN | - MLX5_FPGA_IPSEC_SA_IP_ESP; - - if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT) - hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX; - else - hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX; -} - -static void -mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs, - const __be32 saddr[4], - const __be32 daddr[4], - const __be32 spi, bool is_ipv6, - struct mlx5_ifc_fpga_ipsec_sa *hw_sa) -{ - mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa); - - /* IPs */ - memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip)); - memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip)); - - /* SPI */ - hw_sa->ipsec_sa_v1.spi = spi; - - /* flags */ - if (is_ipv6) - hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6; -} - -static bool is_full_mask(const void *p, size_t len) -{ - WARN_ON(len % 4); - - return !memchr_inv(p, 0xff, len); -} - -static bool validate_fpga_full_mask(struct mlx5_core_dev *dev, - const u32 *match_c, - const u32 *match_v) -{ - const void *misc_params_c = MLX5_ADDR_OF(fte_match_param, - match_c, - misc_parameters); - const void *headers_c = MLX5_ADDR_OF(fte_match_param, - match_c, - outer_headers); - const void *headers_v = MLX5_ADDR_OF(fte_match_param, - match_v, - outer_headers); - - if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) { - const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, - headers_c, - src_ipv4_src_ipv6.ipv4_layout.ipv4); - const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, - headers_c, - dst_ipv4_dst_ipv6.ipv4_layout.ipv4); - - if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout, - ipv4)) || - !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout, - ipv4))) - return false; - } else { - const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, - headers_c, - src_ipv4_src_ipv6.ipv6_layout.ipv6); - const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, - headers_c, - dst_ipv4_dst_ipv6.ipv6_layout.ipv6); - - if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout, - ipv6)) || - !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout, - ipv6))) - return false; - } - - if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c, - outer_esp_spi), - MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi))) - return false; - - return true; -} - -static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev, - u8 match_criteria_enable, - const u32 *match_c, - const u32 *match_v) -{ - u32 ipsec_dev_caps = mlx5_fpga_ipsec_device_caps(dev); - bool ipv6_flow; - - ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v); - - if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) || - mlx5_fs_is_outer_udp_flow(match_c, match_v) || - mlx5_fs_is_outer_tcp_flow(match_c, match_v) || - mlx5_fs_is_vxlan_flow(match_c) || - !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) || - ipv6_flow)) - return false; - - if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE)) - return false; - - if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) && - 
mlx5_fs_is_outer_ipsec_flow(match_c))
- return false;
-
- if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
- ipv6_flow)
- return false;
-
- if (!validate_fpga_full_mask(dev, match_c, match_v))
- return false;
-
- return true;
-}
-
-static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
- u8 match_criteria_enable,
- const u32 *match_c,
- const u32 *match_v,
- struct mlx5_flow_act *flow_act,
- struct mlx5_flow_context *flow_context)
-{
- const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
- outer_headers);
- bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
- MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
- bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
- MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
- int ret;
-
- ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
- match_v);
- if (!ret)
- return ret;
-
- if (is_dmac || is_smac ||
- (match_criteria_enable &
- ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
- (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
- (flow_context->flags & FLOW_CONTEXT_HAS_TAG))
- return false;
-
- return true;
-}
-
-static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *accel_xfrm,
- const __be32 saddr[4], const __be32 daddr[4],
- const __be32 spi, bool is_ipv6, u32 *sa_handle)
-{
- struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
- struct mlx5_fpga_esp_xfrm *fpga_xfrm =
- container_of(accel_xfrm, typeof(*fpga_xfrm),
- accel_xfrm);
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- int opcode, err;
- void *context;
-
- /* alloc SA */
- sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
- if (!sa_ctx)
- return ERR_PTR(-ENOMEM);
-
- sa_ctx->dev = mdev;
-
- /* build candidate SA */
- mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
- saddr, daddr, spi, is_ipv6,
- &sa_ctx->hw_sa);
-
- mutex_lock(&fpga_xfrm->lock);
-
- if (fpga_xfrm->sa_ctx) { /* multiple rules for same accel_xfrm */
- /* all rules must have the same IPs and SPI */
- if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
- sizeof(sa_ctx->hw_sa))) {
- context = ERR_PTR(-EINVAL);
- goto exists;
- }
-
- ++fpga_xfrm->num_rules;
- context = fpga_xfrm->sa_ctx;
- goto exists;
- }
-
- if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) {
- err = ida_alloc_min(&fipsec->halloc, 1, GFP_KERNEL);
- if (err < 0) {
- context = ERR_PTR(err);
- goto exists;
- }
-
- sa_ctx->sa_handle = err;
- if (sa_handle)
- *sa_handle = sa_ctx->sa_handle;
- }
- /* This is an unbound fpga_xfrm; try to add it to the hash */
- mutex_lock(&fipsec->sa_hash_lock);
-
- err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
- rhash_sa);
- if (err) {
- /* Can't bind a different accel_xfrm to an already existing
- * sa_ctx, because we can't support multiple keymats for the
- * same IPs and SPI
- */
- context = ERR_PTR(-EEXIST);
- goto unlock_hash;
- }
-
- /* Bind accel_xfrm to sa_ctx */
- opcode = is_v2_sadb_supported(fdev->ipsec) ?
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 : - MLX5_FPGA_IPSEC_CMD_OP_ADD_SA; - err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode); - sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0; - if (err) { - context = ERR_PTR(err); - goto delete_hash; - } - - mutex_unlock(&fipsec->sa_hash_lock); - - ++fpga_xfrm->num_rules; - fpga_xfrm->sa_ctx = sa_ctx; - sa_ctx->fpga_xfrm = fpga_xfrm; - - mutex_unlock(&fpga_xfrm->lock); - - return sa_ctx; - -delete_hash: - WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash, - rhash_sa)); -unlock_hash: - mutex_unlock(&fipsec->sa_hash_lock); - if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) - ida_free(&fipsec->halloc, sa_ctx->sa_handle); -exists: - mutex_unlock(&fpga_xfrm->lock); - kfree(sa_ctx); - return context; -} - -static void * -mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev, - struct fs_fte *fte, - bool is_egress) -{ - struct mlx5_accel_esp_xfrm *accel_xfrm; - __be32 saddr[4], daddr[4], spi; - struct mlx5_flow_group *fg; - bool is_ipv6 = false; - - fs_get_obj(fg, fte->node.parent); - /* validate */ - if (is_egress && - !mlx5_is_fpga_egress_ipsec_rule(mdev, - fg->mask.match_criteria_enable, - fg->mask.match_criteria, - fte->val, - &fte->action, - &fte->flow_context)) - return ERR_PTR(-EINVAL); - else if (!mlx5_is_fpga_ipsec_rule(mdev, - fg->mask.match_criteria_enable, - fg->mask.match_criteria, - fte->val)) - return ERR_PTR(-EINVAL); - - /* get xfrm context */ - accel_xfrm = - (struct mlx5_accel_esp_xfrm *)fte->action.esp_id; - - /* IPs */ - if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria, - fte->val)) { - memcpy(&saddr[3], - MLX5_ADDR_OF(fte_match_set_lyr_2_4, - fte->val, - src_ipv4_src_ipv6.ipv4_layout.ipv4), - sizeof(saddr[3])); - memcpy(&daddr[3], - MLX5_ADDR_OF(fte_match_set_lyr_2_4, - fte->val, - dst_ipv4_dst_ipv6.ipv4_layout.ipv4), - sizeof(daddr[3])); - } else { - memcpy(saddr, - MLX5_ADDR_OF(fte_match_param, - fte->val, - outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), - sizeof(saddr)); - memcpy(daddr, - MLX5_ADDR_OF(fte_match_param, - fte->val, - outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - sizeof(daddr)); - is_ipv6 = true; - } - - /* SPI */ - spi = MLX5_GET_BE(typeof(spi), - fte_match_param, fte->val, - misc_parameters.outer_esp_spi); - - /* create */ - return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm, - saddr, daddr, - spi, is_ipv6, NULL); -} - -static void -mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx) -{ - struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga; - struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; - int opcode = is_v2_sadb_supported(fdev->ipsec) ? 
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
- int err;
-
- err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
- sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
- if (err) {
- WARN_ON(err);
- return;
- }
-
- if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
- MLX5_ACCEL_ESP_ACTION_DECRYPT)
- ida_free(&fipsec->halloc, sa_ctx->sa_handle);
-
- mutex_lock(&fipsec->sa_hash_lock);
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
- rhash_sa));
- mutex_unlock(&fipsec->sa_hash_lock);
-}
-
-static void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
-{
- struct mlx5_fpga_esp_xfrm *fpga_xfrm =
- ((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;
-
- mutex_lock(&fpga_xfrm->lock);
- if (!--fpga_xfrm->num_rules) {
- mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
- kfree(fpga_xfrm->sa_ctx);
- fpga_xfrm->sa_ctx = NULL;
- }
- mutex_unlock(&fpga_xfrm->lock);
-}
-
-static inline struct mlx5_fpga_ipsec_rule *
-_rule_search(struct rb_root *root, struct fs_fte *fte)
-{
- struct rb_node *node = root->rb_node;
-
- while (node) {
- struct mlx5_fpga_ipsec_rule *rule =
- container_of(node, struct mlx5_fpga_ipsec_rule,
- node);
-
- if (fte < rule->fte)
- node = node->rb_left;
- else if (fte > rule->fte)
- node = node->rb_right;
- else
- return rule;
- }
- return NULL;
-}
-
-static struct mlx5_fpga_ipsec_rule *
-rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
-{
- struct mlx5_fpga_ipsec_rule *rule;
-
- mutex_lock(&ipsec_dev->rules_rb_lock);
- rule = _rule_search(&ipsec_dev->rules_rb, fte);
- mutex_unlock(&ipsec_dev->rules_rb_lock);
-
- return rule;
-}
-
-static inline int _rule_insert(struct rb_root *root,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- struct rb_node **new = &root->rb_node, *parent = NULL;
-
- /* Figure out where to put new node */
- while (*new) {
- struct mlx5_fpga_ipsec_rule *this =
- container_of(*new, struct mlx5_fpga_ipsec_rule,
- node);
-
- parent = *new;
- if (rule->fte < this->fte)
- new = &((*new)->rb_left);
- else if (rule->fte > this->fte)
- new = &((*new)->rb_right);
- else
- return -EEXIST;
- }
-
- /* Add new node and rebalance tree.
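Since the rules rb-tree is keyed by raw fte pointer values, teardown never needs per-node lookups: a post-order walk may free each node as it goes, because the iterator caches its successor before visiting. destroy_rules_rb() later in this file also calls rb_erase() on every node; the minimal form when the whole tree is being discarded looks like this (demo_* names invented):

#include <linux/rbtree.h>
#include <linux/slab.h>

struct demo_rule {
        struct rb_node node;
        /* ... payload ... */
};

static void demo_destroy_all(struct rb_root *root)
{
        struct demo_rule *r, *tmp;

        /* the iterator fetches the next node before visiting r,
         * so freeing r inside the body is allowed */
        rbtree_postorder_for_each_entry_safe(r, tmp, root, node)
                kfree(r);

        *root = RB_ROOT;        /* drop the now-dangling root pointer */
}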
*/ - rb_link_node(&rule->node, parent, new); - rb_insert_color(&rule->node, root); - - return 0; -} - -static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev, - struct mlx5_fpga_ipsec_rule *rule) -{ - int ret; - - mutex_lock(&ipsec_dev->rules_rb_lock); - ret = _rule_insert(&ipsec_dev->rules_rb, rule); - mutex_unlock(&ipsec_dev->rules_rb_lock); - - return ret; -} - -static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev, - struct mlx5_fpga_ipsec_rule *rule) -{ - struct rb_root *root = &ipsec_dev->rules_rb; - - mutex_lock(&ipsec_dev->rules_rb_lock); - rb_erase(&rule->node, root); - mutex_unlock(&ipsec_dev->rules_rb_lock); -} - -static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev, - struct mlx5_fpga_ipsec_rule *rule) -{ - _rule_delete(ipsec_dev, rule); - kfree(rule); -} - -struct mailbox_mod { - uintptr_t saved_esp_id; - u32 saved_action; - u32 saved_outer_esp_spi_value; -}; - -static void restore_spec_mailbox(struct fs_fte *fte, - struct mailbox_mod *mbox_mod) -{ - char *misc_params_v = MLX5_ADDR_OF(fte_match_param, - fte->val, - misc_parameters); - - MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, - mbox_mod->saved_outer_esp_spi_value); - fte->action.action |= mbox_mod->saved_action; - fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id; -} - -static void modify_spec_mailbox(struct mlx5_core_dev *mdev, - struct fs_fte *fte, - struct mailbox_mod *mbox_mod) -{ - char *misc_params_v = MLX5_ADDR_OF(fte_match_param, - fte->val, - misc_parameters); - - mbox_mod->saved_esp_id = fte->action.esp_id; - mbox_mod->saved_action = fte->action.action & - (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | - MLX5_FLOW_CONTEXT_ACTION_DECRYPT); - mbox_mod->saved_outer_esp_spi_value = - MLX5_GET(fte_match_set_misc, misc_params_v, - outer_esp_spi); - - fte->action.esp_id = 0; - fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | - MLX5_FLOW_CONTEXT_ACTION_DECRYPT); - if (!MLX5_CAP_FLOWTABLE(mdev, - flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) - MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0); -} - -static enum fs_flow_table_type egress_to_fs_ft(bool egress) -{ - return egress ? 
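modify_spec_mailbox()/restore_spec_mailbox() above form a save-scrub-restore bracket around every firmware call: the crypto action bits, the esp_id cookie, and (when the firmware cannot match on it) the SPI are stashed, blanked for the duration of the command, then put back, so the software copy of the rule keeps its IPsec annotations while the firmware only ever sees a plain steering entry. The shape of the bracket, on a hypothetical demo_fte:

#include <linux/types.h>

struct demo_fte {
        uintptr_t esp_id;
        u32 action;
        u32 spi_match;
};

#define DEMO_ACTION_CRYPTO 0x3  /* assumed encrypt | decrypt action bits */

struct demo_saved {
        uintptr_t esp_id;
        u32 action;
        u32 spi_match;
};

static void demo_scrub(struct demo_fte *fte, struct demo_saved *s)
{
        s->esp_id = fte->esp_id;
        s->action = fte->action & DEMO_ACTION_CRYPTO;
        s->spi_match = fte->spi_match;

        fte->esp_id = 0;
        fte->action &= ~DEMO_ACTION_CRYPTO;
        fte->spi_match = 0;     /* only when FW cannot match on SPI */
}

static void demo_restore(struct demo_fte *fte, const struct demo_saved *s)
{
        fte->esp_id = s->esp_id;
        fte->action |= s->action;
        fte->spi_match = s->spi_match;
}

Each create/update/delete then runs as demo_scrub(); fw_call(); demo_restore();.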
FS_FT_NIC_TX : FS_FT_NIC_RX; -} - -static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - u32 *in, - struct mlx5_flow_group *fg, - bool is_egress) -{ - int (*create_flow_group)(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, u32 *in, - struct mlx5_flow_group *fg) = - mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group; - char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in, - match_criteria.misc_parameters); - struct mlx5_core_dev *dev = ns->dev; - u32 saved_outer_esp_spi_mask; - u8 match_criteria_enable; - int ret; - - if (MLX5_CAP_FLOWTABLE(dev, - flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) - return create_flow_group(ns, ft, in, fg); - - match_criteria_enable = - MLX5_GET(create_flow_group_in, in, match_criteria_enable); - saved_outer_esp_spi_mask = - MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi); - if (!match_criteria_enable || !saved_outer_esp_spi_mask) - return create_flow_group(ns, ft, in, fg); - - MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0); - - if (!(*misc_params_c) && - !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1)) - MLX5_SET(create_flow_group_in, in, match_criteria_enable, - match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS); - - ret = create_flow_group(ns, ft, in, fg); - - MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask); - MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable); - - return ret; -} - -static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - struct fs_fte *fte, - bool is_egress) -{ - int (*create_fte)(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - struct fs_fte *fte) = - mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte; - struct mlx5_core_dev *dev = ns->dev; - struct mlx5_fpga_device *fdev = dev->fpga; - struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; - struct mlx5_fpga_ipsec_rule *rule; - bool is_esp = fte->action.esp_id; - struct mailbox_mod mbox_mod; - int ret; - - if (!is_esp || - !(fte->action.action & - (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | - MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) - return create_fte(ns, ft, fg, fte); - - rule = kzalloc(sizeof(*rule), GFP_KERNEL); - if (!rule) - return -ENOMEM; - - rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress); - if (IS_ERR(rule->ctx)) { - int err = PTR_ERR(rule->ctx); - - kfree(rule); - return err; - } - - rule->fte = fte; - WARN_ON(rule_insert(fipsec, rule)); - - modify_spec_mailbox(dev, fte, &mbox_mod); - ret = create_fte(ns, ft, fg, fte); - restore_spec_mailbox(fte, &mbox_mod); - if (ret) { - _rule_delete(fipsec, rule); - mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx); - kfree(rule); - } - - return ret; -} - -static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - int modify_mask, - struct fs_fte *fte, - bool is_egress) -{ - int (*update_fte)(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - int modify_mask, - struct fs_fte *fte) = - mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte; - struct mlx5_core_dev *dev = ns->dev; - bool is_esp = fte->action.esp_id; - struct mailbox_mod mbox_mod; - int ret; - - if (!is_esp || - !(fte->action.action & - (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) - return update_fte(ns, ft, fg, modify_mask, fte); - - modify_spec_mailbox(dev, fte, &mbox_mod); - ret = update_fte(ns, ft, fg, modify_mask, fte); - restore_spec_mailbox(fte, &mbox_mod); - - return ret; -} - -static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct fs_fte *fte, - bool is_egress) -{ - int (*delete_fte)(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct fs_fte *fte) = - mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte; - struct mlx5_core_dev *dev = ns->dev; - struct mlx5_fpga_device *fdev = dev->fpga; - struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; - struct mlx5_fpga_ipsec_rule *rule; - bool is_esp = fte->action.esp_id; - struct mailbox_mod mbox_mod; - int ret; - - if (!is_esp || - !(fte->action.action & - (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | - MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) - return delete_fte(ns, ft, fte); - - rule = rule_search(fipsec, fte); - if (!rule) - return -ENOENT; - - mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx); - rule_delete(fipsec, rule); - - modify_spec_mailbox(dev, fte, &mbox_mod); - ret = delete_fte(ns, ft, fte); - restore_spec_mailbox(fte, &mbox_mod); - - return ret; -} - -static int -mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - u32 *in, - struct mlx5_flow_group *fg) -{ - return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true); -} - -static int -mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - struct fs_fte *fte) -{ - return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true); -} - -static int -mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - int modify_mask, - struct fs_fte *fte) -{ - return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte, - true); -} - -static int -mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct fs_fte *fte) -{ - return fpga_ipsec_fs_delete_fte(ns, ft, fte, true); -} - -static int -mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - u32 *in, - struct mlx5_flow_group *fg) -{ - return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false); -} - -static int -mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - struct fs_fte *fte) -{ - return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false); -} - -static int -mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - int modify_mask, - struct fs_fte *fte) -{ - return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte, - false); -} - -static int -mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct fs_fte *fte) -{ - return fpga_ipsec_fs_delete_fte(ns, ft, fte, false); -} - -static struct mlx5_flow_cmds fpga_ipsec_ingress; -static struct mlx5_flow_cmds fpga_ipsec_egress; - -const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type) -{ - switch (type) { - case FS_FT_NIC_RX: - return &fpga_ipsec_ingress; - case FS_FT_NIC_TX: - return &fpga_ipsec_egress; - default: - WARN_ON(true); - return NULL; - } -} - -static int 
mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) -{ - struct mlx5_fpga_conn_attr init_attr = {0}; - struct mlx5_fpga_device *fdev = mdev->fpga; - struct mlx5_fpga_conn *conn; - int err; - - if (!mlx5_fpga_is_ipsec_device(mdev)) - return 0; - - fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL); - if (!fdev->ipsec) - return -ENOMEM; - - fdev->ipsec->fdev = fdev; - - err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps), - fdev->ipsec->caps); - if (err) { - mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n", - err); - goto error; - } - - INIT_LIST_HEAD(&fdev->ipsec->pending_cmds); - spin_lock_init(&fdev->ipsec->pending_cmds_lock); - - init_attr.rx_size = SBU_QP_QUEUE_SIZE; - init_attr.tx_size = SBU_QP_QUEUE_SIZE; - init_attr.recv_cb = mlx5_fpga_ipsec_recv; - init_attr.cb_arg = fdev; - conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr); - if (IS_ERR(conn)) { - err = PTR_ERR(conn); - mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n", - err); - goto error; - } - fdev->ipsec->conn = conn; - - err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa); - if (err) - goto err_destroy_conn; - mutex_init(&fdev->ipsec->sa_hash_lock); - - fdev->ipsec->rules_rb = RB_ROOT; - mutex_init(&fdev->ipsec->rules_rb_lock); - - err = mlx5_fpga_ipsec_enable_supported_caps(mdev); - if (err) { - mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n", - err); - goto err_destroy_hash; - } - - ida_init(&fdev->ipsec->halloc); - - return 0; - -err_destroy_hash: - rhashtable_destroy(&fdev->ipsec->sa_hash); - -err_destroy_conn: - mlx5_fpga_sbu_conn_destroy(conn); - -error: - kfree(fdev->ipsec); - fdev->ipsec = NULL; - return err; -} - -static void destroy_rules_rb(struct rb_root *root) -{ - struct mlx5_fpga_ipsec_rule *r, *tmp; - - rbtree_postorder_for_each_entry_safe(r, tmp, root, node) { - rb_erase(&r->node, root); - mlx5_fpga_ipsec_delete_sa_ctx(r->ctx); - kfree(r); - } -} - -static void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) -{ - struct mlx5_fpga_device *fdev = mdev->fpga; - - if (!mlx5_fpga_is_ipsec_device(mdev)) - return; - - ida_destroy(&fdev->ipsec->halloc); - destroy_rules_rb(&fdev->ipsec->rules_rb); - rhashtable_destroy(&fdev->ipsec->sa_hash); - - mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn); - kfree(fdev->ipsec); - fdev->ipsec = NULL; -} - -void mlx5_fpga_ipsec_build_fs_cmds(void) -{ - /* ingress */ - fpga_ipsec_ingress.create_flow_table = - mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table; - fpga_ipsec_ingress.destroy_flow_table = - mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table; - fpga_ipsec_ingress.modify_flow_table = - mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table; - fpga_ipsec_ingress.create_flow_group = - mlx5_fpga_ipsec_fs_create_flow_group_ingress; - fpga_ipsec_ingress.destroy_flow_group = - mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group; - fpga_ipsec_ingress.create_fte = - mlx5_fpga_ipsec_fs_create_fte_ingress; - fpga_ipsec_ingress.update_fte = - mlx5_fpga_ipsec_fs_update_fte_ingress; - fpga_ipsec_ingress.delete_fte = - mlx5_fpga_ipsec_fs_delete_fte_ingress; - fpga_ipsec_ingress.update_root_ft = - mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft; - - /* egress */ - fpga_ipsec_egress.create_flow_table = - mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table; - fpga_ipsec_egress.destroy_flow_table = - mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table; - fpga_ipsec_egress.modify_flow_table = - 
mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
- fpga_ipsec_egress.create_flow_group =
- mlx5_fpga_ipsec_fs_create_flow_group_egress;
- fpga_ipsec_egress.destroy_flow_group =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
- fpga_ipsec_egress.create_fte =
- mlx5_fpga_ipsec_fs_create_fte_egress;
- fpga_ipsec_egress.update_fte =
- mlx5_fpga_ipsec_fs_update_fte_egress;
- fpga_ipsec_egress.delete_fte =
- mlx5_fpga_ipsec_fs_delete_fte_egress;
- fpga_ipsec_egress.update_root_ft =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
-}
-
-static int
-mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- if (attrs->tfc_pad) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
- mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.iv_algo !=
- MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
- mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.icv_len != 128) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.key_len != 128 &&
- attrs->keymat.aes_gcm.key_len != 256) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
- return -EOPNOTSUPP;
- }
-
- if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
- (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
- v2_command))) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered without v2 command support\n");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static struct mlx5_accel_esp_xfrm *
-mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
-{
- struct mlx5_fpga_esp_xfrm *fpga_xfrm;
-
- if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
- mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
- mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
- return ERR_PTR(-EOPNOTSUPP);
- }
-
- fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
- if (!fpga_xfrm)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&fpga_xfrm->lock);
- memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
- sizeof(fpga_xfrm->accel_xfrm.attrs));
-
- return &fpga_xfrm->accel_xfrm;
-}
-
-static void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
-{
- struct mlx5_fpga_esp_xfrm *fpga_xfrm =
- container_of(xfrm, struct mlx5_fpga_esp_xfrm,
- accel_xfrm);
- /* assuming no sa_ctx are connected to this xfrm_ctx */
- kfree(fpga_xfrm);
-}
-
-static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- struct mlx5_core_dev *mdev = xfrm->mdev;
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- struct mlx5_fpga_esp_xfrm *fpga_xfrm;
- struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
-
- int err = 0;
-
- if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
- return 0;
-
- if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
-
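mlx5_fpga_esp_modify_xfrm() below has to re-key a live entry in sa_hash, since the keymat being changed is part of the hash key. The discipline is: remove, rewrite, re-insert, push to hardware, and unwind in reverse on any failure. A distilled sketch (demo_* names and demo_push_to_hw() are invented stand-ins for the mlx5 command path):

#include <linux/rhashtable.h>

struct demo_keymat {
        u8 key[32];
};

struct demo_sa {
        struct rhash_head hash;
        struct demo_keymat keymat;      /* part of the hash key */
};

static int demo_push_to_hw(struct demo_sa *sa); /* device command stand-in */

static int demo_rekey(struct rhashtable *ht, struct demo_sa *sa,
                      const struct demo_keymat *new_km,
                      const struct rhashtable_params params)
{
        struct demo_keymat old = sa->keymat;
        int err;

        /* the key is about to change, so leave the table first */
        WARN_ON(rhashtable_remove_fast(ht, &sa->hash, params));
        sa->keymat = *new_km;

        err = rhashtable_insert_fast(ht, &sa->hash, params);
        if (err)
                goto rollback;

        err = demo_push_to_hw(sa);
        if (err) {
                WARN_ON(rhashtable_remove_fast(ht, &sa->hash, params));
                goto rollback;
        }
        return 0;

rollback:
        sa->keymat = old;
        WARN_ON(rhashtable_insert_fast(ht, &sa->hash, params));
        return err;
}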
mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n"); - return -EOPNOTSUPP; - } - - if (is_v2_sadb_supported(fipsec)) { - mlx5_core_warn(mdev, "Modify esp is not supported\n"); - return -EOPNOTSUPP; - } - - fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm); - - mutex_lock(&fpga_xfrm->lock); - - if (!fpga_xfrm->sa_ctx) - /* Unbounded xfrm, change only sw attrs */ - goto change_sw_xfrm_attrs; - - /* copy original hw sa */ - memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa)); - mutex_lock(&fipsec->sa_hash_lock); - /* remove original hw sa from hash */ - WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, - &fpga_xfrm->sa_ctx->hash, rhash_sa)); - /* update hw_sa with new xfrm attrs*/ - mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs, - &fpga_xfrm->sa_ctx->hw_sa); - /* try to insert new hw_sa to hash */ - err = rhashtable_insert_fast(&fipsec->sa_hash, - &fpga_xfrm->sa_ctx->hash, rhash_sa); - if (err) - goto rollback_sa; - - /* modify device with new hw_sa */ - err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa, - MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2); - fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0; - if (err) - WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, - &fpga_xfrm->sa_ctx->hash, - rhash_sa)); -rollback_sa: - if (err) { - /* return original hw_sa to hash */ - memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa, - sizeof(org_hw_sa)); - WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash, - &fpga_xfrm->sa_ctx->hash, - rhash_sa)); - } - mutex_unlock(&fipsec->sa_hash_lock); - -change_sw_xfrm_attrs: - if (!err) - memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs)); - mutex_unlock(&fpga_xfrm->lock); - return err; -} - -static const struct mlx5_accel_ipsec_ops fpga_ipsec_ops = { - .device_caps = mlx5_fpga_ipsec_device_caps, - .counters_count = mlx5_fpga_ipsec_counters_count, - .counters_read = mlx5_fpga_ipsec_counters_read, - .create_hw_context = mlx5_fpga_ipsec_create_sa_ctx, - .free_hw_context = mlx5_fpga_ipsec_delete_sa_ctx, - .init = mlx5_fpga_ipsec_init, - .cleanup = mlx5_fpga_ipsec_cleanup, - .esp_create_xfrm = mlx5_fpga_esp_create_xfrm, - .esp_modify_xfrm = mlx5_fpga_esp_modify_xfrm, - .esp_destroy_xfrm = mlx5_fpga_esp_destroy_xfrm, -}; - -const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev) -{ - if (!mlx5_fpga_is_ipsec_device(mdev)) - return NULL; - - return &fpga_ipsec_ops; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h deleted file mode 100644 index 8931b5584477..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#ifndef __MLX5_FPGA_IPSEC_H__ -#define __MLX5_FPGA_IPSEC_H__ - -#include "accel/ipsec.h" -#include "fs_cmd.h" - -#ifdef CONFIG_MLX5_FPGA_IPSEC -const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev); -u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev); -const struct mlx5_flow_cmds * -mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type); -void mlx5_fpga_ipsec_build_fs_cmds(void); -bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev); -#else -static inline -const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev) -{ return NULL; } -static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; } -static inline const struct mlx5_flow_cmds * -mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type) -{ - return mlx5_fs_cmd_get_default(type); -} - -static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {}; -static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; } - -#endif /* CONFIG_MLX5_FPGA_IPSEC */ -#endif /* __MLX5_FPGA_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c deleted file mode 100644 index 29b7339ebfa3..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +++ /dev/null @@ -1,622 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - */ - -#include <linux/mlx5/device.h> -#include "fpga/tls.h" -#include "fpga/cmd.h" -#include "fpga/sdk.h" -#include "fpga/core.h" -#include "accel/tls.h" - -struct mlx5_fpga_tls_command_context; - -typedef void (*mlx5_fpga_tls_command_complete) - (struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev, - struct mlx5_fpga_tls_command_context *ctx, - struct mlx5_fpga_dma_buf *resp); - -struct mlx5_fpga_tls_command_context { - struct list_head list; - /* There is no guarantee on the order between the TX completion - * and the command response. - * The TX completion is going to touch cmd->buf even in - * the case of successful transmission. - * So instead of requiring separate allocations for cmd - * and cmd->buf we've decided to use a reference counter - */ - refcount_t ref; - struct mlx5_fpga_dma_buf buf; - mlx5_fpga_tls_command_complete complete; -}; - -static void -mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx) -{ - if (refcount_dec_and_test(&ctx->ref)) - kfree(ctx); -} - -static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev, - struct mlx5_fpga_dma_buf *resp) -{ - struct mlx5_fpga_conn *conn = fdev->tls->conn; - struct mlx5_fpga_tls_command_context *ctx; - struct mlx5_fpga_tls *tls = fdev->tls; - unsigned long flags; - - spin_lock_irqsave(&tls->pending_cmds_lock, flags); - ctx = list_first_entry(&tls->pending_cmds, - struct mlx5_fpga_tls_command_context, list); - list_del(&ctx->list); - spin_unlock_irqrestore(&tls->pending_cmds_lock, flags); - ctx->complete(conn, fdev, ctx, resp); -} - -static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn, - struct mlx5_fpga_device *fdev, - struct mlx5_fpga_dma_buf *buf, - u8 status) -{ - struct mlx5_fpga_tls_command_context *ctx = - container_of(buf, struct mlx5_fpga_tls_command_context, buf); - - mlx5_fpga_tls_put_command_ctx(ctx); - - if (unlikely(status)) - mlx5_fpga_tls_cmd_complete(fdev, NULL); -} - -static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev, - struct mlx5_fpga_tls_command_context *cmd, - mlx5_fpga_tls_command_complete complete) -{ - struct mlx5_fpga_tls *tls = fdev->tls; - unsigned long flags; - int ret; - - refcount_set(&cmd->ref, 2); - cmd->complete = complete; - cmd->buf.complete = mlx5_fpga_cmd_send_complete; - - spin_lock_irqsave(&tls->pending_cmds_lock, flags); - /* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock - * to make sure commands are inserted to the tls->pending_cmds list - * and the command QP in the same order. - */ - ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf); - if (likely(!ret)) - list_add_tail(&cmd->list, &tls->pending_cmds); - else - complete(tls->conn, fdev, cmd, NULL); - spin_unlock_irqrestore(&tls->pending_cmds_lock, flags); -} - -/* Start of context identifiers range (inclusive) */ -#define SWID_START 0 -/* End of context identifiers range (exclusive) */ -#define SWID_END BIT(24) - -static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, - void *ptr) -{ - unsigned long flags; - int ret; - - /* TLS metadata format is 1 byte for syndrome followed - * by 3 bytes of swid (software ID) - * swid must not exceed 3 bytes. 
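The refcount_t comment above is the heart of this file's command lifetime rules: the TX completion and the command response both touch cmd->buf and can fire in either order, so the context starts with two references and whichever path finishes last frees it. Distilled, with invented demo_* names (demo_tls_cmd_send_hw() stands in for the SBU QP send):

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_tls_cmd {
        refcount_t ref;
        /* ... dma buffer, completion callback, list linkage ... */
};

static void demo_tls_cmd_send_hw(struct demo_tls_cmd *cmd); /* QP send stand-in */

static void demo_tls_cmd_put(struct demo_tls_cmd *cmd)
{
        if (refcount_dec_and_test(&cmd->ref))
                kfree(cmd);
}

static void demo_tls_cmd_send(struct demo_tls_cmd *cmd)
{
        /* one ref for the TX completion, one for the response handler;
         * each path ends with demo_tls_cmd_put() */
        refcount_set(&cmd->ref, 2);
        demo_tls_cmd_send_hw(cmd);
}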
- * See tls_rxtx.c:insert_pet() for details - */ - BUILD_BUG_ON((SWID_END - 1) & 0xFF000000); - - idr_preload(GFP_KERNEL); - spin_lock_irqsave(idr_spinlock, flags); - ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC); - spin_unlock_irqrestore(idr_spinlock, flags); - idr_preload_end(); - - return ret; -} - -static void *mlx5_fpga_tls_release_swid(struct idr *idr, - spinlock_t *idr_spinlock, u32 swid) -{ - unsigned long flags; - void *ptr; - - spin_lock_irqsave(idr_spinlock, flags); - ptr = idr_remove(idr, swid); - spin_unlock_irqrestore(idr_spinlock, flags); - return ptr; -} - -static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, - struct mlx5_fpga_device *fdev, - struct mlx5_fpga_dma_buf *buf, u8 status) -{ - kfree(buf); -} - -static void -mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, - struct mlx5_fpga_device *fdev, - struct mlx5_fpga_tls_command_context *cmd, - struct mlx5_fpga_dma_buf *resp) -{ - if (resp) { - u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); - - if (syndrome) - mlx5_fpga_err(fdev, - "Teardown stream failed with syndrome = %d", - syndrome); - } - mlx5_fpga_tls_put_command_ctx(cmd); -} - -static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd) -{ - memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow, - MLX5_BYTE_OFF(tls_flow, ipv6)); - - MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6)); - MLX5_SET(tls_cmd, cmd, direction_sx, - MLX5_GET(tls_flow, flow, direction_sx)); -} - -int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, - u32 seq, __be64 rcd_sn) -{ - struct mlx5_fpga_dma_buf *buf; - int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE; - void *flow; - void *cmd; - int ret; - - buf = kzalloc(size, GFP_ATOMIC); - if (!buf) - return -ENOMEM; - - cmd = (buf + 1); - - rcu_read_lock(); - flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); - if (unlikely(!flow)) { - rcu_read_unlock(); - WARN_ONCE(1, "Received NULL pointer for handle\n"); - kfree(buf); - return -EINVAL; - } - mlx5_fpga_tls_flow_to_cmd(flow, cmd); - rcu_read_unlock(); - - MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); - MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); - MLX5_SET(tls_cmd, cmd, tcp_sn, seq); - MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX); - - buf->sg[0].data = cmd; - buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; - buf->complete = mlx_tls_kfree_complete; - - ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); - if (ret < 0) - kfree(buf); - - return ret; -} - -static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, - void *flow, u32 swid, gfp_t flags) -{ - struct mlx5_fpga_tls_command_context *ctx; - struct mlx5_fpga_dma_buf *buf; - void *cmd; - - ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags); - if (!ctx) - return; - - buf = &ctx->buf; - cmd = (ctx + 1); - MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM); - MLX5_SET(tls_cmd, cmd, swid, swid); - - mlx5_fpga_tls_flow_to_cmd(flow, cmd); - kfree(flow); - - buf->sg[0].data = cmd; - buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; - - mlx5_fpga_tls_cmd_send(mdev->fpga, ctx, - mlx5_fpga_tls_teardown_completion); -} - -void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, - gfp_t flags, bool direction_sx) -{ - struct mlx5_fpga_tls *tls = mdev->fpga->tls; - void *flow; - - if (direction_sx) - flow = mlx5_fpga_tls_release_swid(&tls->tx_idr, - &tls->tx_idr_spinlock, - swid); - else - flow = mlx5_fpga_tls_release_swid(&tls->rx_idr, - &tls->rx_idr_spinlock, - swid); - - if (!flow) { - mlx5_fpga_err(mdev->fpga, "No 
flow information for swid %u\n", - swid); - return; - } - - synchronize_rcu(); /* before kfree(flow) */ - mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags); -} - -enum mlx5_fpga_setup_stream_status { - MLX5_FPGA_CMD_PENDING, - MLX5_FPGA_CMD_SEND_FAILED, - MLX5_FPGA_CMD_RESPONSE_RECEIVED, - MLX5_FPGA_CMD_ABANDONED, -}; - -struct mlx5_setup_stream_context { - struct mlx5_fpga_tls_command_context cmd; - atomic_t status; - u32 syndrome; - struct completion comp; -}; - -static void -mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn, - struct mlx5_fpga_device *fdev, - struct mlx5_fpga_tls_command_context *cmd, - struct mlx5_fpga_dma_buf *resp) -{ - struct mlx5_setup_stream_context *ctx = - container_of(cmd, struct mlx5_setup_stream_context, cmd); - int status = MLX5_FPGA_CMD_SEND_FAILED; - void *tls_cmd = ctx + 1; - - /* If we failed to send to command resp == NULL */ - if (resp) { - ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); - status = MLX5_FPGA_CMD_RESPONSE_RECEIVED; - } - - status = atomic_xchg_release(&ctx->status, status); - if (likely(status != MLX5_FPGA_CMD_ABANDONED)) { - complete(&ctx->comp); - return; - } - - mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n", - ctx->syndrome); - - if (!ctx->syndrome) { - /* The process was killed while waiting for the context to be - * added, and the add completed successfully. - * We need to destroy the HW context, and we can't can't reuse - * the command context because we might not have received - * the tx completion yet. - */ - mlx5_fpga_tls_del_flow(fdev->mdev, - MLX5_GET(tls_cmd, tls_cmd, swid), - GFP_ATOMIC, - MLX5_GET(tls_cmd, tls_cmd, - direction_sx)); - } - - mlx5_fpga_tls_put_command_ctx(cmd); -} - -static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev, - struct mlx5_setup_stream_context *ctx) -{ - struct mlx5_fpga_dma_buf *buf; - void *cmd = ctx + 1; - int status, ret = 0; - - buf = &ctx->cmd.buf; - buf->sg[0].data = cmd; - buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; - MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM); - - init_completion(&ctx->comp); - atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING); - ctx->syndrome = -1; - - mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd, - mlx5_fpga_tls_setup_completion); - wait_for_completion_killable(&ctx->comp); - - status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED); - if (unlikely(status == MLX5_FPGA_CMD_PENDING)) - /* ctx is going to be released in mlx5_fpga_tls_setup_completion */ - return -EINTR; - - if (unlikely(ctx->syndrome)) - ret = -ENOMEM; - - mlx5_fpga_tls_put_command_ctx(&ctx->cmd); - return ret; -} - -static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg, - struct mlx5_fpga_dma_buf *buf) -{ - struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg; - - mlx5_fpga_tls_cmd_complete(fdev, buf); -} - -bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev) -{ - if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga)) - return false; - - if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) != - MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX) - return false; - - if (MLX5_CAP_FPGA(mdev, sandbox_product_id) != - MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS) - return false; - - if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0) - return false; - - return true; -} - -static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev, - u32 *p_caps) -{ - int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap); - u32 caps = 0; - void *buf; - - buf = kzalloc(cap_size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - err = mlx5_fpga_get_sbu_caps(fdev, 
cap_size, buf); - if (err) - goto out; - - if (MLX5_GET(tls_extended_cap, buf, tx)) - caps |= MLX5_ACCEL_TLS_TX; - if (MLX5_GET(tls_extended_cap, buf, rx)) - caps |= MLX5_ACCEL_TLS_RX; - if (MLX5_GET(tls_extended_cap, buf, tls_v12)) - caps |= MLX5_ACCEL_TLS_V12; - if (MLX5_GET(tls_extended_cap, buf, tls_v13)) - caps |= MLX5_ACCEL_TLS_V13; - if (MLX5_GET(tls_extended_cap, buf, lro)) - caps |= MLX5_ACCEL_TLS_LRO; - if (MLX5_GET(tls_extended_cap, buf, ipv6)) - caps |= MLX5_ACCEL_TLS_IPV6; - - if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128)) - caps |= MLX5_ACCEL_TLS_AES_GCM128; - if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256)) - caps |= MLX5_ACCEL_TLS_AES_GCM256; - - *p_caps = caps; - err = 0; -out: - kfree(buf); - return err; -} - -int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev) -{ - struct mlx5_fpga_device *fdev = mdev->fpga; - struct mlx5_fpga_conn_attr init_attr = {0}; - struct mlx5_fpga_conn *conn; - struct mlx5_fpga_tls *tls; - int err = 0; - - if (!mlx5_fpga_is_tls_device(mdev) || !fdev) - return 0; - - tls = kzalloc(sizeof(*tls), GFP_KERNEL); - if (!tls) - return -ENOMEM; - - err = mlx5_fpga_tls_get_caps(fdev, &tls->caps); - if (err) - goto error; - - if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) { - err = -ENOTSUPP; - goto error; - } - - init_attr.rx_size = SBU_QP_QUEUE_SIZE; - init_attr.tx_size = SBU_QP_QUEUE_SIZE; - init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb; - init_attr.cb_arg = fdev; - conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr); - if (IS_ERR(conn)) { - err = PTR_ERR(conn); - mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n", - err); - goto error; - } - - tls->conn = conn; - spin_lock_init(&tls->pending_cmds_lock); - INIT_LIST_HEAD(&tls->pending_cmds); - - idr_init(&tls->tx_idr); - idr_init(&tls->rx_idr); - spin_lock_init(&tls->tx_idr_spinlock); - spin_lock_init(&tls->rx_idr_spinlock); - fdev->tls = tls; - return 0; - -error: - kfree(tls); - return err; -} - -void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev) -{ - struct mlx5_fpga_device *fdev = mdev->fpga; - - if (!fdev || !fdev->tls) - return; - - mlx5_fpga_sbu_conn_destroy(fdev->tls->conn); - kfree(fdev->tls); - fdev->tls = NULL; -} - -static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd, - struct tls_crypto_info *info, - __be64 *rcd_sn) -{ - struct tls12_crypto_info_aes_gcm_128 *crypto_info = - (struct tls12_crypto_info_aes_gcm_128 *)info; - - memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq, - TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE); - - memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv), - crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); - memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key), - crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); - - /* in AES-GCM 128 we need to write the key twice */ - memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) + - TLS_CIPHER_AES_GCM_128_KEY_SIZE, - crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); - - MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128); -} - -static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps, - struct tls_crypto_info *crypto_info) -{ - __be64 rcd_sn; - - switch (crypto_info->cipher_type) { - case TLS_CIPHER_AES_GCM_128: - if (!(caps & MLX5_ACCEL_TLS_AES_GCM128)) - return -EINVAL; - mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn); - break; - default: - return -EINVAL; - } - - return 0; -} - -static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 swid, u32 tcp_sn) -{ - u32 caps = 
mlx5_fpga_tls_device_caps(mdev); - struct mlx5_setup_stream_context *ctx; - int ret = -ENOMEM; - size_t cmd_size; - void *cmd; - - cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx); - ctx = kzalloc(cmd_size, GFP_KERNEL); - if (!ctx) - goto out; - - cmd = ctx + 1; - ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info); - if (ret) - goto free_ctx; - - mlx5_fpga_tls_flow_to_cmd(flow, cmd); - - MLX5_SET(tls_cmd, cmd, swid, swid); - MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn); - - return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx); - -free_ctx: - kfree(ctx); -out: - return ret; -} - -int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid, - bool direction_sx) -{ - struct mlx5_fpga_tls *tls = mdev->fpga->tls; - int ret = -ENOMEM; - u32 swid; - - if (direction_sx) - ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, - &tls->tx_idr_spinlock, flow); - else - ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr, - &tls->rx_idr_spinlock, flow); - - if (ret < 0) - return ret; - - swid = ret; - MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0); - - ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid, - start_offload_tcp_sn); - if (ret && ret != -EINTR) - goto free_swid; - - *p_swid = swid; - return 0; -free_swid: - if (direction_sx) - mlx5_fpga_tls_release_swid(&tls->tx_idr, - &tls->tx_idr_spinlock, swid); - else - mlx5_fpga_tls_release_swid(&tls->rx_idr, - &tls->rx_idr_spinlock, swid); - - return ret; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h deleted file mode 100644 index 5714cf391d1b..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - */ - -#ifndef __MLX5_FPGA_TLS_H__ -#define __MLX5_FPGA_TLS_H__ - -#include <linux/mlx5/driver.h> - -#include <net/tls.h> -#include "fpga/core.h" - -struct mlx5_fpga_tls { - struct list_head pending_cmds; - spinlock_t pending_cmds_lock; /* Protects pending_cmds */ - u32 caps; - struct mlx5_fpga_conn *conn; - - struct idr tx_idr; - struct idr rx_idr; - spinlock_t tx_idr_spinlock; /* protects the IDR */ - spinlock_t rx_idr_spinlock; /* protects the IDR */ -}; - -int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid, - bool direction_sx); - -void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, - gfp_t flags, bool direction_sx); - -bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev); -int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev); -void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev); - -static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev) -{ - return mdev->fpga->tls->caps; -} - -int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, - u32 seq, __be64 rcd_sn); - -#endif /* __MLX5_FPGA_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index a0ac17c3f12f..2ccf7bef9b05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -455,7 +455,8 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev, return 0; list_for_each_entry(dst, &fte->node.children, node.list) { - if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER || + dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_NONE) continue; if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT || dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) && @@ -571,18 +572,23 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, int list_size = 0; list_for_each_entry(dst, &fte->node.children, node.list) { - unsigned int id, type = dst->dest_attr.type; + enum mlx5_flow_destination_type type = dst->dest_attr.type; + enum mlx5_ifc_flow_destination_type ifc_type; + unsigned int id; if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) continue; switch (type) { + case MLX5_FLOW_DESTINATION_TYPE_NONE: + continue; case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM: id = dst->dest_attr.ft_num; - type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE; break; case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: id = dst->dest_attr.ft->id; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE; break; case MLX5_FLOW_DESTINATION_TYPE_UPLINK: case MLX5_FLOW_DESTINATION_TYPE_VPORT: @@ -596,8 +602,10 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) { /* destination_id is reserved */ id = 0; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK; break; } + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT; id = dst->dest_attr.vport.num; if (extended_dest && dst->dest_attr.vport.pkt_reformat) { @@ -612,13 +620,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, break; case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER: id = dst->dest_attr.sampler_id; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER; break; default: id = dst->dest_attr.tir_num; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR; } MLX5_SET(dest_format_struct, in_dests, destination_type, - type); + ifc_type); MLX5_SET(dest_format_struct, 
in_dests, destination_id, id); in_dests += dst_cnt_size; list_size++; @@ -878,9 +888,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, table_type = FS_FT_NIC_RX; break; case MLX5_FLOW_NAMESPACE_EGRESS: -#ifdef CONFIG_MLX5_IPSEC case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL: -#endif max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions); table_type = FS_FT_NIC_TX; break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 816d991f7621..fb8175672478 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -40,8 +40,6 @@ #include "fs_cmd.h" #include "fs_ft_pool.h" #include "diag/fs_tracepoint.h" -#include "accel/ipsec.h" -#include "fpga/ipsec.h" #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ sizeof(struct init_tree_node)) @@ -188,24 +186,18 @@ static struct init_tree_node { static struct init_tree_node egress_root_fs = { .type = FS_TYPE_NAMESPACE, -#ifdef CONFIG_MLX5_IPSEC .ar_size = 2, -#else - .ar_size = 1, -#endif .children = (struct init_tree_node[]) { ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0, FS_CHAINING_CAPS_EGRESS, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_NUM_LEVELS))), -#ifdef CONFIG_MLX5_IPSEC ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0, FS_CHAINING_CAPS_EGRESS, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS, KERNEL_TX_IPSEC_NUM_LEVELS))), -#endif } }; @@ -432,6 +424,16 @@ static bool is_fwd_next_action(u32 action) MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS); } +static bool is_fwd_dest_type(enum mlx5_flow_destination_type type) +{ + return type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM || + type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE || + type == MLX5_FLOW_DESTINATION_TYPE_UPLINK || + type == MLX5_FLOW_DESTINATION_TYPE_VPORT || + type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER || + type == MLX5_FLOW_DESTINATION_TYPE_TIR; +} + static bool check_valid_spec(const struct mlx5_flow_spec *spec) { int i; @@ -558,8 +560,8 @@ static void del_sw_hw_rule(struct fs_node *node) mutex_unlock(&rule->dest_attr.ft->lock); } - if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER && - --fte->dests_size) { + if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) { + --fte->dests_size; fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); @@ -567,17 +569,23 @@ static void del_sw_hw_rule(struct fs_node *node) goto out; } - if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT && - --fte->dests_size) { + if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) { + --fte->dests_size; fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW; goto out; } - if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && - --fte->dests_size) { + if (is_fwd_dest_type(rule->dest_attr.type)) { + --fte->dests_size; + --fte->fwd_dests; + + if (!fte->fwd_dests) + fte->action.action &= + ~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); + goto out; } out: kfree(rule); @@ -597,6 +605,7 @@ static void del_hw_fte(struct fs_node *node) fs_get_obj(ft, fg->node.parent); trace_mlx5_fs_del_fte(fte); + WARN_ON(fte->dests_size); dev = get_dev(&ft->node); root = find_root(&ft->node); if (node->active) { @@ -1296,6 +1305,8 @@ 
static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest) rule->node.type = FS_TYPE_FLOW_DEST; if (dest) memcpy(&rule->dest_attr, dest, sizeof(*dest)); + else + rule->dest_attr.type = MLX5_FLOW_DESTINATION_TYPE_NONE; return rule; } @@ -1372,6 +1383,9 @@ create_flow_handle(struct fs_fte *fte, if (dest) { fte->dests_size++; + if (is_fwd_dest_type(dest[i].type)) + fte->fwd_dests++; + type = dest[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER; *modify_mask |= type ? count : dst; @@ -2071,16 +2085,16 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) down_write_ref_node(&fte->node, false); for (i = handle->num_rules - 1; i >= 0; i--) tree_remove_node(&handle->rule[i]->node, true); - if (fte->dests_size) { - if (fte->modify_mask) - modify_fte(fte); - up_write_ref_node(&fte->node, false); - } else if (list_empty(&fte->node.children)) { - del_hw_fte(&fte->node); + if (list_empty(&fte->node.children)) { + fte->node.del_hw_func(&fte->node); /* Avoid double call to del_hw_fte */ fte->node.del_hw_func = NULL; up_write_ref_node(&fte->node, false); tree_put_node(&fte->node, false); + } else if (fte->dests_size) { + if (fte->modify_mask) + modify_fte(fte); + up_write_ref_node(&fte->node, false); } else { up_write_ref_node(&fte->node, false); } @@ -2519,10 +2533,6 @@ static struct mlx5_flow_root_namespace struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_namespace *ns; - if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE && - (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX)) - cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type); - /* Create the root namespace */ root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL); if (!root_ns) @@ -3172,8 +3182,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } - if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE || - MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) { + if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) { err = init_egress_root_ns(steering); if (err) goto err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index c488a7c5b07e..67cad7a6d836 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -226,6 +226,7 @@ struct fs_fte { struct mlx5_fs_dr_rule fs_dr_rule; u32 val[MLX5_ST_SZ_DW_MATCH_PARAM]; u32 dests_size; + u32 fwd_dests; u32 index; struct mlx5_flow_context flow_context; struct mlx5_flow_act action; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 614687e0e3d9..cfb8bedba512 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -35,7 +35,6 @@ #include "mlx5_core.h" #include "../../mlxfw/mlxfw.h" #include "lib/tout.h" -#include "accel/tls.h" enum { MCQS_IDENTIFIER_BOOT_IMG = 0x1, @@ -249,7 +248,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } - if (mlx5_accel_is_ktls_tx(dev) || mlx5_accel_is_ktls_rx(dev)) { + if (MLX5_CAP_GEN(dev, tls_tx) || MLX5_CAP_GEN(dev, tls_rx)) { err = mlx5_core_get_caps(dev, MLX5_CAP_TLS); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 2589e39eb9c7..95d7712c2d9a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -62,9 +62,7 @@ #include "lib/mlx5.h" #include "lib/tout.h" #include "fpga/core.h" -#include 
"fpga/ipsec.h" -#include "accel/ipsec.h" -#include "accel/tls.h" +#include "en_accel/ipsec_offload.h" #include "lib/clock.h" #include "lib/vxlan.h" #include "lib/geneve.h" @@ -179,30 +177,29 @@ static struct mlx5_profile profile[] = { }, }; -static int fw_initializing(struct mlx5_core_dev *dev) -{ - return ioread32be(&dev->iseg->initializing) >> 31; -} - static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili, u32 warn_time_mili) { unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili); unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili); + u32 fw_initializing; int err = 0; - while (fw_initializing(dev)) { + do { + fw_initializing = ioread32be(&dev->iseg->initializing); + if (!(fw_initializing >> 31)) + break; if (time_after(jiffies, end)) { err = -EBUSY; break; } if (warn_time_mili && time_after(jiffies, warn)) { - mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds\n", - jiffies_to_msecs(end - warn) / 1000); + mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds (0x%x)\n", + jiffies_to_msecs(end - warn) / 1000, fw_initializing); warn = jiffies + msecs_to_jiffies(warn_time_mili); } msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT)); - } + } while (true); return err; } @@ -1183,14 +1180,6 @@ static int mlx5_load(struct mlx5_core_dev *dev) goto err_fpga_start; } - mlx5_accel_ipsec_init(dev); - - err = mlx5_accel_tls_init(dev); - if (err) { - mlx5_core_err(dev, "TLS device start failed %d\n", err); - goto err_tls_start; - } - err = mlx5_init_fs(dev); if (err) { mlx5_core_err(dev, "Failed to init flow steering\n"); @@ -1238,9 +1227,6 @@ err_vhca: err_set_hca: mlx5_cleanup_fs(dev); err_fs: - mlx5_accel_tls_cleanup(dev); -err_tls_start: - mlx5_accel_ipsec_cleanup(dev); mlx5_fpga_device_stop(dev); err_fpga_start: mlx5_rsc_dump_cleanup(dev); @@ -1266,8 +1252,6 @@ static void mlx5_unload(struct mlx5_core_dev *dev) mlx5_sf_hw_table_destroy(dev); mlx5_vhca_event_stop(dev); mlx5_cleanup_fs(dev); - mlx5_accel_ipsec_cleanup(dev); - mlx5_accel_tls_cleanup(dev); mlx5_fpga_device_stop(dev); mlx5_rsc_dump_cleanup(dev); mlx5_hv_vhca_cleanup(dev->hv_vhca); @@ -1947,7 +1931,6 @@ static int __init init(void) get_random_bytes(&sw_owner_id, sizeof(sw_owner_id)); mlx5_core_verify_params(); - mlx5_fpga_ipsec_build_fs_cmds(); mlx5_register_debugfs(); err = pci_register_driver(&mlx5_core_driver); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 4dd619d238cc..223c8741b7ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -311,7 +311,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev, in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); MLX5_SET(dest_format_struct, in_dests, destination_type, - MLX5_FLOW_DESTINATION_TYPE_VPORT); + MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT); MLX5_SET(dest_format_struct, in_dests, destination_id, vport); err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); @@ -604,7 +604,8 @@ static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev, if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) return 0; for (i = 0; i < fte->dests_size; i++) { - if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER || + fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_NONE) continue; if ((fte->dest_arr[i].type == 
MLX5_FLOW_DESTINATION_TYPE_VPORT || fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) && @@ -719,18 +720,24 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, int list_size = 0; for (i = 0; i < fte->dests_size; i++) { - unsigned int id, type = fte->dest_arr[i].type; + enum mlx5_flow_destination_type type = fte->dest_arr[i].type; + enum mlx5_ifc_flow_destination_type ifc_type; + unsigned int id; if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) continue; switch (type) { + case MLX5_FLOW_DESTINATION_TYPE_NONE: + continue; case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM: id = fte->dest_arr[i].ft_num; - type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE; break; case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: id = fte->dest_arr[i].ft_id; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE; + break; case MLX5_FLOW_DESTINATION_TYPE_UPLINK: case MLX5_FLOW_DESTINATION_TYPE_VPORT: @@ -740,8 +747,10 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, destination_eswitch_owner_vhca_id_valid, !!(fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID)); + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT; } else { id = 0; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK; MLX5_SET(dest_format_struct, in_dests, destination_eswitch_owner_vhca_id_valid, 1); } @@ -761,13 +770,15 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, break; case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER: id = fte->dest_arr[i].sampler_id; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER; break; default: id = fte->dest_arr[i].tir_num; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR; } MLX5_SET(dest_format_struct, in_dests, destination_type, - type); + ifc_type); MLX5_SET(dest_format_struct, in_dests, destination_id, id); in_dests += dst_cnt_size; list_size++; diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 196adeb33495..1a465fd5d8b3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o mlxsw_core-objs := core.o core_acl_flex_keys.o \ - core_acl_flex_actions.o core_env.o + core_acl_flex_actions.o core_env.o \ + core_linecards.o mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index b13e0f8d232a..fc52832241b3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -48,6 +48,7 @@ struct mlxsw_core_port { struct devlink_port devlink_port; void *port_driver_priv; u16 local_port; + struct mlxsw_linecard *linecard; }; void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port) @@ -82,6 +83,7 @@ struct mlxsw_core { struct mlxsw_res res; struct mlxsw_hwmon *hwmon; struct mlxsw_thermal *thermal; + struct mlxsw_linecards *linecards; struct mlxsw_core_port *ports; unsigned int max_ports; atomic_t active_ports_count; @@ -94,6 +96,17 @@ struct mlxsw_core { /* driver_priv has to be always the last item */ }; +struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core) +{ + return mlxsw_core->linecards; +} + +void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards *linecards) +{ + mlxsw_core->linecards = linecards; +} + #define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40 
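The two mlxsw_core linecards accessors added above are deliberately thin: the bus layer allocates the line-card array once and caches the pointer on the core, and per-slot lookups later map the 1-based slot index onto a 0-based array (see mlxsw_linecard_get() in core.h further down, and the slot_index check in __mlxsw_core_port_init() below). A minimal self-contained sketch of that pattern follows; the sketch_* names are stand-ins for illustration only, not part of the mlxsw API.

/* Stand-alone sketch of the set/get + 1-based slot lookup pattern. */
#include <assert.h>
#include <stdio.h>

#define SKETCH_MAX_SLOTS 8

struct sketch_linecard {
	unsigned char slot_index;	/* 1-based, as in mlxsw */
	int active;
};

struct sketch_linecards {
	unsigned char count;
	struct sketch_linecard linecards[SKETCH_MAX_SLOTS];
};

struct sketch_core {
	struct sketch_linecards *linecards;
};

/* Mirrors mlxsw_core_linecards_set(): the core only caches the pointer. */
static void sketch_core_linecards_set(struct sketch_core *core,
				      struct sketch_linecards *linecards)
{
	core->linecards = linecards;
}

/* Mirrors mlxsw_linecard_get(): slot indices start at 1 (slot 0 denotes
 * the main board and has no line-card entry), so subtract one to index
 * the array.
 */
static struct sketch_linecard *
sketch_linecard_get(struct sketch_linecards *linecards,
		    unsigned char slot_index)
{
	assert(slot_index >= 1 && slot_index <= linecards->count);
	return &linecards->linecards[slot_index - 1];
}

int main(void)
{
	struct sketch_linecards lcs = { .count = SKETCH_MAX_SLOTS };
	struct sketch_core core = { 0 };

	sketch_core_linecards_set(&core, &lcs);
	sketch_linecard_get(core.linecards, 1)->active = 1;
	printf("slot 1 active: %d\n",
	       sketch_linecard_get(core.linecards, 1)->active);
	return 0;
}

Reserving slot 0 for the main board is also why ports created below with slot_index 0 skip the devlink line-card association entirely.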
static u64 mlxsw_ports_occ_get(void *priv) @@ -2145,6 +2158,10 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_fw_rev_validate; + err = mlxsw_linecards_init(mlxsw_core, mlxsw_bus_info); + if (err) + goto err_linecards_init; + err = mlxsw_core_health_init(mlxsw_core); if (err) goto err_health_init; @@ -2158,7 +2175,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_thermal_init; - err = mlxsw_env_init(mlxsw_core, &mlxsw_core->env); + err = mlxsw_env_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->env); if (err) goto err_env_init; @@ -2183,6 +2200,8 @@ err_thermal_init: err_hwmon_init: mlxsw_core_health_fini(mlxsw_core); err_health_init: + mlxsw_linecards_fini(mlxsw_core); +err_linecards_init: err_fw_rev_validate: if (!reload) mlxsw_core_params_unregister(mlxsw_core); @@ -2255,6 +2274,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, mlxsw_thermal_fini(mlxsw_core->thermal); mlxsw_hwmon_fini(mlxsw_core->hwmon); mlxsw_core_health_fini(mlxsw_core); + mlxsw_linecards_fini(mlxsw_core); if (!reload) mlxsw_core_params_unregister(mlxsw_core); mlxsw_emad_fini(mlxsw_core); @@ -2956,7 +2976,7 @@ EXPORT_SYMBOL(mlxsw_core_res_get); static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, enum devlink_port_flavour flavour, - u32 port_number, bool split, + u8 slot_index, u32 port_number, bool split, u32 split_port_subnumber, bool splittable, u32 lanes, const unsigned char *switch_id, @@ -2979,6 +2999,15 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, attrs.switch_id.id_len = switch_id_len; mlxsw_core_port->local_port = local_port; devlink_port_attrs_set(devlink_port, &attrs); + if (slot_index) { + struct mlxsw_linecard *linecard; + + linecard = mlxsw_linecard_get(mlxsw_core->linecards, + slot_index); + mlxsw_core_port->linecard = linecard; + devlink_port_linecard_set(devlink_port, + linecard->devlink_linecard); + } err = devl_port_register(devlink, devlink_port, local_port); if (err) memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); @@ -2996,7 +3025,7 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port } int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, - u32 port_number, bool split, + u8 slot_index, u32 port_number, bool split, u32 split_port_subnumber, bool splittable, u32 lanes, const unsigned char *switch_id, @@ -3005,7 +3034,7 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, int err; err = __mlxsw_core_port_init(mlxsw_core, local_port, - DEVLINK_PORT_FLAVOUR_PHYSICAL, + DEVLINK_PORT_FLAVOUR_PHYSICAL, slot_index, port_number, split, split_port_subnumber, splittable, lanes, switch_id, switch_id_len); @@ -3036,7 +3065,7 @@ int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core, err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT, DEVLINK_PORT_FLAVOUR_CPU, - 0, false, 0, false, 0, + 0, 0, false, 0, false, 0, switch_id, switch_id_len); if (err) return err; @@ -3112,6 +3141,16 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get); +struct mlxsw_linecard * +mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core, + u16 local_port) +{ + struct mlxsw_core_port *mlxsw_core_port = + &mlxsw_core->ports[local_port]; + + return mlxsw_core_port->linecard; +} + bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port) { const struct mlxsw_bus_info 
*bus_info = mlxsw_core->bus_info; @@ -3124,6 +3163,15 @@ bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port) } EXPORT_SYMBOL(mlxsw_core_port_is_xm); +void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core, + bool (*selector)(void *priv, u16 local_port), + void *priv) +{ + if (WARN_ON_ONCE(!mlxsw_core->driver->ports_remove_selected)) + return; + mlxsw_core->driver->ports_remove_selected(mlxsw_core, selector, priv); +} + struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core) { return mlxsw_core->env; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 16ee5e90973d..d008282d7f2e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -35,6 +35,11 @@ unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core); void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core); +struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core); + +void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards *linecard); + bool mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev, const struct mlxsw_fw_rev *req_rev); @@ -231,7 +236,8 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port); int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, - u32 port_number, bool split, u32 split_port_subnumber, + u8 slot_index, u32 port_number, bool split, + u32 split_port_subnumber, bool splittable, u32 lanes, const unsigned char *switch_id, unsigned char switch_id_len); @@ -252,7 +258,14 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, struct devlink_port * mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, u16 local_port); +struct mlxsw_linecard * +mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core, + u16 local_port); bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port); +void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core, + bool (*selector)(void *priv, + u16 local_port), + void *priv); struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core); int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); @@ -326,6 +339,10 @@ struct mlxsw_driver { unsigned int count, struct netlink_ext_ack *extack); int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u16 local_port, struct netlink_ext_ack *extack); + void (*ports_remove_selected)(struct mlxsw_core *mlxsw_core, + bool (*selector)(void *priv, + u16 local_port), + void *priv); int (*sb_pool_get)(struct mlxsw_core *mlxsw_core, unsigned int sb_index, u16 pool_index, struct devlink_sb_pool_info *pool_info); @@ -543,4 +560,65 @@ static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb) return (struct mlxsw_skb_cb *) skb->cb; } +struct mlxsw_linecards; + +enum mlxsw_linecard_status_event_type { + MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION, + MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION, +}; + +struct mlxsw_linecard { + u8 slot_index; + struct mlxsw_linecards *linecards; + struct devlink_linecard *devlink_linecard; + struct mutex lock; /* Locks accesses to the linecard structure */ + char name[MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN]; + char mbct_pl[MLXSW_REG_MBCT_LEN]; /* Too big for stack */ + enum mlxsw_linecard_status_event_type status_event_type_to; + struct delayed_work status_event_to_dw; + u8 provisioned:1, + 
ready:1, + active:1; + u16 hw_revision; + u16 ini_version; + struct list_head device_list; +}; + +struct mlxsw_linecard_types_info; + +struct mlxsw_linecards { + struct mlxsw_core *mlxsw_core; + const struct mlxsw_bus_info *bus_info; + u8 count; + struct mlxsw_linecard_types_info *types_info; + struct list_head event_ops_list; + struct mutex event_ops_list_lock; /* Locks accesses to event ops list */ + struct mlxsw_linecard linecards[]; +}; + +static inline struct mlxsw_linecard * +mlxsw_linecard_get(struct mlxsw_linecards *linecards, u8 slot_index) +{ + return &linecards->linecards[slot_index - 1]; +} + +int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *bus_info); +void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core); + +typedef void mlxsw_linecards_event_op_t(struct mlxsw_core *mlxsw_core, + u8 slot_index, void *priv); + +struct mlxsw_linecards_event_ops { + mlxsw_linecards_event_op_t *got_active; + mlxsw_linecards_event_op_t *got_inactive; +}; + +int mlxsw_linecards_event_ops_register(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards_event_ops *ops, + void *priv); +void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards_event_ops *ops, + void *priv); + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 29a74b8bd5b5..34bec9cd572c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -21,19 +21,60 @@ struct mlxsw_env_module_info { enum mlxsw_reg_pmtm_module_type type; }; -struct mlxsw_env { - struct mlxsw_core *core; +struct mlxsw_env_line_card { u8 module_count; - struct mutex module_info_lock; /* Protects 'module_info'. */ + bool active; struct mlxsw_env_module_info module_info[]; }; -static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module) +struct mlxsw_env { + struct mlxsw_core *core; + const struct mlxsw_bus_info *bus_info; + u8 max_module_count; /* Maximum number of modules per-slot. */ + u8 num_of_slots; /* Including the main board. */ + struct mutex line_cards_lock; /* Protects line cards. 
*/ + struct mlxsw_env_line_card *line_cards[]; +}; + +static bool __mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env, + u8 slot_index) +{ + return mlxsw_env->line_cards[slot_index]->active; +} + +static bool mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env, + u8 slot_index) +{ + bool active; + + mutex_lock(&mlxsw_env->line_cards_lock); + active = __mlxsw_env_linecard_is_active(mlxsw_env, slot_index); + mutex_unlock(&mlxsw_env->line_cards_lock); + + return active; +} + +static struct +mlxsw_env_module_info *mlxsw_env_module_info_get(struct mlxsw_core *mlxsw_core, + u8 slot_index, u8 module) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + + return &mlxsw_env->line_cards[slot_index]->module_info[module]; +} + +static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, + u8 slot_index, u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(core); + struct mlxsw_env_module_info *module_info; int err; - switch (mlxsw_env->module_info[module].type) { + if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) + return 0; + + module_info = mlxsw_env_module_info_get(core, slot_index, module); + switch (module_info->type) { case MLXSW_REG_PMTM_MODULE_TYPE_TWISTED_PAIR: err = -EINVAL; break; @@ -44,32 +85,34 @@ static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module) return err; } -static int mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module) +static int mlxsw_env_validate_module_type(struct mlxsw_core *core, + u8 slot_index, u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(core); int err; - mutex_lock(&mlxsw_env->module_info_lock); - err = __mlxsw_env_validate_module_type(core, module); - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); + err = __mlxsw_env_validate_module_type(core, slot_index, module); + mutex_unlock(&mlxsw_env->line_cards_lock); return err; } static int -mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, bool *qsfp, - bool *cmis) +mlxsw_env_validate_cable_ident(struct mlxsw_core *core, u8 slot_index, int id, + bool *qsfp, bool *cmis) { char mcia_pl[MLXSW_REG_MCIA_LEN]; char *eeprom_tmp; u8 ident; int err; - err = mlxsw_env_validate_module_type(core, id); + err = mlxsw_env_validate_module_type(core, slot_index, id); if (err) return err; - mlxsw_reg_mcia_pack(mcia_pl, id, 0, MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1, + mlxsw_reg_mcia_pack(mcia_pl, slot_index, id, 0, + MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1, MLXSW_REG_MCIA_I2C_ADDR_LOW); err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl); if (err) @@ -99,8 +142,8 @@ mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, bool *qsfp, } static int -mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, - u16 offset, u16 size, void *data, +mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index, + int module, u16 offset, u16 size, void *data, bool qsfp, unsigned int *p_read_size) { char mcia_pl[MLXSW_REG_MCIA_LEN]; @@ -145,7 +188,8 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, } } - mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr); + mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page, offset, size, + i2c_addr); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl); if (err) @@ -162,8 +206,9 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, return 0; } -int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, - int off, int *temp) +int 
+mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, u8 slot_index, + int module, int off, int *temp) { unsigned int module_temp, module_crit, module_emerg; union { @@ -177,8 +222,9 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, int page; int err; - mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, - false, false); + mlxsw_reg_mtmp_pack(mtmp_pl, slot_index, + MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false, + false); err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl); if (err) return err; @@ -207,7 +253,8 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, */ /* Validate module identifier value. */ - err = mlxsw_env_validate_cable_ident(core, module, &qsfp, &cmis); + err = mlxsw_env_validate_cable_ident(core, slot_index, module, &qsfp, + &cmis); if (err) return err; @@ -219,12 +266,12 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, page = MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM; else page = MLXSW_REG_MCIA_TH_PAGE_NUM; - mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, + mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page, MLXSW_REG_MCIA_TH_PAGE_OFF + off, MLXSW_REG_MCIA_TH_ITEM_SIZE, MLXSW_REG_MCIA_I2C_ADDR_LOW); } else { - mlxsw_reg_mcia_pack(mcia_pl, module, 0, + mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, MLXSW_REG_MCIA_PAGE0_LO, off, MLXSW_REG_MCIA_TH_ITEM_SIZE, MLXSW_REG_MCIA_I2C_ADDR_HIGH); @@ -242,24 +289,31 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, } int mlxsw_env_get_module_info(struct net_device *netdev, - struct mlxsw_core *mlxsw_core, int module, - struct ethtool_modinfo *modinfo) + struct mlxsw_core *mlxsw_core, u8 slot_index, + int module, struct ethtool_modinfo *modinfo) { + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE]; u16 offset = MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE; u8 module_rev_id, module_id, diag_mon; unsigned int read_size; int err; - err = mlxsw_env_validate_module_type(mlxsw_core, module); + if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) { + netdev_err(netdev, "Cannot read EEPROM of module on an inactive line card\n"); + return -EIO; + } + + err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { netdev_err(netdev, "EEPROM is not equipped on port module type"); return err; } - err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset, - module_info, false, &read_size); + err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index, module, 0, + offset, module_info, false, + &read_size); if (err) return err; @@ -288,9 +342,10 @@ int mlxsw_env_get_module_info(struct net_device *netdev, break; case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP: /* Verify if transceiver provides diagnostic monitoring page */ - err = mlxsw_env_query_module_eeprom(mlxsw_core, module, - SFP_DIAGMON, 1, &diag_mon, - false, &read_size); + err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index, + module, SFP_DIAGMON, 1, + &diag_mon, false, + &read_size); if (err) return err; @@ -329,9 +384,11 @@ int mlxsw_env_get_module_info(struct net_device *netdev, EXPORT_SYMBOL(mlxsw_env_get_module_info); int mlxsw_env_get_module_eeprom(struct net_device *netdev, - struct mlxsw_core *mlxsw_core, int module, - struct ethtool_eeprom *ee, u8 *data) + struct mlxsw_core *mlxsw_core, u8 slot_index, + int module, struct ethtool_eeprom *ee, + u8 *data) { + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); int offset = 
ee->offset; unsigned int read_size; bool qsfp, cmis; @@ -341,14 +398,21 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev, if (!ee->len) return -EINVAL; + if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) { + netdev_err(netdev, "Cannot read EEPROM of module on an inactive line card\n"); + return -EIO; + } + memset(data, 0, ee->len); /* Validate module identifier value. */ - err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp, &cmis); + err = mlxsw_env_validate_cable_ident(mlxsw_core, slot_index, module, + &qsfp, &cmis); if (err) return err; while (i < ee->len) { - err = mlxsw_env_query_module_eeprom(mlxsw_core, module, offset, + err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index, + module, offset, ee->len - i, data + i, qsfp, &read_size); if (err) { @@ -394,15 +458,23 @@ static int mlxsw_env_mcia_status_process(const char *mcia_pl, } int -mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, +mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, + u8 slot_index, u8 module, const struct ethtool_module_eeprom *page, struct netlink_ext_ack *extack) { + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); u32 bytes_read = 0; u16 device_addr; int err; - err = mlxsw_env_validate_module_type(mlxsw_core, module); + if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot read EEPROM of module on an inactive line card"); + return -EIO; + } + + err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type"); return err; @@ -419,7 +491,7 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, size = min_t(u8, page->length - bytes_read, MLXSW_REG_MCIA_EEPROM_SIZE); - mlxsw_reg_mcia_pack(mcia_pl, module, 0, page->page, + mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page->page, device_addr + bytes_read, size, page->i2c_address); mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank); @@ -443,20 +515,23 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, } EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page); -static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 module) +static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module) { char pmaos_pl[MLXSW_REG_PMAOS_LEN]; - mlxsw_reg_pmaos_pack(pmaos_pl, module); + mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, module); mlxsw_reg_pmaos_rst_set(pmaos_pl, true); return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl); } int mlxsw_env_reset_module(struct net_device *netdev, - struct mlxsw_core *mlxsw_core, u8 module, u32 *flags) + struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, u32 *flags) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; u32 req = *flags; int err; @@ -464,28 +539,34 @@ int mlxsw_env_reset_module(struct net_device *netdev, !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) return 0; - mutex_lock(&mlxsw_env->module_info_lock); + if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) { + netdev_err(netdev, "Cannot reset module on an inactive line card\n"); + return -EIO; + } + + mutex_lock(&mlxsw_env->line_cards_lock); - err = __mlxsw_env_validate_module_type(mlxsw_core, module); + err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { netdev_err(netdev, "Reset module is not supported on port module type\n"); goto out; } - if 
(mlxsw_env->module_info[module].num_ports_up) { + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module); + if (module_info->num_ports_up) { netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n"); err = -EINVAL; goto out; } - if (mlxsw_env->module_info[module].num_ports_mapped > 1 && + if (module_info->num_ports_mapped > 1 && !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) { netdev_err(netdev, "Cannot reset module without \"phy-shared\" flag when shared by multiple ports\n"); err = -EINVAL; goto out; } - err = mlxsw_env_module_reset(mlxsw_core, module); + err = mlxsw_env_module_reset(mlxsw_core, slot_index, module); if (err) { netdev_err(netdev, "Failed to reset module\n"); goto out; @@ -494,32 +575,39 @@ int mlxsw_env_reset_module(struct net_device *netdev, *flags &= ~(ETH_RESET_PHY | (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)); out: - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->line_cards_lock); return err; } EXPORT_SYMBOL(mlxsw_env_reset_module); int -mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, +mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, struct ethtool_module_power_mode_params *params, struct netlink_ext_ack *extack) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; char mcion_pl[MLXSW_REG_MCION_LEN]; u32 status_bits; - int err; + int err = 0; - mutex_lock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); - err = __mlxsw_env_validate_module_type(mlxsw_core, module); + err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { NL_SET_ERR_MSG_MOD(extack, "Power mode is not supported on port module type"); goto out; } - params->policy = mlxsw_env->module_info[module].power_mode_policy; + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module); + params->policy = module_info->power_mode_policy; + + /* Avoid accessing an inactive line card, as it will result in an error. */ + if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) + goto out; - mlxsw_reg_mcion_pack(mcion_pl, module); + mlxsw_reg_mcion_pack(mcion_pl, slot_index, module); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to retrieve module's power mode"); @@ -536,18 +624,18 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, params->mode = ETHTOOL_MODULE_POWER_MODE_HIGH; out: - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->line_cards_lock); return err; } EXPORT_SYMBOL(mlxsw_env_get_module_power_mode); static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core, - u8 module, bool enable) + u8 slot_index, u8 module, bool enable) { enum mlxsw_reg_pmaos_admin_status admin_status; char pmaos_pl[MLXSW_REG_PMAOS_LEN]; - mlxsw_reg_pmaos_pack(pmaos_pl, module); + mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, module); admin_status = enable ? 
MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED : MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED; mlxsw_reg_pmaos_admin_status_set(pmaos_pl, admin_status); @@ -557,12 +645,13 @@ static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core, } static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core, - u8 module, bool low_power) + u8 slot_index, u8 module, + bool low_power) { u16 eeprom_override_mask, eeprom_override; char pmmp_pl[MLXSW_REG_PMMP_LEN]; - mlxsw_reg_pmmp_pack(pmmp_pl, module); + mlxsw_reg_pmmp_pack(pmmp_pl, slot_index, module); mlxsw_reg_pmmp_sticky_set(pmmp_pl, true); /* Mask all the bits except low power mode. */ eeprom_override_mask = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK; @@ -575,24 +664,34 @@ static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core, } static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, - u8 module, bool low_power, + u8 slot_index, u8 module, + bool low_power, struct netlink_ext_ack *extack) { + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); int err; - err = mlxsw_env_module_enable_set(mlxsw_core, module, false); + /* Avoid accessing an inactive line card, as it will result in an error. + * Cached configuration will be applied by mlxsw_env_got_active() when + * line card becomes active. + */ + if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) + return 0; + + err = mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, false); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to disable module"); return err; } - err = mlxsw_env_module_low_power_set(mlxsw_core, module, low_power); + err = mlxsw_env_module_low_power_set(mlxsw_core, slot_index, module, + low_power); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to set module's power mode"); goto err_module_low_power_set; } - err = mlxsw_env_module_enable_set(mlxsw_core, module, true); + err = mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, true); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to enable module"); goto err_module_enable_set; @@ -601,67 +700,84 @@ static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, return 0; err_module_enable_set: - mlxsw_env_module_low_power_set(mlxsw_core, module, !low_power); + mlxsw_env_module_low_power_set(mlxsw_core, slot_index, module, + !low_power); err_module_low_power_set: - mlxsw_env_module_enable_set(mlxsw_core, module, true); + mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, true); return err; } -int -mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, - enum ethtool_module_power_mode_policy policy, - struct netlink_ext_ack *extack) +static int +mlxsw_env_set_module_power_mode_apply(struct mlxsw_core *mlxsw_core, + u8 slot_index, u8 module, + enum ethtool_module_power_mode_policy policy, + struct netlink_ext_ack *extack) { - struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; bool low_power; int err = 0; - if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH && - policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) { - NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy"); - return -EOPNOTSUPP; - } - - mutex_lock(&mlxsw_env->module_info_lock); - - err = __mlxsw_env_validate_module_type(mlxsw_core, module); + err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { NL_SET_ERR_MSG_MOD(extack, "Power mode set is not supported on port module type"); goto out; } - if (mlxsw_env->module_info[module].power_mode_policy == policy) + module_info = 
mlxsw_env_module_info_get(mlxsw_core, slot_index, module); + if (module_info->power_mode_policy == policy) goto out; /* If any ports are up, we are already in high power mode. */ - if (mlxsw_env->module_info[module].num_ports_up) + if (module_info->num_ports_up) goto out_set_policy; low_power = policy == ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO; - err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, low_power, - extack); + err = __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module, + low_power, extack); if (err) goto out; out_set_policy: - mlxsw_env->module_info[module].power_mode_policy = policy; + module_info->power_mode_policy = policy; out: - mutex_unlock(&mlxsw_env->module_info_lock); + return err; +} + +int +mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, + enum ethtool_module_power_mode_policy policy, + struct netlink_ext_ack *extack) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + int err; + + if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH && + policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy"); + return -EOPNOTSUPP; + } + + mutex_lock(&mlxsw_env->line_cards_lock); + err = mlxsw_env_set_module_power_mode_apply(mlxsw_core, slot_index, + module, policy, extack); + mutex_unlock(&mlxsw_env->line_cards_lock); + return err; } EXPORT_SYMBOL(mlxsw_env_set_module_power_mode); static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core, - u8 module, + u8 slot_index, u8 module, bool *p_has_temp_sensor) { char mtbr_pl[MLXSW_REG_MTBR_LEN]; u16 temp; int err; - mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, - 1); + mlxsw_reg_mtbr_pack(mtbr_pl, slot_index, + MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtbr), mtbr_pl); if (err) return err; @@ -681,13 +797,15 @@ static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core, return 0; } -static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core, - u16 sensor_index, bool enable) +static int +mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core, u8 slot_index, + u16 sensor_index, bool enable) { char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0}; enum mlxsw_reg_mtmp_tee tee; int err, threshold_hi; + mlxsw_reg_mtmp_slot_index_set(mtmp_pl, slot_index); mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, sensor_index); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl); if (err) @@ -695,6 +813,7 @@ static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core, if (enable) { err = mlxsw_env_module_temp_thresholds_get(mlxsw_core, + slot_index, sensor_index - MLXSW_REG_MTMP_MODULE_INDEX_MIN, SFP_TEMP_HIGH_WARN, @@ -721,14 +840,16 @@ static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core, return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl); } -static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core) +static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core, + u8 slot_index) { + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); int i, err, sensor_index; bool has_temp_sensor; - for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) { - err = mlxsw_env_module_has_temp_sensor(mlxsw_core, i, - &has_temp_sensor); + for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) { + err = mlxsw_env_module_has_temp_sensor(mlxsw_core, slot_index, + i, &has_temp_sensor); if (err) return err; @@ -736,7 +857,8 @@ static int 
mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core) continue; sensor_index = i + MLXSW_REG_MTMP_MODULE_INDEX_MIN; - err = mlxsw_env_temp_event_set(mlxsw_core, sensor_index, true); + err = mlxsw_env_temp_event_set(mlxsw_core, slot_index, + sensor_index, true); if (err) return err; } @@ -753,6 +875,7 @@ struct mlxsw_env_module_temp_warn_event { static void mlxsw_env_mtwe_event_work(struct work_struct *work) { struct mlxsw_env_module_temp_warn_event *event; + struct mlxsw_env_module_info *module_info; struct mlxsw_env *mlxsw_env; int i, sensor_warning; bool is_overheat; @@ -761,7 +884,7 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work) work); mlxsw_env = event->mlxsw_env; - for (i = 0; i < mlxsw_env->module_count; i++) { + for (i = 0; i < mlxsw_env->max_module_count; i++) { /* 64-127 of sensor_index are mapped to the port modules * sequentially (module 0 is mapped to sensor_index 64, * module 1 to sensor_index 65 and so on) @@ -769,9 +892,10 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work) sensor_warning = mlxsw_reg_mtwe_sensor_warning_get(event->mtwe_pl, i + MLXSW_REG_MTMP_MODULE_INDEX_MIN); - mutex_lock(&mlxsw_env->module_info_lock); - is_overheat = - mlxsw_env->module_info[i].is_overheat; + mutex_lock(&mlxsw_env->line_cards_lock); + /* MTWE only supports main board. */ + module_info = mlxsw_env_module_info_get(mlxsw_env->core, 0, i); + is_overheat = module_info->is_overheat; if ((is_overheat && sensor_warning) || (!is_overheat && !sensor_warning)) { @@ -779,21 +903,21 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work) * warning OR current state in "no warning" and MTWE * does not report warning. */ - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->line_cards_lock); continue; } else if (is_overheat && !sensor_warning) { /* MTWE reports "no warning", turn is_overheat off. */ - mlxsw_env->module_info[i].is_overheat = false; - mutex_unlock(&mlxsw_env->module_info_lock); + module_info->is_overheat = false; + mutex_unlock(&mlxsw_env->line_cards_lock); } else { /* Current state is "no warning" and MTWE reports * "warning", increase the counter and turn is_overheat * on. 
*/ - mlxsw_env->module_info[i].is_overheat = true; - mlxsw_env->module_info[i].module_overheat_counter++; - mutex_unlock(&mlxsw_env->module_info_lock); + module_info->is_overheat = true; + module_info->module_overheat_counter++; + mutex_unlock(&mlxsw_env->line_cards_lock); } } @@ -837,6 +961,7 @@ static void mlxsw_env_temp_warn_event_unregister(struct mlxsw_env *mlxsw_env) struct mlxsw_env_module_plug_unplug_event { struct mlxsw_env *mlxsw_env; + u8 slot_index; u8 module; struct work_struct work; }; @@ -844,6 +969,7 @@ struct mlxsw_env_module_plug_unplug_event { static void mlxsw_env_pmpe_event_work(struct work_struct *work) { struct mlxsw_env_module_plug_unplug_event *event; + struct mlxsw_env_module_info *module_info; struct mlxsw_env *mlxsw_env; bool has_temp_sensor; u16 sensor_index; @@ -853,11 +979,16 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work) work); mlxsw_env = event->mlxsw_env; - mutex_lock(&mlxsw_env->module_info_lock); - mlxsw_env->module_info[event->module].is_overheat = false; - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); + module_info = mlxsw_env_module_info_get(mlxsw_env->core, + event->slot_index, + event->module); + module_info->is_overheat = false; + mutex_unlock(&mlxsw_env->line_cards_lock); - err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->module, + err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, + event->slot_index, + event->module, &has_temp_sensor); /* Do not disable events on modules without sensors or faulty sensors * because FW returns errors. @@ -869,7 +1000,8 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work) goto out; sensor_index = event->module + MLXSW_REG_MTMP_MODULE_INDEX_MIN; - mlxsw_env_temp_event_set(mlxsw_env->core, sensor_index, true); + mlxsw_env_temp_event_set(mlxsw_env->core, event->slot_index, + sensor_index, true); out: kfree(event); @@ -879,12 +1011,14 @@ static void mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl, void *priv) { + u8 slot_index = mlxsw_reg_pmpe_slot_index_get(pmpe_pl); struct mlxsw_env_module_plug_unplug_event *event; enum mlxsw_reg_pmpe_module_status module_status; u8 module = mlxsw_reg_pmpe_module_get(pmpe_pl); struct mlxsw_env *mlxsw_env = priv; - if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + if (WARN_ON_ONCE(module >= mlxsw_env->max_module_count || + slot_index >= mlxsw_env->num_of_slots)) return; module_status = mlxsw_reg_pmpe_module_status_get(pmpe_pl); @@ -896,6 +1030,7 @@ mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl, return; event->mlxsw_env = mlxsw_env; + event->slot_index = slot_index; event->module = module; INIT_WORK(&event->work, mlxsw_env_pmpe_event_work); mlxsw_core_schedule_work(&event->work); @@ -923,14 +1058,16 @@ mlxsw_env_module_plug_event_unregister(struct mlxsw_env *mlxsw_env) } static int -mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core) +mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core, + u8 slot_index) { + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); int i, err; - for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) { + for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) { char pmaos_pl[MLXSW_REG_PMAOS_LEN]; - mlxsw_reg_pmaos_pack(pmaos_pl, i); + mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, i); mlxsw_reg_pmaos_e_set(pmaos_pl, MLXSW_REG_PMAOS_E_GENERATE_EVENT); mlxsw_reg_pmaos_ee_set(pmaos_pl, true); @@ -942,146 +1079,330 @@ 
mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core) } int -mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module, - u64 *p_counter) +mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, u64 *p_counter) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; - mutex_lock(&mlxsw_env->module_info_lock); - *p_counter = mlxsw_env->module_info[module].module_overheat_counter; - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module); + *p_counter = module_info->module_overheat_counter; + mutex_unlock(&mlxsw_env->line_cards_lock); return 0; } EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get); -void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module) +void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; - mutex_lock(&mlxsw_env->module_info_lock); - mlxsw_env->module_info[module].num_ports_mapped++; - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module); + module_info->num_ports_mapped++; + mutex_unlock(&mlxsw_env->line_cards_lock); } EXPORT_SYMBOL(mlxsw_env_module_port_map); -void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module) +void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; - mutex_lock(&mlxsw_env->module_info_lock); - mlxsw_env->module_info[module].num_ports_mapped--; - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module); + module_info->num_ports_mapped--; + mutex_unlock(&mlxsw_env->line_cards_lock); } EXPORT_SYMBOL(mlxsw_env_module_port_unmap); -int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module) +int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; int err = 0; - mutex_lock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); - if (mlxsw_env->module_info[module].power_mode_policy != + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module); + if (module_info->power_mode_policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) goto out_inc; - if (mlxsw_env->module_info[module].num_ports_up != 0) + if (module_info->num_ports_up != 0) goto out_inc; /* Transition to high power mode following first port using the module * being put administratively up. 
*/ - err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, false, - NULL); + err = __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module, + false, NULL); if (err) goto out_unlock; out_inc: - mlxsw_env->module_info[module].num_ports_up++; + module_info->num_ports_up++; out_unlock: - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->line_cards_lock); return err; } EXPORT_SYMBOL(mlxsw_env_module_port_up); -void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module) +void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; - mutex_lock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); - mlxsw_env->module_info[module].num_ports_up--; + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module); + module_info->num_ports_up--; - if (mlxsw_env->module_info[module].power_mode_policy != + if (module_info->power_mode_policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) goto out_unlock; - if (mlxsw_env->module_info[module].num_ports_up != 0) + if (module_info->num_ports_up != 0) goto out_unlock; /* Transition to low power mode following last port using the module * being put administratively down. */ - __mlxsw_env_set_module_power_mode(mlxsw_core, module, true, NULL); + __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module, true, + NULL); out_unlock: - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->line_cards_lock); } EXPORT_SYMBOL(mlxsw_env_module_port_down); +static int mlxsw_env_line_cards_alloc(struct mlxsw_env *env) +{ + struct mlxsw_env_module_info *module_info; + int i, j; + + for (i = 0; i < env->num_of_slots; i++) { + env->line_cards[i] = kzalloc(struct_size(env->line_cards[i], + module_info, + env->max_module_count), + GFP_KERNEL); + if (!env->line_cards[i]) + goto kzalloc_err; + + /* Firmware defaults to high power mode policy where modules + * are transitioned to high power mode following plug-in. 
+ */ + for (j = 0; j < env->max_module_count; j++) { + module_info = &env->line_cards[i]->module_info[j]; + module_info->power_mode_policy = + ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH; + } + } + + return 0; + +kzalloc_err: + for (i--; i >= 0; i--) + kfree(env->line_cards[i]); + return -ENOMEM; +} + +static void mlxsw_env_line_cards_free(struct mlxsw_env *env) +{ + int i = env->num_of_slots; + + for (i--; i >= 0; i--) + kfree(env->line_cards[i]); +} + +static int +mlxsw_env_module_event_enable(struct mlxsw_env *mlxsw_env, u8 slot_index) +{ + int err; + + err = mlxsw_env_module_oper_state_event_enable(mlxsw_env->core, + slot_index); + if (err) + return err; + + err = mlxsw_env_module_temp_event_enable(mlxsw_env->core, slot_index); + if (err) + return err; + + return 0; +} + +static void +mlxsw_env_module_event_disable(struct mlxsw_env *mlxsw_env, u8 slot_index) +{ +} + static int -mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core) +mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core, u8 slot_index) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); int i; - for (i = 0; i < mlxsw_env->module_count; i++) { + for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) { + struct mlxsw_env_module_info *module_info; char pmtm_pl[MLXSW_REG_PMTM_LEN]; int err; - mlxsw_reg_pmtm_pack(pmtm_pl, 0, i); + mlxsw_reg_pmtm_pack(pmtm_pl, slot_index, i); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl); if (err) return err; - mlxsw_env->module_info[i].type = - mlxsw_reg_pmtm_module_type_get(pmtm_pl); + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, + i); + module_info->type = mlxsw_reg_pmtm_module_type_get(pmtm_pl); } return 0; } -int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) +static void +mlxsw_env_linecard_modules_power_mode_apply(struct mlxsw_core *mlxsw_core, + struct mlxsw_env *env, + u8 slot_index) { + int i; + + for (i = 0; i < env->line_cards[slot_index]->module_count; i++) { + enum ethtool_module_power_mode_policy policy; + struct mlxsw_env_module_info *module_info; + struct netlink_ext_ack extack; + int err; + + module_info = &env->line_cards[slot_index]->module_info[i]; + policy = module_info->power_mode_policy; + err = mlxsw_env_set_module_power_mode_apply(mlxsw_core, + slot_index, i, + policy, &extack); + if (err) + dev_err(env->bus_info->dev, "%s\n", extack._msg); + } +} + +static void +mlxsw_env_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index, void *priv) +{ + struct mlxsw_env *mlxsw_env = priv; + char mgpir_pl[MLXSW_REG_MGPIR_LEN]; + int err; + + mutex_lock(&mlxsw_env->line_cards_lock); + if (__mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) + goto out_unlock; + + mlxsw_reg_mgpir_pack(mgpir_pl, slot_index); + err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mgpir), mgpir_pl); + if (err) + goto out_unlock; + + mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, + &mlxsw_env->line_cards[slot_index]->module_count, + NULL); + + err = mlxsw_env_module_event_enable(mlxsw_env, slot_index); + if (err) { + dev_err(mlxsw_env->bus_info->dev, "Failed to enable port module events for line card in slot %d\n", + slot_index); + goto err_mlxsw_env_module_event_enable; + } + err = mlxsw_env_module_type_set(mlxsw_env->core, slot_index); + if (err) { + dev_err(mlxsw_env->bus_info->dev, "Failed to set modules' type for line card in slot %d\n", + slot_index); + goto err_type_set; + } + + mlxsw_env->line_cards[slot_index]->active = true; + /* Apply power mode policy. 
*/ + mlxsw_env_linecard_modules_power_mode_apply(mlxsw_core, mlxsw_env, + slot_index); + mutex_unlock(&mlxsw_env->line_cards_lock); + + return; + +err_type_set: + mlxsw_env_module_event_disable(mlxsw_env, slot_index); +err_mlxsw_env_module_event_enable: +out_unlock: + mutex_unlock(&mlxsw_env->line_cards_lock); +} + +static void +mlxsw_env_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index, + void *priv) +{ + struct mlxsw_env *mlxsw_env = priv; + + mutex_lock(&mlxsw_env->line_cards_lock); + if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) + goto out_unlock; + mlxsw_env->line_cards[slot_index]->active = false; + mlxsw_env_module_event_disable(mlxsw_env, slot_index); + mlxsw_env->line_cards[slot_index]->module_count = 0; +out_unlock: + mutex_unlock(&mlxsw_env->line_cards_lock); +} + +static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = { + .got_active = mlxsw_env_got_active, + .got_inactive = mlxsw_env_got_inactive, +}; + +int mlxsw_env_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *bus_info, + struct mlxsw_env **p_env) +{ + u8 module_count, num_of_slots, max_module_count; char mgpir_pl[MLXSW_REG_MGPIR_LEN]; struct mlxsw_env *env; - u8 module_count; - int i, err; + int err; - mlxsw_reg_mgpir_pack(mgpir_pl); + mlxsw_reg_mgpir_pack(mgpir_pl, 0); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl); if (err) return err; - mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count); + mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count, + &num_of_slots); + /* If the system is modular, get the maximum number of modules per-slot. + * Otherwise, get the maximum number of modules on the main board. + */ + max_module_count = num_of_slots ? + mlxsw_reg_mgpir_max_modules_per_slot_get(mgpir_pl) : + module_count; - env = kzalloc(struct_size(env, module_info, module_count), GFP_KERNEL); + env = kzalloc(struct_size(env, line_cards, num_of_slots + 1), + GFP_KERNEL); if (!env) return -ENOMEM; - /* Firmware defaults to high power mode policy where modules are - * transitioned to high power mode following plug-in. - */ - for (i = 0; i < module_count; i++) - env->module_info[i].power_mode_policy = - ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH; - - mutex_init(&env->module_info_lock); env->core = mlxsw_core; - env->module_count = module_count; + env->bus_info = bus_info; + env->num_of_slots = num_of_slots + 1; + env->max_module_count = max_module_count; + err = mlxsw_env_line_cards_alloc(env); + if (err) + goto err_mlxsw_env_line_cards_alloc; + + mutex_init(&env->line_cards_lock); *p_env = env; + err = mlxsw_linecards_event_ops_register(env->core, + &mlxsw_env_event_ops, env); + if (err) + goto err_linecards_event_ops_register; + err = mlxsw_env_temp_warn_event_register(mlxsw_core); if (err) goto err_temp_warn_event_register; @@ -1090,38 +1411,54 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) if (err) goto err_module_plug_event_register; - err = mlxsw_env_module_oper_state_event_enable(mlxsw_core); - if (err) - goto err_oper_state_event_enable; - - err = mlxsw_env_module_temp_event_enable(mlxsw_core); + /* Set 'module_count' only for main board. Actual count for line card + * is to be set after line card is activated. + */ + env->line_cards[0]->module_count = num_of_slots ? 0 : module_count; + /* Enable events only for main board. Line card events are to be + * configured only after line card is activated. Before that, access to + * modules on line cards is not allowed. 
+ */ + err = mlxsw_env_module_event_enable(env, 0); if (err) - goto err_temp_event_enable; + goto err_mlxsw_env_module_event_enable; - err = mlxsw_env_module_type_set(mlxsw_core); + err = mlxsw_env_module_type_set(mlxsw_core, 0); if (err) goto err_type_set; + env->line_cards[0]->active = true; + return 0; err_type_set: -err_temp_event_enable: -err_oper_state_event_enable: + mlxsw_env_module_event_disable(env, 0); +err_mlxsw_env_module_event_enable: mlxsw_env_module_plug_event_unregister(env); err_module_plug_event_register: mlxsw_env_temp_warn_event_unregister(env); err_temp_warn_event_register: - mutex_destroy(&env->module_info_lock); + mlxsw_linecards_event_ops_unregister(env->core, + &mlxsw_env_event_ops, env); +err_linecards_event_ops_register: + mutex_destroy(&env->line_cards_lock); + mlxsw_env_line_cards_free(env); +err_mlxsw_env_line_cards_alloc: kfree(env); return err; } void mlxsw_env_fini(struct mlxsw_env *env) { + env->line_cards[0]->active = false; + mlxsw_env_module_event_disable(env, 0); mlxsw_env_module_plug_event_unregister(env); /* Make sure there is no more event work scheduled. */ mlxsw_core_flush_owq(); mlxsw_env_temp_warn_event_unregister(env); - mutex_destroy(&env->module_info_lock); + mlxsw_linecards_event_ops_unregister(env->core, + &mlxsw_env_event_ops, env); + mutex_destroy(&env->line_cards_lock); + mlxsw_env_line_cards_free(env); kfree(env); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h index ec6564e5d2ee..a197e3ae069c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h @@ -9,49 +9,60 @@ struct ethtool_modinfo; struct ethtool_eeprom; -int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, - int off, int *temp); +int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, + u8 slot_index, int module, int off, + int *temp); int mlxsw_env_get_module_info(struct net_device *netdev, - struct mlxsw_core *mlxsw_core, int module, - struct ethtool_modinfo *modinfo); + struct mlxsw_core *mlxsw_core, u8 slot_index, + int module, struct ethtool_modinfo *modinfo); int mlxsw_env_get_module_eeprom(struct net_device *netdev, - struct mlxsw_core *mlxsw_core, int module, - struct ethtool_eeprom *ee, u8 *data); + struct mlxsw_core *mlxsw_core, u8 slot_index, + int module, struct ethtool_eeprom *ee, + u8 *data); int -mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, +mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, + u8 slot_index, u8 module, const struct ethtool_module_eeprom *page, struct netlink_ext_ack *extack); int mlxsw_env_reset_module(struct net_device *netdev, - struct mlxsw_core *mlxsw_core, u8 module, - u32 *flags); + struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, u32 *flags); int -mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, +mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, struct ethtool_module_power_mode_params *params, struct netlink_ext_ack *extack); int -mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, +mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, enum ethtool_module_power_mode_policy policy, struct netlink_ext_ack *extack); int -mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module, - u64 *p_counter); +mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 
module, u64 *p_counter); -void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module); +void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module); -void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module); +void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module); -int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module); +int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module); -void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module); +void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module); -int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env); +int mlxsw_env_init(struct mlxsw_core *core, + const struct mlxsw_bus_info *bus_info, + struct mlxsw_env **p_env); void mlxsw_env_fini(struct mlxsw_env *env); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index 8b170ad92302..70735068cf29 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -19,6 +19,7 @@ #define MLXSW_HWMON_ATTR_PER_SENSOR 3 #define MLXSW_HWMON_ATTR_PER_MODULE 7 #define MLXSW_HWMON_ATTR_PER_GEARBOX 4 +#define MLXSW_HWMON_DEV_NAME_LEN_MAX 16 #define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_SENSORS_MAX_COUNT * MLXSW_HWMON_ATTR_PER_SENSOR + \ MLXSW_HWMON_MODULES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_MODULE + \ @@ -27,7 +28,7 @@ struct mlxsw_hwmon_attr { struct device_attribute dev_attr; - struct mlxsw_hwmon *hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev; unsigned int type_index; char name[32]; }; @@ -40,9 +41,9 @@ static int mlxsw_hwmon_get_attr_index(int index, int count) return index; } -struct mlxsw_hwmon { - struct mlxsw_core *core; - const struct mlxsw_bus_info *bus_info; +struct mlxsw_hwmon_dev { + char name[MLXSW_HWMON_DEV_NAME_LEN_MAX]; + struct mlxsw_hwmon *hwmon; struct device *hwmon_dev; struct attribute_group group; const struct attribute_group *groups[2]; @@ -51,6 +52,14 @@ struct mlxsw_hwmon { unsigned int attrs_count; u8 sensor_count; u8 module_sensor_max; + u8 slot_index; + bool active; +}; + +struct mlxsw_hwmon { + struct mlxsw_core *core; + const struct mlxsw_bus_info *bus_info; + struct mlxsw_hwmon_dev line_cards[]; }; static ssize_t mlxsw_hwmon_temp_show(struct device *dev, @@ -59,14 +68,16 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mtmp_pl[MLXSW_REG_MTMP_LEN]; int temp, index; int err; index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index, - mlxsw_hwmon->module_sensor_max); - mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false); + mlxsw_hwmon_dev->module_sensor_max); + mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, index, false, + false); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); if (err) { dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n"); @@ -82,14 +93,16 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct 
mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mtmp_pl[MLXSW_REG_MTMP_LEN]; int temp_max, index; int err; index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index, - mlxsw_hwmon->module_sensor_max); - mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false); + mlxsw_hwmon_dev->module_sensor_max); + mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, index, false, + false); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); if (err) { dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n"); @@ -105,8 +118,9 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; - char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0}; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; + char mtmp_pl[MLXSW_REG_MTMP_LEN]; unsigned long val; int index; int err; @@ -118,8 +132,9 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev, return -EINVAL; index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index, - mlxsw_hwmon->module_sensor_max); + mlxsw_hwmon_dev->module_sensor_max); + mlxsw_reg_mtmp_slot_index_set(mtmp_pl, mlxsw_hwmon_dev->slot_index); mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, index); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); if (err) @@ -140,7 +155,8 @@ static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mfsm_pl[MLXSW_REG_MFSM_LEN]; int err; @@ -159,7 +175,8 @@ static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char fore_pl[MLXSW_REG_FORE_LEN]; bool fault; int err; @@ -180,7 +197,8 @@ static ssize_t mlxsw_hwmon_pwm_show(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mfsc_pl[MLXSW_REG_MFSC_LEN]; int err; @@ -200,7 +218,8 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mfsc_pl[MLXSW_REG_MFSC_LEN]; unsigned long val; int err; @@ -226,14 +245,16 @@ static int mlxsw_hwmon_module_temp_get(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev 
*mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mtmp_pl[MLXSW_REG_MTMP_LEN]; u8 module; int err; - module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; - mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, - false, false); + module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count; + mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, + MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false, + false); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); if (err) { dev_err(dev, "Failed to query module temperature\n"); @@ -263,15 +284,16 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0}; u8 module, fault; u16 temp; int err; - module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; - mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, - 1); + module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count; + mlxsw_reg_mtbr_pack(mtbr_pl, mlxsw_hwmon_dev->slot_index, + MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl); if (err) { dev_err(dev, "Failed to query module temperature sensor\n"); @@ -305,13 +327,16 @@ static int mlxsw_hwmon_module_temp_critical_get(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; u8 module; int err; - module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; - err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module, - SFP_TEMP_HIGH_WARN, p_temp); + module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count; + err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, + mlxsw_hwmon_dev->slot_index, + module, SFP_TEMP_HIGH_WARN, + p_temp); if (err) { dev_err(dev, "Failed to query module temperature thresholds\n"); return err; @@ -339,13 +364,16 @@ static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; u8 module; int err; - module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; - err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module, - SFP_TEMP_HIGH_ALARM, p_temp); + module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count; + err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, + mlxsw_hwmon_dev->slot_index, + module, SFP_TEMP_HIGH_ALARM, + p_temp); if (err) { dev_err(dev, "Failed to query module temperature thresholds\n"); return err; @@ -387,9 +415,9 @@ mlxsw_hwmon_gbox_temp_label_show(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon 
*mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; int index = mlxsw_hwmon_attr->type_index - - mlxsw_hwmon->module_sensor_max + 1; + mlxsw_hwmon_dev->module_sensor_max + 1; return sprintf(buf, "gearbox %03u\n", index); } @@ -458,14 +486,15 @@ enum mlxsw_hwmon_attr_type { MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM, }; -static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, +static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev, enum mlxsw_hwmon_attr_type attr_type, - unsigned int type_index, unsigned int num) { + unsigned int type_index, unsigned int num) +{ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr; unsigned int attr_index; - attr_index = mlxsw_hwmon->attrs_count; - mlxsw_hwmon_attr = &mlxsw_hwmon->hwmon_attrs[attr_index]; + attr_index = mlxsw_hwmon_dev->attrs_count; + mlxsw_hwmon_attr = &mlxsw_hwmon_dev->hwmon_attrs[attr_index]; switch (attr_type) { case MLXSW_HWMON_ATTR_TYPE_TEMP: @@ -565,16 +594,17 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, } mlxsw_hwmon_attr->type_index = type_index; - mlxsw_hwmon_attr->hwmon = mlxsw_hwmon; + mlxsw_hwmon_attr->mlxsw_hwmon_dev = mlxsw_hwmon_dev; mlxsw_hwmon_attr->dev_attr.attr.name = mlxsw_hwmon_attr->name; sysfs_attr_init(&mlxsw_hwmon_attr->dev_attr.attr); - mlxsw_hwmon->attrs[attr_index] = &mlxsw_hwmon_attr->dev_attr.attr; - mlxsw_hwmon->attrs_count++; + mlxsw_hwmon_dev->attrs[attr_index] = &mlxsw_hwmon_attr->dev_attr.attr; + mlxsw_hwmon_dev->attrs_count++; } -static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon) +static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev) { + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mtcap_pl[MLXSW_REG_MTCAP_LEN] = {0}; int i; int err; @@ -584,10 +614,12 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon) dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get number of temp sensors\n"); return err; } - mlxsw_hwmon->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl); - for (i = 0; i < mlxsw_hwmon->sensor_count; i++) { + mlxsw_hwmon_dev->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl); + for (i = 0; i < mlxsw_hwmon_dev->sensor_count; i++) { char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0}; + mlxsw_reg_mtmp_slot_index_set(mtmp_pl, + mlxsw_hwmon_dev->slot_index); mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, i); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); @@ -602,18 +634,19 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon) i); return err; } - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_RST, i, i); } return 0; } -static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon) +static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev) { + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mfcr_pl[MLXSW_REG_MFCR_LEN] = {0}; enum mlxsw_reg_mfcr_pwm_frequency freq; unsigned int type_index; @@ -631,10 +664,10 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon) num = 0; for (type_index = 0; type_index < MLXSW_MFCR_TACHOS_MAX; type_index++) { if (tacho_active & BIT(type_index)) { - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, 
MLXSW_HWMON_ATTR_TYPE_FAN_RPM, type_index, num); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_FAN_FAULT, type_index, num++); } @@ -642,54 +675,55 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon) num = 0; for (type_index = 0; type_index < MLXSW_MFCR_PWMS_MAX; type_index++) { if (pwm_active & BIT(type_index)) - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_PWM, type_index, num++); } return 0; } -static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon) +static int mlxsw_hwmon_module_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev) { + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mgpir_pl[MLXSW_REG_MGPIR_LEN]; u8 module_sensor_max; int i, err; - mlxsw_reg_mgpir_pack(mgpir_pl); + mlxsw_reg_mgpir_pack(mgpir_pl, mlxsw_hwmon_dev->slot_index); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl); if (err) return err; mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, - &module_sensor_max); + &module_sensor_max, NULL); /* Add extra attributes for module temperature. Sensor index is * assigned to sensor_count value, while all indexed before * sensor_count are already utilized by the sensors connected through * mtmp register by mlxsw_hwmon_temp_init(). */ - mlxsw_hwmon->module_sensor_max = mlxsw_hwmon->sensor_count + - module_sensor_max; - for (i = mlxsw_hwmon->sensor_count; - i < mlxsw_hwmon->module_sensor_max; i++) { - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_dev->module_sensor_max = mlxsw_hwmon_dev->sensor_count + + module_sensor_max; + for (i = mlxsw_hwmon_dev->sensor_count; + i < mlxsw_hwmon_dev->module_sensor_max; i++) { + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM, i, i); } @@ -697,8 +731,9 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon) return 0; } -static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon) +static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev) { + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; enum mlxsw_reg_mgpir_device_type device_type; int index, max_index, sensor_index; char mgpir_pl[MLXSW_REG_MGPIR_LEN]; @@ -706,22 +741,24 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon) u8 gbox_num; int err; - mlxsw_reg_mgpir_pack(mgpir_pl); + mlxsw_reg_mgpir_pack(mgpir_pl, mlxsw_hwmon_dev->slot_index); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl); if (err) return err; - mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL); + mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL, + NULL); if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE || !gbox_num) return 0; - index = mlxsw_hwmon->module_sensor_max; - 
max_index = mlxsw_hwmon->module_sensor_max + gbox_num; + index = mlxsw_hwmon_dev->module_sensor_max; + max_index = mlxsw_hwmon_dev->module_sensor_max + gbox_num; while (index < max_index) { - sensor_index = index % mlxsw_hwmon->module_sensor_max + + sensor_index = index % mlxsw_hwmon_dev->module_sensor_max + MLXSW_REG_MTMP_GBOX_INDEX_MIN; - mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, true, true); + mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, + sensor_index, true, true); err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); if (err) { @@ -729,15 +766,15 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon) sensor_index); return err; } - mlxsw_hwmon_attr_add(mlxsw_hwmon, MLXSW_HWMON_ATTR_TYPE_TEMP, - index, index); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, + MLXSW_HWMON_ATTR_TYPE_TEMP, index, index); + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, index, index); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_RST, index, index); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL, index, index); index++; @@ -746,51 +783,144 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon) return 0; } +static void +mlxsw_hwmon_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index, + void *priv) +{ + struct mlxsw_hwmon *hwmon = priv; + struct mlxsw_hwmon_dev *linecard; + struct device *dev; + int err; + + dev = hwmon->bus_info->dev; + linecard = &hwmon->line_cards[slot_index]; + if (linecard->active) + return; + /* For the main board, module sensor indexes start from 1, sensor index + * 0 is used for the ASIC. Use the same numbering for line cards. 
+ */ + linecard->sensor_count = 1; + linecard->slot_index = slot_index; + linecard->hwmon = hwmon; + err = mlxsw_hwmon_module_init(linecard); + if (err) { + dev_err(dev, "Failed to configure hwmon objects for line card modules in slot %d\n", + slot_index); + return; + } + + err = mlxsw_hwmon_gearbox_init(linecard); + if (err) { + dev_err(dev, "Failed to configure hwmon objects for line card gearboxes in slot %d\n", + slot_index); + return; + } + + linecard->groups[0] = &linecard->group; + linecard->group.attrs = linecard->attrs; + sprintf(linecard->name, "%s#%02u", "linecard", slot_index); + linecard->hwmon_dev = + hwmon_device_register_with_groups(dev, linecard->name, + linecard, linecard->groups); + if (IS_ERR(linecard->hwmon_dev)) { + dev_err(dev, "Failed to register hwmon objects for line card in slot %d\n", + slot_index); + return; + } + + linecard->active = true; +} + +static void +mlxsw_hwmon_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index, + void *priv) +{ + struct mlxsw_hwmon *hwmon = priv; + struct mlxsw_hwmon_dev *linecard; + + linecard = &hwmon->line_cards[slot_index]; + if (!linecard->active) + return; + linecard->active = false; + hwmon_device_unregister(linecard->hwmon_dev); + /* Reset attributes counter */ + linecard->attrs_count = 0; +} + +static struct mlxsw_linecards_event_ops mlxsw_hwmon_event_ops = { + .got_active = mlxsw_hwmon_got_active, + .got_inactive = mlxsw_hwmon_got_inactive, +}; + int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info, struct mlxsw_hwmon **p_hwmon) { + char mgpir_pl[MLXSW_REG_MGPIR_LEN]; struct mlxsw_hwmon *mlxsw_hwmon; struct device *hwmon_dev; + u8 num_of_slots; int err; - mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL); + mlxsw_reg_mgpir_pack(mgpir_pl, 0); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl); + if (err) + return err; + + mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL, + &num_of_slots); + + mlxsw_hwmon = kzalloc(struct_size(mlxsw_hwmon, line_cards, + num_of_slots + 1), GFP_KERNEL); if (!mlxsw_hwmon) return -ENOMEM; + mlxsw_hwmon->core = mlxsw_core; mlxsw_hwmon->bus_info = mlxsw_bus_info; + mlxsw_hwmon->line_cards[0].hwmon = mlxsw_hwmon; + mlxsw_hwmon->line_cards[0].slot_index = 0; - err = mlxsw_hwmon_temp_init(mlxsw_hwmon); + err = mlxsw_hwmon_temp_init(&mlxsw_hwmon->line_cards[0]); if (err) goto err_temp_init; - err = mlxsw_hwmon_fans_init(mlxsw_hwmon); + err = mlxsw_hwmon_fans_init(&mlxsw_hwmon->line_cards[0]); if (err) goto err_fans_init; - err = mlxsw_hwmon_module_init(mlxsw_hwmon); + err = mlxsw_hwmon_module_init(&mlxsw_hwmon->line_cards[0]); if (err) goto err_temp_module_init; - err = mlxsw_hwmon_gearbox_init(mlxsw_hwmon); + err = mlxsw_hwmon_gearbox_init(&mlxsw_hwmon->line_cards[0]); if (err) goto err_temp_gearbox_init; - mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group; - mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs; + mlxsw_hwmon->line_cards[0].groups[0] = &mlxsw_hwmon->line_cards[0].group; + mlxsw_hwmon->line_cards[0].group.attrs = mlxsw_hwmon->line_cards[0].attrs; hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev, - "mlxsw", mlxsw_hwmon, - mlxsw_hwmon->groups); + "mlxsw", + &mlxsw_hwmon->line_cards[0], + mlxsw_hwmon->line_cards[0].groups); if (IS_ERR(hwmon_dev)) { err = PTR_ERR(hwmon_dev); goto err_hwmon_register; } - mlxsw_hwmon->hwmon_dev = hwmon_dev; + err = mlxsw_linecards_event_ops_register(mlxsw_hwmon->core, + &mlxsw_hwmon_event_ops, + mlxsw_hwmon); + if (err) + goto err_linecards_event_ops_register; + + 
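
Both core_env.c and core_hwmon.c now consume the same line-card notification interface: each registers a got_active/got_inactive pair and builds its per-slot objects lazily once a card turns active. Below is a minimal sketch of such a consumer, assuming only the interface visible in this patch; the mlxsw_linecards_event_ops structure and the register/unregister entry points are the ones added here, while every my_-prefixed name is hypothetical. Note that, per the core_linecards.c hunk further down, registration replays got_active for slots that are already active, so a late registrant cannot miss a card.

/* Hypothetical consumer of the line-card event interface; my_* names
 * are illustrative, the mlxsw_* symbols come from this patch.
 */
static void my_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index,
			  void *priv)
{
	/* The card in @slot_index is provisioned and powered; per-slot
	 * state may be allocated and its registers queried from here.
	 */
	pr_info("line card %u: active\n", slot_index);
}

static void my_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
			    void *priv)
{
	pr_info("line card %u: inactive\n", slot_index);
}

static struct mlxsw_linecards_event_ops my_event_ops = {
	.got_active	= my_got_active,
	.got_inactive	= my_got_inactive,
};

static int my_init(struct mlxsw_core *mlxsw_core, void *priv)
{
	/* Registration replays got_active for already-active slots under
	 * the per-linecard lock, so the registrant sees a consistent view.
	 */
	return mlxsw_linecards_event_ops_register(mlxsw_core,
						  &my_event_ops, priv);
}

static void my_fini(struct mlxsw_core *mlxsw_core, void *priv)
{
	mlxsw_linecards_event_ops_unregister(mlxsw_core, &my_event_ops,
					     priv);
}
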
mlxsw_hwmon->line_cards[0].hwmon_dev = hwmon_dev; + mlxsw_hwmon->line_cards[0].active = true; *p_hwmon = mlxsw_hwmon; return 0; +err_linecards_event_ops_register: + hwmon_device_unregister(mlxsw_hwmon->line_cards[0].hwmon_dev); err_hwmon_register: err_temp_gearbox_init: err_temp_module_init: @@ -802,6 +932,9 @@ err_temp_init: void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon) { - hwmon_device_unregister(mlxsw_hwmon->hwmon_dev); + mlxsw_hwmon->line_cards[0].active = false; + mlxsw_linecards_event_ops_unregister(mlxsw_hwmon->core, + &mlxsw_hwmon_event_ops, mlxsw_hwmon); + hwmon_device_unregister(mlxsw_hwmon->line_cards[0].hwmon_dev); kfree(mlxsw_hwmon); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c new file mode 100644 index 000000000000..2abd31a62776 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c @@ -0,0 +1,1373 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2022 NVIDIA Corporation and Mellanox Technologies. All rights reserved */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/workqueue.h> +#include <linux/gfp.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/vmalloc.h> + +#include "core.h" + +struct mlxsw_linecard_ini_file { + __le16 size; + union { + u8 data[0]; + struct { + __be16 hw_revision; + __be16 ini_version; + u8 __dontcare[3]; + u8 type; + u8 name[20]; + } format; + }; +}; + +struct mlxsw_linecard_types_info { + struct mlxsw_linecard_ini_file **ini_files; + unsigned int count; + size_t data_size; + char *data; +}; + +#define MLXSW_LINECARD_STATUS_EVENT_TO (10 * MSEC_PER_SEC) + +static void +mlxsw_linecard_status_event_to_schedule(struct mlxsw_linecard *linecard, + enum mlxsw_linecard_status_event_type status_event_type) +{ + cancel_delayed_work_sync(&linecard->status_event_to_dw); + linecard->status_event_type_to = status_event_type; + mlxsw_core_schedule_dw(&linecard->status_event_to_dw, + msecs_to_jiffies(MLXSW_LINECARD_STATUS_EVENT_TO)); +} + +static void +mlxsw_linecard_status_event_done(struct mlxsw_linecard *linecard, + enum mlxsw_linecard_status_event_type status_event_type) +{ + if (linecard->status_event_type_to == status_event_type) + cancel_delayed_work_sync(&linecard->status_event_to_dw); +} + +static const char * +mlxsw_linecard_types_lookup(struct mlxsw_linecards *linecards, u8 card_type) +{ + struct mlxsw_linecard_types_info *types_info; + struct mlxsw_linecard_ini_file *ini_file; + int i; + + types_info = linecards->types_info; + if (!types_info) + return NULL; + for (i = 0; i < types_info->count; i++) { + ini_file = linecards->types_info->ini_files[i]; + if (ini_file->format.type == card_type) + return ini_file->format.name; + } + return NULL; +} + +static const char *mlxsw_linecard_type_name(struct mlxsw_linecard *linecard) +{ + struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; + char mddq_pl[MLXSW_REG_MDDQ_LEN]; + int err; + + mlxsw_reg_mddq_slot_name_pack(mddq_pl, linecard->slot_index); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl); + if (err) + return ERR_PTR(err); + mlxsw_reg_mddq_slot_name_unpack(mddq_pl, linecard->name); + return linecard->name; +} + +struct mlxsw_linecard_device_info { + u16 fw_major; + u16 fw_minor; + u16 fw_sub_minor; +}; + +struct mlxsw_linecard_device { + struct list_head list; + u8 index; + struct mlxsw_linecard *linecard; + struct 
devlink_linecard_device *devlink_device; + struct mlxsw_linecard_device_info info; +}; + +static struct mlxsw_linecard_device * +mlxsw_linecard_device_lookup(struct mlxsw_linecard *linecard, u8 index) +{ + struct mlxsw_linecard_device *device; + + list_for_each_entry(device, &linecard->device_list, list) + if (device->index == index) + return device; + return NULL; +} + +static int mlxsw_linecard_device_attach(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecard *linecard, + u8 device_index, bool flash_owner) +{ + struct mlxsw_linecard_device *device; + int err; + + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) + return -ENOMEM; + device->index = device_index; + device->linecard = linecard; + + device->devlink_device = devlink_linecard_device_create(linecard->devlink_linecard, + device_index, device); + if (IS_ERR(device->devlink_device)) { + err = PTR_ERR(device->devlink_device); + goto err_devlink_linecard_device_attach; + } + + list_add_tail(&device->list, &linecard->device_list); + return 0; + +err_devlink_linecard_device_attach: + kfree(device); + return err; +} + +static void mlxsw_linecard_device_detach(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecard *linecard, + struct mlxsw_linecard_device *device) +{ + list_del(&device->list); + devlink_linecard_device_destroy(linecard->devlink_linecard, + device->devlink_device); + kfree(device); +} + +static void mlxsw_linecard_devices_detach(struct mlxsw_linecard *linecard) +{ + struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; + struct mlxsw_linecard_device *device, *tmp; + + list_for_each_entry_safe(device, tmp, &linecard->device_list, list) + mlxsw_linecard_device_detach(mlxsw_core, linecard, device); +} + +static int mlxsw_linecard_devices_attach(struct mlxsw_linecard *linecard) +{ + struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; + u8 msg_seq = 0; + int err; + + do { + char mddq_pl[MLXSW_REG_MDDQ_LEN]; + bool flash_owner; + bool data_valid; + u8 device_index; + + mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index, + msg_seq); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl); + if (err) + return err; + mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq, + &data_valid, &flash_owner, + &device_index, NULL, + NULL, NULL); + if (!data_valid) + break; + err = mlxsw_linecard_device_attach(mlxsw_core, linecard, + device_index, flash_owner); + if (err) + goto rollback; + } while (msg_seq); + + return 0; + +rollback: + mlxsw_linecard_devices_detach(linecard); + return err; +} + +static void mlxsw_linecard_device_update(struct mlxsw_linecard *linecard, + u8 device_index, + struct mlxsw_linecard_device_info *info) +{ + struct mlxsw_linecard_device *device; + + device = mlxsw_linecard_device_lookup(linecard, device_index); + if (!device) + return; + device->info = *info; +} + +static int mlxsw_linecard_devices_update(struct mlxsw_linecard *linecard) +{ + struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; + u8 msg_seq = 0; + + do { + struct mlxsw_linecard_device_info info; + char mddq_pl[MLXSW_REG_MDDQ_LEN]; + bool data_valid; + u8 device_index; + int err; + + mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index, + msg_seq); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl); + if (err) + return err; + mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq, + &data_valid, NULL, + &device_index, + &info.fw_major, + &info.fw_minor, + &info.fw_sub_minor); + if (!data_valid) + break; + mlxsw_linecard_device_update(linecard, device_index, &info); 
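
Both mlxsw_linecard_devices_attach() above and mlxsw_linecard_devices_update(), whose closing "} while (msg_seq);" follows this aside, page through the per-slot device table with the same firmware convention: the first MDDQ device_info query carries msg_seq == 0, every response returns the token to feed into the next query together with a data_valid flag, and iteration stops once data_valid is clear or the returned token is zero. A condensed sketch of that convention, where mddq_device_info_query() and my_device_rec are assumed helpers wrapping the mlxsw_reg_mddq_device_info_pack()/mlxsw_reg_query()/..._unpack() sequence used in both loops:

/* Sketch only: mddq_device_info_query() and my_device_rec are assumed
 * helpers, not part of this patch.
 */
struct my_device_rec {
	u8 index;
	u16 fw_major, fw_minor, fw_sub_minor;
};

static int my_walk_devices(struct mlxsw_core *mlxsw_core, u8 slot_index)
{
	u8 msg_seq = 0;		/* zero requests the first record */

	do {
		struct my_device_rec rec;
		bool data_valid;
		int err;

		err = mddq_device_info_query(mlxsw_core, slot_index,
					     &msg_seq, &data_valid, &rec);
		if (err)
			return err;
		if (!data_valid)	/* slot has no (more) devices */
			break;
		/* consume rec: attach a devlink device, cache FW version */
	} while (msg_seq);		/* non-zero token: more records */

	return 0;
}
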
+ } while (msg_seq); + + return 0; +} + +static int +mlxsw_linecard_device_info_get(struct devlink_linecard_device *devlink_linecard_device, + void *priv, struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct mlxsw_linecard_device *device = priv; + struct mlxsw_linecard_device_info *info; + struct mlxsw_linecard *linecard; + char buf[32]; + + linecard = device->linecard; + mutex_lock(&linecard->lock); + if (!linecard->active) { + mutex_unlock(&linecard->lock); + return 0; + } + + info = &device->info; + + sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor, + info->fw_sub_minor); + mutex_unlock(&linecard->lock); + + return devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + buf); +} + +static void mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard) +{ + linecard->provisioned = false; + linecard->ready = false; + linecard->active = false; + mlxsw_linecard_devices_detach(linecard); + devlink_linecard_provision_fail(linecard->devlink_linecard); +} + +struct mlxsw_linecards_event_ops_item { + struct list_head list; + const struct mlxsw_linecards_event_ops *event_ops; + void *priv; +}; + +static void +mlxsw_linecard_event_op_call(struct mlxsw_linecard *linecard, + mlxsw_linecards_event_op_t *op, void *priv) +{ + struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; + + if (!op) + return; + op(mlxsw_core, linecard->slot_index, priv); +} + +static void +mlxsw_linecard_active_ops_call(struct mlxsw_linecard *linecard) +{ + struct mlxsw_linecards *linecards = linecard->linecards; + struct mlxsw_linecards_event_ops_item *item; + + mutex_lock(&linecards->event_ops_list_lock); + list_for_each_entry(item, &linecards->event_ops_list, list) + mlxsw_linecard_event_op_call(linecard, + item->event_ops->got_active, + item->priv); + mutex_unlock(&linecards->event_ops_list_lock); +} + +static void +mlxsw_linecard_inactive_ops_call(struct mlxsw_linecard *linecard) +{ + struct mlxsw_linecards *linecards = linecard->linecards; + struct mlxsw_linecards_event_ops_item *item; + + mutex_lock(&linecards->event_ops_list_lock); + list_for_each_entry(item, &linecards->event_ops_list, list) + mlxsw_linecard_event_op_call(linecard, + item->event_ops->got_inactive, + item->priv); + mutex_unlock(&linecards->event_ops_list_lock); +} + +static void +mlxsw_linecards_event_ops_register_call(struct mlxsw_linecards *linecards, + const struct mlxsw_linecards_event_ops_item *item) +{ + struct mlxsw_linecard *linecard; + int i; + + for (i = 0; i < linecards->count; i++) { + linecard = mlxsw_linecard_get(linecards, i + 1); + mutex_lock(&linecard->lock); + if (linecard->active) + mlxsw_linecard_event_op_call(linecard, + item->event_ops->got_active, + item->priv); + mutex_unlock(&linecard->lock); + } +} + +static void +mlxsw_linecards_event_ops_unregister_call(struct mlxsw_linecards *linecards, + const struct mlxsw_linecards_event_ops_item *item) +{ + struct mlxsw_linecard *linecard; + int i; + + for (i = 0; i < linecards->count; i++) { + linecard = mlxsw_linecard_get(linecards, i + 1); + mutex_lock(&linecard->lock); + if (linecard->active) + mlxsw_linecard_event_op_call(linecard, + item->event_ops->got_inactive, + item->priv); + mutex_unlock(&linecard->lock); + } +} + +int mlxsw_linecards_event_ops_register(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards_event_ops *ops, + void *priv) +{ + struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core); + struct mlxsw_linecards_event_ops_item *item; + + if (!linecards) + return 0; + item = 
kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) + return -ENOMEM; + item->event_ops = ops; + item->priv = priv; + + mutex_lock(&linecards->event_ops_list_lock); + list_add_tail(&item->list, &linecards->event_ops_list); + mutex_unlock(&linecards->event_ops_list_lock); + mlxsw_linecards_event_ops_register_call(linecards, item); + return 0; +} +EXPORT_SYMBOL(mlxsw_linecards_event_ops_register); + +void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards_event_ops *ops, + void *priv) +{ + struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core); + struct mlxsw_linecards_event_ops_item *item, *tmp; + bool found = false; + + if (!linecards) + return; + mutex_lock(&linecards->event_ops_list_lock); + list_for_each_entry_safe(item, tmp, &linecards->event_ops_list, list) { + if (item->event_ops == ops && item->priv == priv) { + list_del(&item->list); + found = true; + break; + } + } + mutex_unlock(&linecards->event_ops_list_lock); + + if (!found) + return; + mlxsw_linecards_event_ops_unregister_call(linecards, item); + kfree(item); +} +EXPORT_SYMBOL(mlxsw_linecards_event_ops_unregister); + +static int +mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type, + u16 hw_revision, u16 ini_version) +{ + struct mlxsw_linecards *linecards = linecard->linecards; + const char *type; + int err; + + type = mlxsw_linecard_types_lookup(linecards, card_type); + mlxsw_linecard_status_event_done(linecard, + MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION); + if (!type) { + /* It is possible for a line card to be provisioned before + * driver initialization. Due to a missing INI bundle file + * or an outdated one, the queried card's type might not + * be recognized by the driver. In this case, try to query + * the card's name from the device. 
+ */ + type = mlxsw_linecard_type_name(linecard); + if (IS_ERR(type)) { + mlxsw_linecard_provision_fail(linecard); + return PTR_ERR(type); + } + } + err = mlxsw_linecard_devices_attach(linecard); + if (err) { + mlxsw_linecard_provision_fail(linecard); + return err; + } + linecard->provisioned = true; + linecard->hw_revision = hw_revision; + linecard->ini_version = ini_version; + devlink_linecard_provision_set(linecard->devlink_linecard, type); + return 0; +} + +static void mlxsw_linecard_provision_clear(struct mlxsw_linecard *linecard) +{ + mlxsw_linecard_status_event_done(linecard, + MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION); + linecard->provisioned = false; + mlxsw_linecard_devices_detach(linecard); + devlink_linecard_provision_clear(linecard->devlink_linecard); +} + +static int mlxsw_linecard_ready_set(struct mlxsw_linecard *linecard) +{ + struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; + char mddc_pl[MLXSW_REG_MDDC_LEN]; + int err; + + mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, true); + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl); + if (err) + return err; + linecard->ready = true; + return 0; +} + +static int mlxsw_linecard_ready_clear(struct mlxsw_linecard *linecard) +{ + struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; + char mddc_pl[MLXSW_REG_MDDC_LEN]; + int err; + + mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, false); + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl); + if (err) + return err; + linecard->ready = false; + return 0; +} + +static int mlxsw_linecard_active_set(struct mlxsw_linecard *linecard) +{ + int err; + + err = mlxsw_linecard_devices_update(linecard); + if (err) + return err; + + mlxsw_linecard_active_ops_call(linecard); + linecard->active = true; + devlink_linecard_activate(linecard->devlink_linecard); + return 0; +} + +static void mlxsw_linecard_active_clear(struct mlxsw_linecard *linecard) +{ + mlxsw_linecard_inactive_ops_call(linecard); + linecard->active = false; + devlink_linecard_deactivate(linecard->devlink_linecard); +} + +static int mlxsw_linecard_status_process(struct mlxsw_linecards *linecards, + struct mlxsw_linecard *linecard, + const char *mddq_pl) +{ + enum mlxsw_reg_mddq_slot_info_ready ready; + bool provisioned, sr_valid, active; + u16 ini_version, hw_revision; + u8 slot_index, card_type; + int err = 0; + + mlxsw_reg_mddq_slot_info_unpack(mddq_pl, &slot_index, &provisioned, + &sr_valid, &ready, &active, + &hw_revision, &ini_version, + &card_type); + + if (linecard) { + if (WARN_ON(slot_index != linecard->slot_index)) + return -EINVAL; + } else { + if (WARN_ON(slot_index > linecards->count)) + return -EINVAL; + linecard = mlxsw_linecard_get(linecards, slot_index); + } + + mutex_lock(&linecard->lock); + + if (provisioned && linecard->provisioned != provisioned) { + err = mlxsw_linecard_provision_set(linecard, card_type, + hw_revision, ini_version); + if (err) + goto out; + } + + if (ready == MLXSW_REG_MDDQ_SLOT_INFO_READY_READY && !linecard->ready) { + err = mlxsw_linecard_ready_set(linecard); + if (err) + goto out; + } + + if (active && linecard->active != active) { + err = mlxsw_linecard_active_set(linecard); + if (err) + goto out; + } + + if (!active && linecard->active != active) + mlxsw_linecard_active_clear(linecard); + + if (ready != MLXSW_REG_MDDQ_SLOT_INFO_READY_READY && + linecard->ready) { + err = mlxsw_linecard_ready_clear(linecard); + if (err) + goto out; + } + + if (!provisioned && linecard->provisioned != provisioned) + 
mlxsw_linecard_provision_clear(linecard); + +out: + mutex_unlock(&linecard->lock); + return err; +} + +static int mlxsw_linecard_status_get_and_process(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards *linecards, + struct mlxsw_linecard *linecard) +{ + char mddq_pl[MLXSW_REG_MDDQ_LEN]; + int err; + + mlxsw_reg_mddq_slot_info_pack(mddq_pl, linecard->slot_index, false); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl); + if (err) + return err; + + return mlxsw_linecard_status_process(linecards, linecard, mddq_pl); +} + +static const char * const mlxsw_linecard_status_event_type_name[] = { + [MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION] = "provision", + [MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION] = "unprovision", +}; + +static void mlxsw_linecard_status_event_to_work(struct work_struct *work) +{ + struct mlxsw_linecard *linecard = + container_of(work, struct mlxsw_linecard, + status_event_to_dw.work); + + mutex_lock(&linecard->lock); + dev_err(linecard->linecards->bus_info->dev, "linecard %u: Timeout reached waiting on %s status event", + linecard->slot_index, + mlxsw_linecard_status_event_type_name[linecard->status_event_type_to]); + mlxsw_linecard_provision_fail(linecard); + mutex_unlock(&linecard->lock); +} + +static int __mlxsw_linecard_fix_fsm_state(struct mlxsw_linecard *linecard) +{ + dev_info(linecard->linecards->bus_info->dev, "linecard %u: Clearing FSM state error", + linecard->slot_index); + mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index, + MLXSW_REG_MBCT_OP_CLEAR_ERRORS, false); + return mlxsw_reg_write(linecard->linecards->mlxsw_core, + MLXSW_REG(mbct), linecard->mbct_pl); +} + +static int mlxsw_linecard_fix_fsm_state(struct mlxsw_linecard *linecard, + enum mlxsw_reg_mbct_fsm_state fsm_state) +{ + if (fsm_state != MLXSW_REG_MBCT_FSM_STATE_ERROR) + return 0; + return __mlxsw_linecard_fix_fsm_state(linecard); +} + +static int +mlxsw_linecard_query_ini_status(struct mlxsw_linecard *linecard, + enum mlxsw_reg_mbct_status *status, + enum mlxsw_reg_mbct_fsm_state *fsm_state, + struct netlink_ext_ack *extack) +{ + int err; + + mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index, + MLXSW_REG_MBCT_OP_QUERY_STATUS, false); + err = mlxsw_reg_query(linecard->linecards->mlxsw_core, MLXSW_REG(mbct), + linecard->mbct_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to query linecard INI status"); + return err; + } + mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, status, fsm_state); + return err; +} + +static int +mlxsw_linecard_ini_transfer(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecard *linecard, + const struct mlxsw_linecard_ini_file *ini_file, + struct netlink_ext_ack *extack) +{ + enum mlxsw_reg_mbct_fsm_state fsm_state; + enum mlxsw_reg_mbct_status status; + size_t size_left; + const u8 *data; + int err; + + size_left = le16_to_cpu(ini_file->size); + data = ini_file->data; + while (size_left) { + size_t data_size = MLXSW_REG_MBCT_DATA_LEN; + bool is_last = false; + + if (size_left <= MLXSW_REG_MBCT_DATA_LEN) { + data_size = size_left; + is_last = true; + } + + mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index, + MLXSW_REG_MBCT_OP_DATA_TRANSFER, false); + mlxsw_reg_mbct_dt_pack(linecard->mbct_pl, data_size, + is_last, data); + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct), + linecard->mbct_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI data transfer"); + return err; + } + mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, + &status, &fsm_state); + if ((!is_last && status != 
MLXSW_REG_MBCT_STATUS_PART_DATA) || + (is_last && status != MLXSW_REG_MBCT_STATUS_LAST_DATA)) { + NL_SET_ERR_MSG_MOD(extack, "Failed to transfer linecard INI data"); + mlxsw_linecard_fix_fsm_state(linecard, fsm_state); + return -EINVAL; + } + size_left -= data_size; + data += data_size; + } + + return 0; +} + +static int +mlxsw_linecard_ini_erase(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecard *linecard, + struct netlink_ext_ack *extack) +{ + enum mlxsw_reg_mbct_fsm_state fsm_state; + enum mlxsw_reg_mbct_status status; + int err; + + mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index, + MLXSW_REG_MBCT_OP_ERASE_INI_IMAGE, false); + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct), + linecard->mbct_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI erase"); + return err; + } + mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, &status, &fsm_state); + switch (status) { + case MLXSW_REG_MBCT_STATUS_ERASE_COMPLETE: + break; + default: + /* Should not happen */ + fallthrough; + case MLXSW_REG_MBCT_STATUS_ERASE_FAILED: + NL_SET_ERR_MSG_MOD(extack, "Failed to erase linecard INI"); + goto fix_fsm_err_out; + case MLXSW_REG_MBCT_STATUS_ERROR_INI_IN_USE: + NL_SET_ERR_MSG_MOD(extack, "Failed to erase linecard INI while being used"); + goto fix_fsm_err_out; + } + return 0; + +fix_fsm_err_out: + mlxsw_linecard_fix_fsm_state(linecard, fsm_state); + return -EINVAL; +} + +static void mlxsw_linecard_bct_process(struct mlxsw_core *mlxsw_core, + const char *mbct_pl) +{ + struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core); + enum mlxsw_reg_mbct_fsm_state fsm_state; + enum mlxsw_reg_mbct_status status; + struct mlxsw_linecard *linecard; + u8 slot_index; + + mlxsw_reg_mbct_unpack(mbct_pl, &slot_index, &status, &fsm_state); + if (WARN_ON(slot_index > linecards->count)) + return; + linecard = mlxsw_linecard_get(linecards, slot_index); + mutex_lock(&linecard->lock); + if (status == MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED) { + dev_err(linecards->bus_info->dev, "linecard %u: Failed to activate INI", + linecard->slot_index); + goto fix_fsm_out; + } + mutex_unlock(&linecard->lock); + return; + +fix_fsm_out: + mlxsw_linecard_fix_fsm_state(linecard, fsm_state); + mlxsw_linecard_provision_fail(linecard); + mutex_unlock(&linecard->lock); +} + +static int +mlxsw_linecard_ini_activate(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecard *linecard, + struct netlink_ext_ack *extack) +{ + enum mlxsw_reg_mbct_fsm_state fsm_state; + enum mlxsw_reg_mbct_status status; + int err; + + mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index, + MLXSW_REG_MBCT_OP_ACTIVATE, true); + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct), linecard->mbct_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI activation"); + return err; + } + mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, &status, &fsm_state); + if (status == MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED) { + NL_SET_ERR_MSG_MOD(extack, "Failed to activate linecard INI"); + goto fix_fsm_err_out; + } + + return 0; + +fix_fsm_err_out: + mlxsw_linecard_fix_fsm_state(linecard, fsm_state); + return -EINVAL; +} + +#define MLXSW_LINECARD_INI_WAIT_RETRIES 10 +#define MLXSW_LINECARD_INI_WAIT_MS 500 + +static int +mlxsw_linecard_ini_in_use_wait(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecard *linecard, + struct netlink_ext_ack *extack) +{ + enum mlxsw_reg_mbct_fsm_state fsm_state; + enum mlxsw_reg_mbct_status status; + unsigned int ini_wait_retries = 0; + int err; + +query_ini_status: + err = 
mlxsw_linecard_query_ini_status(linecard, &status, + &fsm_state, extack); + if (err) + return err; + + switch (fsm_state) { + case MLXSW_REG_MBCT_FSM_STATE_INI_IN_USE: + if (ini_wait_retries++ > MLXSW_LINECARD_INI_WAIT_RETRIES) { + NL_SET_ERR_MSG_MOD(extack, "Failed to wait for linecard INI to be unused"); + return -EINVAL; + } + mdelay(MLXSW_LINECARD_INI_WAIT_MS); + goto query_ini_status; + default: + break; + } + return 0; +} + +static bool mlxsw_linecard_port_selector(void *priv, u16 local_port) +{ + struct mlxsw_linecard *linecard = priv; + struct mlxsw_core *mlxsw_core; + + mlxsw_core = linecard->linecards->mlxsw_core; + return linecard == mlxsw_core_port_linecard_get(mlxsw_core, local_port); +} + +static int mlxsw_linecard_provision(struct devlink_linecard *devlink_linecard, + void *priv, const char *type, + const void *type_priv, + struct netlink_ext_ack *extack) +{ + const struct mlxsw_linecard_ini_file *ini_file = type_priv; + struct mlxsw_linecard *linecard = priv; + struct mlxsw_core *mlxsw_core; + int err; + + mutex_lock(&linecard->lock); + + mlxsw_core = linecard->linecards->mlxsw_core; + + err = mlxsw_linecard_ini_erase(mlxsw_core, linecard, extack); + if (err) + goto err_out; + + err = mlxsw_linecard_ini_transfer(mlxsw_core, linecard, + ini_file, extack); + if (err) + goto err_out; + + mlxsw_linecard_status_event_to_schedule(linecard, + MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION); + err = mlxsw_linecard_ini_activate(mlxsw_core, linecard, extack); + if (err) + goto err_out; + + goto out; + +err_out: + mlxsw_linecard_provision_fail(linecard); +out: + mutex_unlock(&linecard->lock); + return err; +} + +static int mlxsw_linecard_unprovision(struct devlink_linecard *devlink_linecard, + void *priv, + struct netlink_ext_ack *extack) +{ + struct mlxsw_linecard *linecard = priv; + struct mlxsw_core *mlxsw_core; + int err; + + mutex_lock(&linecard->lock); + + mlxsw_core = linecard->linecards->mlxsw_core; + + mlxsw_core_ports_remove_selected(mlxsw_core, + mlxsw_linecard_port_selector, + linecard); + + err = mlxsw_linecard_ini_in_use_wait(mlxsw_core, linecard, extack); + if (err) + goto err_out; + + mlxsw_linecard_status_event_to_schedule(linecard, + MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION); + err = mlxsw_linecard_ini_erase(mlxsw_core, linecard, extack); + if (err) + goto err_out; + + goto out; + +err_out: + mlxsw_linecard_provision_fail(linecard); +out: + mutex_unlock(&linecard->lock); + return err; +} + +static bool mlxsw_linecard_same_provision(struct devlink_linecard *devlink_linecard, + void *priv, const char *type, + const void *type_priv) +{ + const struct mlxsw_linecard_ini_file *ini_file = type_priv; + struct mlxsw_linecard *linecard = priv; + bool ret; + + mutex_lock(&linecard->lock); + ret = linecard->hw_revision == be16_to_cpu(ini_file->format.hw_revision) && + linecard->ini_version == be16_to_cpu(ini_file->format.ini_version); + mutex_unlock(&linecard->lock); + return ret; +} + +static unsigned int +mlxsw_linecard_types_count(struct devlink_linecard *devlink_linecard, + void *priv) +{ + struct mlxsw_linecard *linecard = priv; + + return linecard->linecards->types_info ? 
+ linecard->linecards->types_info->count : 0; +} + +static void mlxsw_linecard_types_get(struct devlink_linecard *devlink_linecard, + void *priv, unsigned int index, + const char **type, const void **type_priv) +{ + struct mlxsw_linecard_types_info *types_info; + struct mlxsw_linecard_ini_file *ini_file; + struct mlxsw_linecard *linecard = priv; + + types_info = linecard->linecards->types_info; + if (WARN_ON_ONCE(!types_info)) + return; + ini_file = types_info->ini_files[index]; + *type = ini_file->format.name; + *type_priv = ini_file; +} + +static int +mlxsw_linecard_info_get(struct devlink_linecard *devlink_linecard, void *priv, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct mlxsw_linecard *linecard = priv; + char buf[32]; + int err; + + mutex_lock(&linecard->lock); + if (!linecard->provisioned) { + err = 0; + goto unlock; + } + + sprintf(buf, "%d", linecard->hw_revision); + err = devlink_info_version_fixed_put(req, "hw.revision", buf); + if (err) + goto unlock; + + sprintf(buf, "%d", linecard->ini_version); + err = devlink_info_version_running_put(req, "ini.version", buf); + if (err) + goto unlock; + +unlock: + mutex_unlock(&linecard->lock); + return err; +} + +static const struct devlink_linecard_ops mlxsw_linecard_ops = { + .provision = mlxsw_linecard_provision, + .unprovision = mlxsw_linecard_unprovision, + .same_provision = mlxsw_linecard_same_provision, + .types_count = mlxsw_linecard_types_count, + .types_get = mlxsw_linecard_types_get, + .info_get = mlxsw_linecard_info_get, + .device_info_get = mlxsw_linecard_device_info_get, +}; + +struct mlxsw_linecard_status_event { + struct mlxsw_core *mlxsw_core; + char mddq_pl[MLXSW_REG_MDDQ_LEN]; + struct work_struct work; +}; + +static void mlxsw_linecard_status_event_work(struct work_struct *work) +{ + struct mlxsw_linecard_status_event *event; + struct mlxsw_linecards *linecards; + struct mlxsw_core *mlxsw_core; + + event = container_of(work, struct mlxsw_linecard_status_event, work); + mlxsw_core = event->mlxsw_core; + linecards = mlxsw_core_linecards(mlxsw_core); + mlxsw_linecard_status_process(linecards, NULL, event->mddq_pl); + kfree(event); +} + +static void +mlxsw_linecard_status_listener_func(const struct mlxsw_reg_info *reg, + char *mddq_pl, void *priv) +{ + struct mlxsw_linecard_status_event *event; + struct mlxsw_core *mlxsw_core = priv; + + event = kmalloc(sizeof(*event), GFP_ATOMIC); + if (!event) + return; + event->mlxsw_core = mlxsw_core; + memcpy(event->mddq_pl, mddq_pl, sizeof(event->mddq_pl)); + INIT_WORK(&event->work, mlxsw_linecard_status_event_work); + mlxsw_core_schedule_work(&event->work); +} + +struct mlxsw_linecard_bct_event { + struct mlxsw_core *mlxsw_core; + char mbct_pl[MLXSW_REG_MBCT_LEN]; + struct work_struct work; +}; + +static void mlxsw_linecard_bct_event_work(struct work_struct *work) +{ + struct mlxsw_linecard_bct_event *event; + struct mlxsw_core *mlxsw_core; + + event = container_of(work, struct mlxsw_linecard_bct_event, work); + mlxsw_core = event->mlxsw_core; + mlxsw_linecard_bct_process(mlxsw_core, event->mbct_pl); + kfree(event); +} + +static void +mlxsw_linecard_bct_listener_func(const struct mlxsw_reg_info *reg, + char *mbct_pl, void *priv) +{ + struct mlxsw_linecard_bct_event *event; + struct mlxsw_core *mlxsw_core = priv; + + event = kmalloc(sizeof(*event), GFP_ATOMIC); + if (!event) + return; + event->mlxsw_core = mlxsw_core; + memcpy(event->mbct_pl, mbct_pl, sizeof(event->mbct_pl)); + INIT_WORK(&event->work, mlxsw_linecard_bct_event_work); + 
mlxsw_core_schedule_work(&event->work);
+}
+
+static const struct mlxsw_listener mlxsw_linecard_listener[] = {
+	MLXSW_CORE_EVENTL(mlxsw_linecard_status_listener_func, DSDSC),
+	MLXSW_CORE_EVENTL(mlxsw_linecard_bct_listener_func, BCTOE),
+};
+
+static int mlxsw_linecard_event_delivery_set(struct mlxsw_core *mlxsw_core,
+					     struct mlxsw_linecard *linecard,
+					     bool enable)
+{
+	char mddq_pl[MLXSW_REG_MDDQ_LEN];
+
+	mlxsw_reg_mddq_slot_info_pack(mddq_pl, linecard->slot_index, enable);
+	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+}
+
+static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core,
+			       struct mlxsw_linecards *linecards,
+			       u8 slot_index)
+{
+	struct devlink_linecard *devlink_linecard;
+	struct mlxsw_linecard *linecard;
+	int err;
+
+	linecard = mlxsw_linecard_get(linecards, slot_index);
+	linecard->slot_index = slot_index;
+	linecard->linecards = linecards;
+	mutex_init(&linecard->lock);
+	INIT_LIST_HEAD(&linecard->device_list);
+
+	devlink_linecard = devlink_linecard_create(priv_to_devlink(mlxsw_core),
+						   slot_index, &mlxsw_linecard_ops,
+						   linecard);
+	if (IS_ERR(devlink_linecard)) {
+		err = PTR_ERR(devlink_linecard);
+		goto err_devlink_linecard_create;
+	}
+	linecard->devlink_linecard = devlink_linecard;
+	INIT_DELAYED_WORK(&linecard->status_event_to_dw,
+			  &mlxsw_linecard_status_event_to_work);
+
+	err = mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, true);
+	if (err)
+		goto err_event_delivery_set;
+
+	err = mlxsw_linecard_status_get_and_process(mlxsw_core, linecards,
+						    linecard);
+	if (err)
+		goto err_status_get_and_process;
+
+	return 0;
+
+err_status_get_and_process:
+	mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
+err_event_delivery_set:
+	devlink_linecard_destroy(linecard->devlink_linecard);
+err_devlink_linecard_create:
+	mutex_destroy(&linecard->lock);
+	return err;
+}
+
+static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core,
+				struct mlxsw_linecards *linecards,
+				u8 slot_index)
+{
+	struct mlxsw_linecard *linecard;
+
+	linecard = mlxsw_linecard_get(linecards, slot_index);
+	mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
+	cancel_delayed_work_sync(&linecard->status_event_to_dw);
+	/* Make sure all scheduled events are processed */
+	mlxsw_core_flush_owq();
+	if (linecard->active)
+		mlxsw_linecard_active_clear(linecard);
+	mlxsw_linecard_devices_detach(linecard);
+	devlink_linecard_destroy(linecard->devlink_linecard);
+	mutex_destroy(&linecard->lock);
+}
+
+/* LINECARDS INI BUNDLE FILE
+ * +----------------------------------+
+ * | MAGIC ("NVLCINI+")               |
+ * +----------------------------------+ +--------------------+
+ * | INI 0                            +---> | __le16 size        |
+ * +----------------------------------+ | __be16 hw_revision |
+ * | INI 1                            | | __be16 ini_version |
+ * +----------------------------------+ | u8 __dontcare[3]   |
+ * | ...                              | | u8 type            |
+ * +----------------------------------+ | u8 name[20]        |
+ * | INI N                            | | ...                |
+ * +----------------------------------+ +--------------------+
+ */
+
+#define MLXSW_LINECARDS_INI_BUNDLE_MAGIC "NVLCINI+"
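The two boxes above describe the bundle container and one INI record, respectively. For orientation, the record can be thought of as the C layout below; this is an illustrative reconstruction from the diagram and from the accesses the parsing code makes (ini_file->size, ini_file->data, ini_file->format.hw_revision, ini_file->format.name), not necessarily the driver's verbatim definition:

struct mlxsw_linecard_ini_file {	/* hypothetical sketch */
	__le16 size;		/* payload size; records are walked in steps
				 * of size + sizeof(__le16)
				 */
	union {
		u8 data[0];	/* raw INI payload, transferred via MBCT */
		struct {
			__be16 hw_revision;
			__be16 ini_version;
			u8 __dontcare[3];
			u8 type;
			u8 name[20];	/* type name exposed via devlink */
		} format;
	};
};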
+static int
+mlxsw_linecard_types_file_validate(struct mlxsw_linecards *linecards,
+				   struct mlxsw_linecard_types_info *types_info)
+{
+	size_t magic_size = strlen(MLXSW_LINECARDS_INI_BUNDLE_MAGIC);
+	struct mlxsw_linecard_ini_file *ini_file;
+	size_t size = types_info->data_size;
+	const u8 *data = types_info->data;
+	unsigned int count = 0;
+	u16 ini_file_size;
+
+	if (size < magic_size) {
+		dev_warn(linecards->bus_info->dev, "Invalid linecards INIs file size, smaller than magic size\n");
+		return -EINVAL;
+	}
+	if (memcmp(data, MLXSW_LINECARDS_INI_BUNDLE_MAGIC, magic_size)) {
+		dev_warn(linecards->bus_info->dev, "Invalid linecards INIs file magic pattern\n");
+		return -EINVAL;
+	}
+
+	data += magic_size;
+	size -= magic_size;
+
+	while (size > 0) {
+		if (size < sizeof(*ini_file)) {
+			dev_warn(linecards->bus_info->dev, "Linecards INIs file contains INI which is smaller than bare minimum\n");
+			return -EINVAL;
+		}
+		ini_file = (struct mlxsw_linecard_ini_file *) data;
+		ini_file_size = le16_to_cpu(ini_file->size);
+		if (ini_file_size + sizeof(__le16) > size) {
+			dev_warn(linecards->bus_info->dev, "Linecards INIs file appears to be truncated\n");
+			return -EINVAL;
+		}
+		if (ini_file_size % 4) {
+			dev_warn(linecards->bus_info->dev, "Linecards INIs file contains INI with invalid size\n");
+			return -EINVAL;
+		}
+		data += ini_file_size + sizeof(__le16);
+		size -= ini_file_size + sizeof(__le16);
+		count++;
+	}
+	if (!count) {
+		dev_warn(linecards->bus_info->dev, "Linecards INIs file does not contain any INI\n");
+		return -EINVAL;
+	}
+	types_info->count = count;
+	return 0;
+}
+
+static void
+mlxsw_linecard_types_file_parse(struct mlxsw_linecard_types_info *types_info)
+{
+	size_t magic_size = strlen(MLXSW_LINECARDS_INI_BUNDLE_MAGIC);
+	size_t size = types_info->data_size - magic_size;
+	const u8 *data = types_info->data + magic_size;
+	struct mlxsw_linecard_ini_file *ini_file;
+	unsigned int count = 0;
+	u16 ini_file_size;
+	int i;
+
+	while (size) {
+		ini_file = (struct mlxsw_linecard_ini_file *) data;
+		ini_file_size = le16_to_cpu(ini_file->size);
+		for (i = 0; i < ini_file_size / 4; i++) {
+			u32 *val = &((u32 *) ini_file->data)[i];
+
+			*val = swab32(*val);
+		}
+		types_info->ini_files[count] = ini_file;
+		data += ini_file_size + sizeof(__le16);
+		size -= ini_file_size + sizeof(__le16);
+		count++;
+	}
+}
+
+#define MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT \
+	"mellanox/lc_ini_bundle_%u_%u.bin"
+#define MLXSW_LINECARDS_INI_BUNDLE_FILENAME_LEN \
+	(sizeof(MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT) + 4)
+
+static int mlxsw_linecard_types_init(struct mlxsw_core *mlxsw_core,
+				     struct mlxsw_linecards *linecards)
+{
+	const struct mlxsw_fw_rev *rev = &linecards->bus_info->fw_rev;
+	char filename[MLXSW_LINECARDS_INI_BUNDLE_FILENAME_LEN];
+	struct mlxsw_linecard_types_info *types_info;
+	const struct firmware *firmware;
+	int err;
+
+	err = snprintf(filename, sizeof(filename),
+		       MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT,
+		       rev->minor, rev->subminor);
+	WARN_ON(err >= sizeof(filename));
+
+	err = request_firmware_direct(&firmware, filename,
+				      linecards->bus_info->dev);
+	if (err) {
+		dev_warn(linecards->bus_info->dev, "Could not request linecards INI file \"%s\", provisioning will not be possible\n",
+			 filename);
+		return 0;
+	}
+
+	types_info = kzalloc(sizeof(*types_info), GFP_KERNEL);
+	if (!types_info) {
+		release_firmware(firmware);
+		return -ENOMEM;
+	}
+	linecards->types_info =
types_info; + + types_info->data_size = firmware->size; + types_info->data = vmalloc(types_info->data_size); + if (!types_info->data) { + err = -ENOMEM; + release_firmware(firmware); + goto err_data_alloc; + } + memcpy(types_info->data, firmware->data, types_info->data_size); + release_firmware(firmware); + + err = mlxsw_linecard_types_file_validate(linecards, types_info); + if (err) { + err = 0; + goto err_type_file_file_validate; + } + + types_info->ini_files = kmalloc_array(types_info->count, + sizeof(struct mlxsw_linecard_ini_file *), + GFP_KERNEL); + if (!types_info->ini_files) { + err = -ENOMEM; + goto err_ini_files_alloc; + } + + mlxsw_linecard_types_file_parse(types_info); + + return 0; + +err_ini_files_alloc: +err_type_file_file_validate: + vfree(types_info->data); +err_data_alloc: + kfree(types_info); + return err; +} + +static void mlxsw_linecard_types_fini(struct mlxsw_linecards *linecards) +{ + struct mlxsw_linecard_types_info *types_info = linecards->types_info; + + if (!types_info) + return; + kfree(types_info->ini_files); + vfree(types_info->data); + kfree(types_info); +} + +int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *bus_info) +{ + char mgpir_pl[MLXSW_REG_MGPIR_LEN]; + struct mlxsw_linecards *linecards; + u8 slot_count; + int err; + int i; + + mlxsw_reg_mgpir_pack(mgpir_pl, 0); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl); + if (err) + return err; + + mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, + NULL, &slot_count); + if (!slot_count) + return 0; + + linecards = vzalloc(struct_size(linecards, linecards, slot_count)); + if (!linecards) + return -ENOMEM; + linecards->count = slot_count; + linecards->mlxsw_core = mlxsw_core; + linecards->bus_info = bus_info; + INIT_LIST_HEAD(&linecards->event_ops_list); + mutex_init(&linecards->event_ops_list_lock); + + err = mlxsw_linecard_types_init(mlxsw_core, linecards); + if (err) + goto err_types_init; + + err = mlxsw_core_traps_register(mlxsw_core, mlxsw_linecard_listener, + ARRAY_SIZE(mlxsw_linecard_listener), + mlxsw_core); + if (err) + goto err_traps_register; + + mlxsw_core_linecards_set(mlxsw_core, linecards); + + for (i = 0; i < linecards->count; i++) { + err = mlxsw_linecard_init(mlxsw_core, linecards, i + 1); + if (err) + goto err_linecard_init; + } + + return 0; + +err_linecard_init: + for (i--; i >= 0; i--) + mlxsw_linecard_fini(mlxsw_core, linecards, i + 1); + mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener, + ARRAY_SIZE(mlxsw_linecard_listener), + mlxsw_core); +err_traps_register: + mlxsw_linecard_types_fini(linecards); +err_types_init: + vfree(linecards); + return err; +} + +void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core) +{ + struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core); + int i; + + if (!linecards) + return; + for (i = 0; i < linecards->count; i++) + mlxsw_linecard_fini(mlxsw_core, linecards, i + 1); + mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener, + ARRAY_SIZE(mlxsw_linecard_listener), + mlxsw_core); + mlxsw_linecard_types_fini(linecards); + mutex_destroy(&linecards->event_ops_list_lock); + WARN_ON(!list_empty(&linecards->event_ops_list)); + vfree(linecards); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 05f54bd982c0..3548fe1df7c8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -21,7 +21,6 @@ #define MLXSW_THERMAL_ASIC_TEMP_HOT 
105000 /* 105C */ #define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */ #define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2) -#define MLXSW_THERMAL_ZONE_MAX_NAME 16 #define MLXSW_THERMAL_TEMP_SCORE_MAX GENMASK(31, 0) #define MLXSW_THERMAL_MAX_STATE 10 #define MLXSW_THERMAL_MIN_STATE 2 @@ -82,6 +81,16 @@ struct mlxsw_thermal_module { struct thermal_zone_device *tzdev; struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; int module; /* Module or gearbox number */ + u8 slot_index; +}; + +struct mlxsw_thermal_area { + struct mlxsw_thermal_module *tz_module_arr; + u8 tz_module_num; + struct mlxsw_thermal_module *tz_gearbox_arr; + u8 tz_gearbox_num; + u8 slot_index; + bool active; }; struct mlxsw_thermal { @@ -92,12 +101,9 @@ struct mlxsw_thermal { struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX]; u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1]; struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; - struct mlxsw_thermal_module *tz_module_arr; - u8 tz_module_num; - struct mlxsw_thermal_module *tz_gearbox_arr; - u8 tz_gearbox_num; unsigned int tz_highest_score; struct thermal_zone_device *tz_highest_dev; + struct mlxsw_thermal_area line_cards[]; }; static inline u8 mlxsw_state_to_duty(int state) @@ -123,8 +129,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal, /* Allow mlxsw thermal zone binding to an external cooling device */ for (i = 0; i < ARRAY_SIZE(mlxsw_thermal_external_allowed_cdev); i++) { - if (strnstr(cdev->type, mlxsw_thermal_external_allowed_cdev[i], - strlen(cdev->type))) + if (!strcmp(cdev->type, mlxsw_thermal_external_allowed_cdev[i])) return 0; } @@ -150,13 +155,15 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core, * EEPROM if we got valid thresholds from MTMP. */ if (!emerg_temp || !crit_temp) { - err = mlxsw_env_module_temp_thresholds_get(core, tz->module, + err = mlxsw_env_module_temp_thresholds_get(core, tz->slot_index, + tz->module, SFP_TEMP_HIGH_WARN, &crit_temp); if (err) return err; - err = mlxsw_env_module_temp_thresholds_get(core, tz->module, + err = mlxsw_env_module_temp_thresholds_get(core, tz->slot_index, + tz->module, SFP_TEMP_HIGH_ALARM, &emerg_temp); if (err) @@ -271,7 +278,7 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev, int temp; int err; - mlxsw_reg_mtmp_pack(mtmp_pl, 0, false, false); + mlxsw_reg_mtmp_pack(mtmp_pl, 0, 0, false, false); err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl); if (err) { @@ -423,15 +430,16 @@ static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev, static void mlxsw_thermal_module_temp_and_thresholds_get(struct mlxsw_core *core, - u16 sensor_index, int *p_temp, - int *p_crit_temp, + u8 slot_index, u16 sensor_index, + int *p_temp, int *p_crit_temp, int *p_emerg_temp) { char mtmp_pl[MLXSW_REG_MTMP_LEN]; int err; /* Read module temperature and thresholds. */ - mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, false, false); + mlxsw_reg_mtmp_pack(mtmp_pl, slot_index, sensor_index, + false, false); err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl); if (err) { /* Set temperature and thresholds to zero to avoid passing @@ -462,6 +470,7 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev, /* Read module temperature and thresholds. 
*/ mlxsw_thermal_module_temp_and_thresholds_get(thermal->core, + tz->slot_index, sensor_index, &temp, &crit_temp, &emerg_temp); *p_temp = temp; @@ -576,7 +585,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev, int err; index = MLXSW_REG_MTMP_GBOX_INDEX_MIN + tz->module; - mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false); + mlxsw_reg_mtmp_pack(mtmp_pl, tz->slot_index, index, false, false); err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl); if (err) @@ -672,11 +681,15 @@ static const struct thermal_cooling_device_ops mlxsw_cooling_ops = { static int mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz) { - char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME]; + char tz_name[THERMAL_NAME_LENGTH]; int err; - snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d", - module_tz->module + 1); + if (module_tz->slot_index) + snprintf(tz_name, sizeof(tz_name), "mlxsw-lc%d-module%d", + module_tz->slot_index, module_tz->module + 1); + else + snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d", + module_tz->module + 1); module_tz->tzdev = thermal_zone_device_register(tz_name, MLXSW_THERMAL_NUM_TRIPS, MLXSW_THERMAL_TRIP_MASK, @@ -704,25 +717,28 @@ static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev) static int mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core, - struct mlxsw_thermal *thermal, u8 module) + struct mlxsw_thermal *thermal, + struct mlxsw_thermal_area *area, u8 module) { struct mlxsw_thermal_module *module_tz; int dummy_temp, crit_temp, emerg_temp; u16 sensor_index; sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + module; - module_tz = &thermal->tz_module_arr[module]; + module_tz = &area->tz_module_arr[module]; /* Skip if parent is already set (case of port split). */ if (module_tz->parent) return 0; module_tz->module = module; + module_tz->slot_index = area->slot_index; module_tz->parent = thermal; memcpy(module_tz->trips, default_thermal_trips, sizeof(thermal->trips)); /* Initialize all trip point. */ mlxsw_thermal_module_trips_reset(module_tz); /* Read module temperature and thresholds. */ - mlxsw_thermal_module_temp_and_thresholds_get(core, sensor_index, &dummy_temp, + mlxsw_thermal_module_temp_and_thresholds_get(core, area->slot_index, + sensor_index, &dummy_temp, &crit_temp, &emerg_temp); /* Update trip point according to the module data. */ return mlxsw_thermal_module_trips_update(dev, core, module_tz, @@ -740,34 +756,39 @@ static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz) static int mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core, - struct mlxsw_thermal *thermal) + struct mlxsw_thermal *thermal, + struct mlxsw_thermal_area *area) { struct mlxsw_thermal_module *module_tz; char mgpir_pl[MLXSW_REG_MGPIR_LEN]; int i, err; - mlxsw_reg_mgpir_pack(mgpir_pl); + mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index); err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl); if (err) return err; mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, - &thermal->tz_module_num); + &area->tz_module_num, NULL); + + /* For modular system module counter could be zero. 
*/ + if (!area->tz_module_num) + return 0; - thermal->tz_module_arr = kcalloc(thermal->tz_module_num, - sizeof(*thermal->tz_module_arr), - GFP_KERNEL); - if (!thermal->tz_module_arr) + area->tz_module_arr = kcalloc(area->tz_module_num, + sizeof(*area->tz_module_arr), + GFP_KERNEL); + if (!area->tz_module_arr) return -ENOMEM; - for (i = 0; i < thermal->tz_module_num; i++) { - err = mlxsw_thermal_module_init(dev, core, thermal, i); + for (i = 0; i < area->tz_module_num; i++) { + err = mlxsw_thermal_module_init(dev, core, thermal, area, i); if (err) goto err_thermal_module_init; } - for (i = 0; i < thermal->tz_module_num; i++) { - module_tz = &thermal->tz_module_arr[i]; + for (i = 0; i < area->tz_module_num; i++) { + module_tz = &area->tz_module_arr[i]; if (!module_tz->parent) continue; err = mlxsw_thermal_module_tz_init(module_tz); @@ -779,30 +800,35 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core, err_thermal_module_tz_init: err_thermal_module_init: - for (i = thermal->tz_module_num - 1; i >= 0; i--) - mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]); - kfree(thermal->tz_module_arr); + for (i = area->tz_module_num - 1; i >= 0; i--) + mlxsw_thermal_module_fini(&area->tz_module_arr[i]); + kfree(area->tz_module_arr); return err; } static void -mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal) +mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal, + struct mlxsw_thermal_area *area) { int i; - for (i = thermal->tz_module_num - 1; i >= 0; i--) - mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]); - kfree(thermal->tz_module_arr); + for (i = area->tz_module_num - 1; i >= 0; i--) + mlxsw_thermal_module_fini(&area->tz_module_arr[i]); + kfree(area->tz_module_arr); } static int mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz) { - char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME]; + char tz_name[THERMAL_NAME_LENGTH]; int ret; - snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d", - gearbox_tz->module + 1); + if (gearbox_tz->slot_index) + snprintf(tz_name, sizeof(tz_name), "mlxsw-lc%d-gearbox%d", + gearbox_tz->slot_index, gearbox_tz->module + 1); + else + snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d", + gearbox_tz->module + 1); gearbox_tz->tzdev = thermal_zone_device_register(tz_name, MLXSW_THERMAL_NUM_TRIPS, MLXSW_THERMAL_TRIP_MASK, @@ -828,7 +854,8 @@ mlxsw_thermal_gearbox_tz_fini(struct mlxsw_thermal_module *gearbox_tz) static int mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, - struct mlxsw_thermal *thermal) + struct mlxsw_thermal *thermal, + struct mlxsw_thermal_area *area) { enum mlxsw_reg_mgpir_device_type device_type; struct mlxsw_thermal_module *gearbox_tz; @@ -837,30 +864,31 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, int i; int err; - mlxsw_reg_mgpir_pack(mgpir_pl); + mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index); err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl); if (err) return err; mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, - NULL); + NULL, NULL); if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE || !gbox_num) return 0; - thermal->tz_gearbox_num = gbox_num; - thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num, - sizeof(*thermal->tz_gearbox_arr), - GFP_KERNEL); - if (!thermal->tz_gearbox_arr) + area->tz_gearbox_num = gbox_num; + area->tz_gearbox_arr = kcalloc(area->tz_gearbox_num, + sizeof(*area->tz_gearbox_arr), + GFP_KERNEL); + if (!area->tz_gearbox_arr) return -ENOMEM; - for (i = 0; i < thermal->tz_gearbox_num; i++) { 
- gearbox_tz = &thermal->tz_gearbox_arr[i]; + for (i = 0; i < area->tz_gearbox_num; i++) { + gearbox_tz = &area->tz_gearbox_arr[i]; memcpy(gearbox_tz->trips, default_thermal_trips, sizeof(thermal->trips)); gearbox_tz->module = i; gearbox_tz->parent = thermal; + gearbox_tz->slot_index = area->slot_index; err = mlxsw_thermal_gearbox_tz_init(gearbox_tz); if (err) goto err_thermal_gearbox_tz_init; @@ -870,21 +898,80 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, err_thermal_gearbox_tz_init: for (i--; i >= 0; i--) - mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]); - kfree(thermal->tz_gearbox_arr); + mlxsw_thermal_gearbox_tz_fini(&area->tz_gearbox_arr[i]); + kfree(area->tz_gearbox_arr); return err; } static void -mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal) +mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal, + struct mlxsw_thermal_area *area) { int i; - for (i = thermal->tz_gearbox_num - 1; i >= 0; i--) - mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]); - kfree(thermal->tz_gearbox_arr); + for (i = area->tz_gearbox_num - 1; i >= 0; i--) + mlxsw_thermal_gearbox_tz_fini(&area->tz_gearbox_arr[i]); + kfree(area->tz_gearbox_arr); +} + +static void +mlxsw_thermal_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index, + void *priv) +{ + struct mlxsw_thermal *thermal = priv; + struct mlxsw_thermal_area *linecard; + int err; + + linecard = &thermal->line_cards[slot_index]; + + if (linecard->active) + return; + + linecard->slot_index = slot_index; + err = mlxsw_thermal_modules_init(thermal->bus_info->dev, thermal->core, + thermal, linecard); + if (err) { + dev_err(thermal->bus_info->dev, "Failed to configure thermal objects for line card modules in slot %d\n", + slot_index); + return; + } + + err = mlxsw_thermal_gearboxes_init(thermal->bus_info->dev, + thermal->core, thermal, linecard); + if (err) { + dev_err(thermal->bus_info->dev, "Failed to configure thermal objects for line card gearboxes in slot %d\n", + slot_index); + goto err_thermal_linecard_gearboxes_init; + } + + linecard->active = true; + + return; + +err_thermal_linecard_gearboxes_init: + mlxsw_thermal_modules_fini(thermal, linecard); +} + +static void +mlxsw_thermal_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index, + void *priv) +{ + struct mlxsw_thermal *thermal = priv; + struct mlxsw_thermal_area *linecard; + + linecard = &thermal->line_cards[slot_index]; + if (!linecard->active) + return; + linecard->active = false; + mlxsw_thermal_gearboxes_fini(thermal, linecard); + mlxsw_thermal_modules_fini(thermal, linecard); } +static struct mlxsw_linecards_event_ops mlxsw_thermal_event_ops = { + .got_active = mlxsw_thermal_got_active, + .got_inactive = mlxsw_thermal_got_inactive, +}; + int mlxsw_thermal_init(struct mlxsw_core *core, const struct mlxsw_bus_info *bus_info, struct mlxsw_thermal **p_thermal) @@ -892,19 +979,29 @@ int mlxsw_thermal_init(struct mlxsw_core *core, char mfcr_pl[MLXSW_REG_MFCR_LEN] = { 0 }; enum mlxsw_reg_mfcr_pwm_frequency freq; struct device *dev = bus_info->dev; + char mgpir_pl[MLXSW_REG_MGPIR_LEN]; struct mlxsw_thermal *thermal; + u8 pwm_active, num_of_slots; u16 tacho_active; - u8 pwm_active; int err, i; - thermal = devm_kzalloc(dev, sizeof(*thermal), - GFP_KERNEL); + mlxsw_reg_mgpir_pack(mgpir_pl, 0); + err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl); + if (err) + return err; + + mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL, + &num_of_slots); + + thermal = kzalloc(struct_size(thermal, line_cards, num_of_slots + 1), + 
GFP_KERNEL); if (!thermal) return -ENOMEM; thermal->core = core; thermal->bus_info = bus_info; memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips)); + thermal->line_cards[0].slot_index = 0; err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl); if (err) { @@ -970,25 +1067,38 @@ int mlxsw_thermal_init(struct mlxsw_core *core, goto err_thermal_zone_device_register; } - err = mlxsw_thermal_modules_init(dev, core, thermal); + err = mlxsw_thermal_modules_init(dev, core, thermal, + &thermal->line_cards[0]); if (err) goto err_thermal_modules_init; - err = mlxsw_thermal_gearboxes_init(dev, core, thermal); + err = mlxsw_thermal_gearboxes_init(dev, core, thermal, + &thermal->line_cards[0]); if (err) goto err_thermal_gearboxes_init; + err = mlxsw_linecards_event_ops_register(core, + &mlxsw_thermal_event_ops, + thermal); + if (err) + goto err_linecards_event_ops_register; + err = thermal_zone_device_enable(thermal->tzdev); if (err) goto err_thermal_zone_device_enable; + thermal->line_cards[0].active = true; *p_thermal = thermal; return 0; err_thermal_zone_device_enable: - mlxsw_thermal_gearboxes_fini(thermal); + mlxsw_linecards_event_ops_unregister(thermal->core, + &mlxsw_thermal_event_ops, + thermal); +err_linecards_event_ops_register: + mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]); err_thermal_gearboxes_init: - mlxsw_thermal_modules_fini(thermal); + mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]); err_thermal_modules_init: if (thermal->tzdev) { thermal_zone_device_unregister(thermal->tzdev); @@ -1001,7 +1111,7 @@ err_thermal_cooling_device_register: thermal_cooling_device_unregister(thermal->cdevs[i]); err_reg_write: err_reg_query: - devm_kfree(dev, thermal); + kfree(thermal); return err; } @@ -1009,8 +1119,12 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal) { int i; - mlxsw_thermal_gearboxes_fini(thermal); - mlxsw_thermal_modules_fini(thermal); + thermal->line_cards[0].active = false; + mlxsw_linecards_event_ops_unregister(thermal->core, + &mlxsw_thermal_event_ops, + thermal); + mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]); + mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]); if (thermal->tzdev) { thermal_zone_device_unregister(thermal->tzdev); thermal->tzdev = NULL; @@ -1023,5 +1137,5 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal) } } - devm_kfree(thermal->bus_info->dev, thermal); + kfree(thermal); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index 939b692ffc33..ce843ea91464 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client, return 0; errout: + mutex_destroy(&mlxsw_i2c->cmd.lock); i2c_set_clientdata(client, NULL); return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index 3bc012dafd08..d9660d4cce96 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -59,7 +59,8 @@ static int mlxsw_m_port_open(struct net_device *dev) struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev); struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m; - return mlxsw_env_module_port_up(mlxsw_m->core, mlxsw_m_port->module); + return mlxsw_env_module_port_up(mlxsw_m->core, 0, + mlxsw_m_port->module); } static int mlxsw_m_port_stop(struct net_device *dev) @@ -67,7 +68,7 @@ static int mlxsw_m_port_stop(struct net_device *dev) 
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev); struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m; - mlxsw_env_module_port_down(mlxsw_m->core, mlxsw_m_port->module); + mlxsw_env_module_port_down(mlxsw_m->core, 0, mlxsw_m_port->module); return 0; } @@ -110,7 +111,7 @@ static int mlxsw_m_get_module_info(struct net_device *netdev, struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; - return mlxsw_env_get_module_info(netdev, core, mlxsw_m_port->module, + return mlxsw_env_get_module_info(netdev, core, 0, mlxsw_m_port->module, modinfo); } @@ -121,8 +122,8 @@ mlxsw_m_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; - return mlxsw_env_get_module_eeprom(netdev, core, mlxsw_m_port->module, - ee, data); + return mlxsw_env_get_module_eeprom(netdev, core, 0, + mlxsw_m_port->module, ee, data); } static int @@ -133,7 +134,8 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev, struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; - return mlxsw_env_get_module_eeprom_by_page(core, mlxsw_m_port->module, + return mlxsw_env_get_module_eeprom_by_page(core, 0, + mlxsw_m_port->module, page, extack); } @@ -142,7 +144,7 @@ static int mlxsw_m_reset(struct net_device *netdev, u32 *flags) struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; - return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->module, + return mlxsw_env_reset_module(netdev, core, 0, mlxsw_m_port->module, flags); } @@ -154,7 +156,7 @@ mlxsw_m_get_module_power_mode(struct net_device *netdev, struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; - return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->module, + return mlxsw_env_get_module_power_mode(core, 0, mlxsw_m_port->module, params, extack); } @@ -166,7 +168,7 @@ mlxsw_m_set_module_power_mode(struct net_device *netdev, struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; - return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->module, + return mlxsw_env_set_module_power_mode(core, 0, mlxsw_m_port->module, params->policy, extack); } @@ -221,7 +223,7 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module) struct net_device *dev; int err; - err = mlxsw_core_port_init(mlxsw_m->core, local_port, + err = mlxsw_core_port_init(mlxsw_m->core, local_port, 0, module + 1, false, 0, false, 0, mlxsw_m->base_mac, sizeof(mlxsw_m->base_mac)); @@ -311,7 +313,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port, if (WARN_ON_ONCE(module >= max_ports)) return -EINVAL; - mlxsw_env_module_port_map(mlxsw_m->core, module); + mlxsw_env_module_port_map(mlxsw_m->core, 0, module); mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports; return 0; @@ -320,12 +322,13 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port, static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module) { mlxsw_m->module_to_port[module] = -1; - mlxsw_env_module_port_unmap(mlxsw_m->core, module); + mlxsw_env_module_port_unmap(mlxsw_m->core, 0, module); } static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m) { unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core); + struct devlink *devlink = 
priv_to_devlink(mlxsw_m->core);
 	u8 last_module = max_ports;
 	int i;
 	int err;
@@ -354,6 +357,7 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
 	}
 
 	/* Create port objects for each valid entry */
+	devl_lock(devlink);
 	for (i = 0; i < mlxsw_m->max_ports; i++) {
 		if (mlxsw_m->module_to_port[i] > 0 &&
 		    !mlxsw_core_port_is_xm(mlxsw_m->core, i)) {
@@ -364,6 +368,7 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
 				goto err_module_to_port_create;
 		}
 	}
+	devl_unlock(devlink);
 
 	return 0;
 
@@ -373,6 +378,7 @@ err_module_to_port_create:
 			mlxsw_m_port_remove(mlxsw_m,
 					    mlxsw_m->module_to_port[i]);
 	}
+	devl_unlock(devlink);
 	i = max_ports;
 err_module_to_port_map:
 	for (i--; i > 0; i--)
@@ -385,8 +391,10 @@ err_module_to_port_alloc:
 
 static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
 {
+	struct devlink *devlink = priv_to_devlink(mlxsw_m->core);
 	int i;
 
+	devl_lock(devlink);
 	for (i = 0; i < mlxsw_m->max_ports; i++) {
 		if (mlxsw_m->module_to_port[i] > 0) {
 			mlxsw_m_port_remove(mlxsw_m,
@@ -394,6 +402,7 @@ static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
 					    mlxsw_m->module_to_port[i]);
 			mlxsw_m_port_module_unmap(mlxsw_m, i);
 		}
 	}
+	devl_unlock(devlink);
 
 	kfree(mlxsw_m->module_to_port);
 	kfree(mlxsw_m->ports);
@@ -422,7 +431,6 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
 			struct netlink_ext_ack *extack)
 {
 	struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
-	struct devlink *devlink = priv_to_devlink(mlxsw_core);
 	int err;
 
 	mlxsw_m->core = mlxsw_core;
@@ -438,9 +446,7 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
 		return err;
 	}
 
-	devl_lock(devlink);
 	err = mlxsw_m_ports_create(mlxsw_m);
-	devl_unlock(devlink);
 	if (err) {
 		dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
 		return err;
@@ -452,11 +458,8 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
 static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
 {
 	struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
-	struct devlink *devlink = priv_to_devlink(mlxsw_core);
 
-	devl_lock(devlink);
 	mlxsw_m_ports_remove(mlxsw_m);
-	devl_unlock(devlink);
 }
 
 static const struct mlxsw_config_profile mlxsw_m_config_profile;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 67b1a2f8397f..078e3aa04383 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4325,6 +4325,15 @@ MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
  */
 MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
 
+/* reg_pmlp_slot_index
+ * Slot index.
+ * Slot_index = 0 represents the onboard (motherboard).
+ * In case of a non-modular system, only slot_index = 0
+ * is available.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, slot_index, 0x04, 8, 4, 0x04, 0x00, false);
+
 /* reg_pmlp_tx_lane
  * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
* Access: RW @@ -5769,9 +5778,10 @@ enum mlxsw_reg_pmaos_e { */ MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2); -static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module) +static inline void mlxsw_reg_pmaos_pack(char *payload, u8 slot_index, u8 module) { MLXSW_REG_ZERO(pmaos, payload); + mlxsw_reg_pmaos_slot_index_set(payload, slot_index); mlxsw_reg_pmaos_module_set(payload, module); } @@ -5874,6 +5884,69 @@ static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module, mlxsw_reg_pmtdb_num_ports_set(payload, num_ports); } +/* PMECR - Ports Mapping Event Configuration Register + * -------------------------------------------------- + * The PMECR register is used to enable/disable event triggering + * in case of local port mapping change. + */ +#define MLXSW_REG_PMECR_ID 0x501B +#define MLXSW_REG_PMECR_LEN 0x20 + +MLXSW_REG_DEFINE(pmecr, MLXSW_REG_PMECR_ID, MLXSW_REG_PMECR_LEN); + +/* reg_pmecr_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32_LP(reg, pmecr, 0x00, 16, 0x00, 12); + +/* reg_pmecr_ee + * Event update enable. If this bit is set, event generation will be updated + * based on the e field. Only relevant on Set operations. + * Access: WO + */ +MLXSW_ITEM32(reg, pmecr, ee, 0x04, 30, 1); + +/* reg_pmecr_eswi + * Software ignore enable bit. If this bit is set, the value of swi is used. + * If this bit is clear, the value of swi is ignored. + * Only relevant on Set operations. + * Access: WO + */ +MLXSW_ITEM32(reg, pmecr, eswi, 0x04, 24, 1); + +/* reg_pmecr_swi + * Software ignore. If this bit is set, the device shouldn't generate events + * in case of PMLP SET operation but only upon self local port mapping change + * (if applicable according to e configuration). This is supplementary + * configuration on top of e value. + * Access: RW + */ +MLXSW_ITEM32(reg, pmecr, swi, 0x04, 8, 1); + +enum mlxsw_reg_pmecr_e { + MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT, + MLXSW_REG_PMECR_E_GENERATE_EVENT, + MLXSW_REG_PMECR_E_GENERATE_SINGLE_EVENT, +}; + +/* reg_pmecr_e + * Event generation on local port mapping change. + * Access: RW + */ +MLXSW_ITEM32(reg, pmecr, e, 0x04, 0, 2); + +static inline void mlxsw_reg_pmecr_pack(char *payload, u16 local_port, + enum mlxsw_reg_pmecr_e e) +{ + MLXSW_REG_ZERO(pmecr, payload); + mlxsw_reg_pmecr_local_port_set(payload, local_port); + mlxsw_reg_pmecr_e_set(payload, e); + mlxsw_reg_pmecr_ee_set(payload, true); + mlxsw_reg_pmecr_swi_set(payload, true); + mlxsw_reg_pmecr_eswi_set(payload, true); +} + /* PMPE - Port Module Plug/Unplug Event Register * --------------------------------------------- * This register reports any operational status change of a module. @@ -5984,6 +6057,12 @@ MLXSW_REG_DEFINE(pmmp, MLXSW_REG_PMMP_ID, MLXSW_REG_PMMP_LEN); */ MLXSW_ITEM32(reg, pmmp, module, 0x00, 16, 8); +/* reg_pmmp_slot_index + * Slot index. + * Access: Index + */ +MLXSW_ITEM32(reg, pmmp, slot_index, 0x00, 24, 4); + /* reg_pmmp_sticky * When set, will keep eeprom_override values after plug-out event. 
* Access: OP @@ -6011,9 +6090,10 @@ enum { */ MLXSW_ITEM32(reg, pmmp, eeprom_override, 0x04, 0, 16); -static inline void mlxsw_reg_pmmp_pack(char *payload, u8 module) +static inline void mlxsw_reg_pmmp_pack(char *payload, u8 slot_index, u8 module) { MLXSW_REG_ZERO(pmmp, payload); + mlxsw_reg_pmmp_slot_index_set(payload, slot_index); mlxsw_reg_pmmp_module_set(payload, module); } @@ -9721,6 +9801,12 @@ MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7); MLXSW_REG_DEFINE(mtmp, MLXSW_REG_MTMP_ID, MLXSW_REG_MTMP_LEN); +/* reg_mtmp_slot_index + * Slot index (0: Main board). + * Access: Index + */ +MLXSW_ITEM32(reg, mtmp, slot_index, 0x00, 16, 4); + #define MLXSW_REG_MTMP_MODULE_INDEX_MIN 64 #define MLXSW_REG_MTMP_GBOX_INDEX_MIN 256 /* reg_mtmp_sensor_index @@ -9810,11 +9896,12 @@ MLXSW_ITEM32(reg, mtmp, temperature_threshold_lo, 0x10, 0, 16); */ MLXSW_ITEM_BUF(reg, mtmp, sensor_name, 0x18, MLXSW_REG_MTMP_SENSOR_NAME_SIZE); -static inline void mlxsw_reg_mtmp_pack(char *payload, u16 sensor_index, - bool max_temp_enable, +static inline void mlxsw_reg_mtmp_pack(char *payload, u8 slot_index, + u16 sensor_index, bool max_temp_enable, bool max_temp_reset) { MLXSW_REG_ZERO(mtmp, payload); + mlxsw_reg_mtmp_slot_index_set(payload, slot_index); mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index); mlxsw_reg_mtmp_mte_set(payload, max_temp_enable); mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset); @@ -9880,6 +9967,12 @@ MLXSW_ITEM_BIT_ARRAY(reg, mtwe, sensor_warning, 0x0, 0x10, 1); MLXSW_REG_DEFINE(mtbr, MLXSW_REG_MTBR_ID, MLXSW_REG_MTBR_LEN); +/* reg_mtbr_slot_index + * Slot index (0: Main board). + * Access: Index + */ +MLXSW_ITEM32(reg, mtbr, slot_index, 0x00, 16, 4); + /* reg_mtbr_base_sensor_index * Base sensors index to access (0 - ASIC sensor, 1-63 - ambient sensors, * 64-127 are mapped to the SFP+/QSFP modules sequentially). @@ -9912,10 +10005,11 @@ MLXSW_ITEM32_INDEXED(reg, mtbr, rec_max_temp, MLXSW_REG_MTBR_BASE_LEN, 16, MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16, MLXSW_REG_MTBR_REC_LEN, 0x00, false); -static inline void mlxsw_reg_mtbr_pack(char *payload, u16 base_sensor_index, - u8 num_rec) +static inline void mlxsw_reg_mtbr_pack(char *payload, u8 slot_index, + u16 base_sensor_index, u8 num_rec) { MLXSW_REG_ZERO(mtbr, payload); + mlxsw_reg_mtbr_slot_index_set(payload, slot_index); mlxsw_reg_mtbr_base_sensor_index_set(payload, base_sensor_index); mlxsw_reg_mtbr_num_rec_set(payload, num_rec); } @@ -9964,6 +10058,12 @@ MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1); */ MLXSW_ITEM32(reg, mcia, module, 0x00, 16, 8); +/* reg_mcia_slot_index + * Slot index (0: Main board) + * Access: Index + */ +MLXSW_ITEM32(reg, mcia, slot, 0x00, 12, 4); + enum { MLXSW_REG_MCIA_STATUS_GOOD = 0, /* No response from module's EEPROM. 
 	 */
@@ -10063,11 +10163,13 @@ MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE);
 	 MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) / \
 	MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH + 1)
 
-static inline void mlxsw_reg_mcia_pack(char *payload, u8 module, u8 lock,
-				       u8 page_number, u16 device_addr,
-				       u8 size, u8 i2c_device_addr)
+static inline void mlxsw_reg_mcia_pack(char *payload, u8 slot_index, u8 module,
+				       u8 lock, u8 page_number,
+				       u16 device_addr, u8 size,
+				       u8 i2c_device_addr)
 {
 	MLXSW_REG_ZERO(mcia, payload);
+	mlxsw_reg_mcia_slot_set(payload, slot_index);
 	mlxsw_reg_mcia_module_set(payload, module);
 	mlxsw_reg_mcia_l_set(payload, lock);
 	mlxsw_reg_mcia_page_number_set(payload, page_number);
@@ -10499,6 +10601,12 @@ MLXSW_REG_DEFINE(mcion, MLXSW_REG_MCION_ID, MLXSW_REG_MCION_LEN);
  */
 MLXSW_ITEM32(reg, mcion, module, 0x00, 16, 8);
 
+/* reg_mcion_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcion, slot_index, 0x00, 12, 4);
+
 enum {
 	MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK = BIT(0),
 	MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK = BIT(8),
@@ -10510,9 +10618,10 @@ enum {
  */
 MLXSW_ITEM32(reg, mcion, module_status_bits, 0x04, 0, 16);
 
-static inline void mlxsw_reg_mcion_pack(char *payload, u8 module)
+static inline void mlxsw_reg_mcion_pack(char *payload, u8 slot_index, u8 module)
 {
 	MLXSW_REG_ZERO(mcion, payload);
+	mlxsw_reg_mcion_slot_index_set(payload, slot_index);
 	mlxsw_reg_mcion_module_set(payload, module);
 }
 
@@ -11326,6 +11435,12 @@ enum mlxsw_reg_mgpir_device_type {
 	MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE,
 };
 
+/* mgpir_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mgpir, slot_index, 0x00, 28, 4);
+
 /* mgpir_device_type
  * Access: RO
  */
@@ -11343,21 +11458,35 @@ MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8);
  */
 MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8);
 
+/* mgpir_max_modules_per_slot
+ * Maximum number of modules that can be connected per slot.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, max_modules_per_slot, 0x04, 16, 8);
+
+/* mgpir_num_of_slots
+ * Number of slots in the system.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, num_of_slots, 0x04, 8, 8);
+
 /* mgpir_num_of_modules
  * Number of modules.
  * Access: RO
  */
 MLXSW_ITEM32(reg, mgpir, num_of_modules, 0x04, 0, 8);
 
-static inline void mlxsw_reg_mgpir_pack(char *payload)
+static inline void mlxsw_reg_mgpir_pack(char *payload, u8 slot_index)
 {
 	MLXSW_REG_ZERO(mgpir, payload);
+	mlxsw_reg_mgpir_slot_index_set(payload, slot_index);
 }
 
 static inline void
 mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
 		       enum mlxsw_reg_mgpir_device_type *device_type,
-		       u8 *devices_per_flash, u8 *num_of_modules)
+		       u8 *devices_per_flash, u8 *num_of_modules,
+		       u8 *num_of_slots)
 {
 	if (num_of_devices)
 		*num_of_devices = mlxsw_reg_mgpir_num_of_devices_get(payload);
@@ -11368,6 +11497,393 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
 			mlxsw_reg_mgpir_devices_per_flash_get(payload);
 	if (num_of_modules)
 		*num_of_modules = mlxsw_reg_mgpir_num_of_modules_get(payload);
+	if (num_of_slots)
+		*num_of_slots = mlxsw_reg_mgpir_num_of_slots_get(payload);
+}
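Taken together, the new slot_index argument and the num_of_slots output make MGPIR the entry point for discovering a modular system's layout. As a minimal illustration of the pack/unpack pair above (mirroring what mlxsw_linecards_init() and mlxsw_thermal_init() do elsewhere in this patch; mlxsw_core is assumed to be in scope):

	char mgpir_pl[MLXSW_REG_MGPIR_LEN];
	u8 num_of_slots;
	int err;

	/* Querying slot 0 (the main board) reports the system-wide
	 * number of line-card slots.
	 */
	mlxsw_reg_mgpir_pack(mgpir_pl, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
	if (err)
		return err;
	mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL,
			       &num_of_slots);
	/* num_of_slots == 0 indicates a fixed, non-modular system. */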
+
+/* MBCT - Management Binary Code Transfer Register
+ * -----------------------------------------------
+ * This register allows the host to transfer binary code to the
+ * management FW in chunks of at most 1KB.
+ */
+#define MLXSW_REG_MBCT_ID 0x9120
+#define MLXSW_REG_MBCT_LEN 0x420
+
+MLXSW_REG_DEFINE(mbct, MLXSW_REG_MBCT_ID, MLXSW_REG_MBCT_LEN);
+
+/* reg_mbct_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mbct, slot_index, 0x00, 0, 4);
+
+/* reg_mbct_data_size
+ * Actual data field size in bytes for the current data transfer.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, data_size, 0x04, 0, 11);
+
+enum mlxsw_reg_mbct_op {
+	MLXSW_REG_MBCT_OP_ERASE_INI_IMAGE = 1,
+	MLXSW_REG_MBCT_OP_DATA_TRANSFER, /* Download */
+	MLXSW_REG_MBCT_OP_ACTIVATE,
+	MLXSW_REG_MBCT_OP_CLEAR_ERRORS = 6,
+	MLXSW_REG_MBCT_OP_QUERY_STATUS,
+};
+
+/* reg_mbct_op
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, op, 0x08, 28, 4);
+
+/* reg_mbct_last
+ * Indicates that the current data field is the last chunk of the INI.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, last, 0x08, 26, 1);
+
+/* reg_mbct_oee
+ * Opcode Event Enable. When set, a BCTOE event will be sent once the
+ * opcode has been executed and the fsm_state has changed.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, oee, 0x08, 25, 1);
+
+enum mlxsw_reg_mbct_status {
+	/* Partial data transfer completed successfully and ready for next
+	 * data transfer.
+	 */
+	MLXSW_REG_MBCT_STATUS_PART_DATA = 2,
+	MLXSW_REG_MBCT_STATUS_LAST_DATA,
+	MLXSW_REG_MBCT_STATUS_ERASE_COMPLETE,
+	/* Error - trying to erase INI while it is being used. */
+	MLXSW_REG_MBCT_STATUS_ERROR_INI_IN_USE,
+	/* Last data transfer completed, applying magic pattern. */
+	MLXSW_REG_MBCT_STATUS_ERASE_FAILED = 7,
+	MLXSW_REG_MBCT_STATUS_INI_ERROR,
+	MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED,
+	MLXSW_REG_MBCT_STATUS_ILLEGAL_OPERATION = 11,
+};
+
+/* reg_mbct_status
+ * Status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mbct, status, 0x0C, 24, 5);
+
+enum mlxsw_reg_mbct_fsm_state {
+	MLXSW_REG_MBCT_FSM_STATE_INI_IN_USE = 5,
+	MLXSW_REG_MBCT_FSM_STATE_ERROR,
+};
+
+/* reg_mbct_fsm_state
+ * FSM state.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mbct, fsm_state, 0x0C, 16, 4);
+
+#define MLXSW_REG_MBCT_DATA_LEN 1024
+
+/* reg_mbct_data
+ * Up to 1KB of data.
+ * Access: WO
+ */
+MLXSW_ITEM_BUF(reg, mbct, data, 0x20, MLXSW_REG_MBCT_DATA_LEN);
+
+static inline void mlxsw_reg_mbct_pack(char *payload, u8 slot_index,
+				       enum mlxsw_reg_mbct_op op, bool oee)
+{
+	MLXSW_REG_ZERO(mbct, payload);
+	mlxsw_reg_mbct_slot_index_set(payload, slot_index);
+	mlxsw_reg_mbct_op_set(payload, op);
+	mlxsw_reg_mbct_oee_set(payload, oee);
+}
+
+static inline void mlxsw_reg_mbct_dt_pack(char *payload,
+					  u16 data_size, bool last,
+					  const char *data)
+{
+	if (WARN_ON(data_size > MLXSW_REG_MBCT_DATA_LEN))
+		return;
+	mlxsw_reg_mbct_data_size_set(payload, data_size);
+	mlxsw_reg_mbct_last_set(payload, last);
+	mlxsw_reg_mbct_data_memcpy_to(payload, data);
+}
+
+static inline void
+mlxsw_reg_mbct_unpack(const char *payload, u8 *p_slot_index,
+		      enum mlxsw_reg_mbct_status *p_status,
+		      enum mlxsw_reg_mbct_fsm_state *p_fsm_state)
+{
+	if (p_slot_index)
+		*p_slot_index = mlxsw_reg_mbct_slot_index_get(payload);
+	*p_status = mlxsw_reg_mbct_status_get(payload);
+	if (p_fsm_state)
+		*p_fsm_state = mlxsw_reg_mbct_fsm_state_get(payload);
+}
+
+/* MDDQ - Management DownStream Device Query Register
+ * --------------------------------------------------
+ * This register allows querying the DownStream device properties. The
+ * desired information is selected by the query_type field and is
+ * delivered in 32B data blocks.
+ */
+#define MLXSW_REG_MDDQ_ID 0x9161
+#define MLXSW_REG_MDDQ_LEN 0x30
+
+MLXSW_REG_DEFINE(mddq, MLXSW_REG_MDDQ_ID, MLXSW_REG_MDDQ_LEN);
+
+/* reg_mddq_sie
+ * Slot info event enable.
+ * When set to '1', each change in the slot_info.provisioned / sr_valid / + * active / ready will generate a DSDSC event. + * Access: RW + */ +MLXSW_ITEM32(reg, mddq, sie, 0x00, 31, 1); + +enum mlxsw_reg_mddq_query_type { + MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO = 1, + MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO, /* If there are no devices + * on the slot, data_valid + * will be '0'. + */ + MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME, +}; + +/* reg_mddq_query_type + * Access: Index + */ +MLXSW_ITEM32(reg, mddq, query_type, 0x00, 16, 8); + +/* reg_mddq_slot_index + * Slot index. 0 is reserved. + * Access: Index + */ +MLXSW_ITEM32(reg, mddq, slot_index, 0x00, 0, 4); + +/* reg_mddq_response_msg_seq + * Response message sequential number. For a specific request, the response + * message sequential number is the following one. In addition, the last + * message should be 0. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, response_msg_seq, 0x04, 16, 8); + +/* reg_mddq_request_msg_seq + * Request message sequential number. + * The first message number should be 0. + * Access: Index + */ +MLXSW_ITEM32(reg, mddq, request_msg_seq, 0x04, 0, 8); + +/* reg_mddq_data_valid + * If set, the data in the data field is valid and contain the information + * for the queried index. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, data_valid, 0x08, 31, 1); + +/* reg_mddq_slot_info_provisioned + * If set, the INI file is applied and the card is provisioned. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, slot_info_provisioned, 0x10, 31, 1); + +/* reg_mddq_slot_info_sr_valid + * If set, Shift Register is valid (after being provisioned) and data + * can be sent from the switch ASIC to the line-card CPLD over Shift-Register. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, slot_info_sr_valid, 0x10, 30, 1); + +enum mlxsw_reg_mddq_slot_info_ready { + MLXSW_REG_MDDQ_SLOT_INFO_READY_NOT_READY, + MLXSW_REG_MDDQ_SLOT_INFO_READY_READY, + MLXSW_REG_MDDQ_SLOT_INFO_READY_ERROR, +}; + +/* reg_mddq_slot_info_lc_ready + * If set, the LC is powered on, matching the INI version and a new FW + * version can be burnt (if necessary). + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, slot_info_lc_ready, 0x10, 28, 2); + +/* reg_mddq_slot_info_active + * If set, the FW has completed the MDDC.device_enable command. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, slot_info_active, 0x10, 27, 1); + +/* reg_mddq_slot_info_hw_revision + * Major user-configured version number of the current INI file. + * Valid only when active or ready are '1'. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, slot_info_hw_revision, 0x14, 16, 16); + +/* reg_mddq_slot_info_ini_file_version + * User-configured version number of the current INI file. + * Valid only when active or lc_ready are '1'. 
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_ini_file_version, 0x14, 0, 16);
+
+/* reg_mddq_slot_info_card_type
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_card_type, 0x18, 0, 8);
+
+static inline void
+__mlxsw_reg_mddq_pack(char *payload, u8 slot_index,
+		      enum mlxsw_reg_mddq_query_type query_type)
+{
+	MLXSW_REG_ZERO(mddq, payload);
+	mlxsw_reg_mddq_slot_index_set(payload, slot_index);
+	mlxsw_reg_mddq_query_type_set(payload, query_type);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_info_pack(char *payload, u8 slot_index, bool sie)
+{
+	__mlxsw_reg_mddq_pack(payload, slot_index,
+			      MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO);
+	mlxsw_reg_mddq_sie_set(payload, sie);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_info_unpack(const char *payload, u8 *p_slot_index,
+				bool *p_provisioned, bool *p_sr_valid,
+				enum mlxsw_reg_mddq_slot_info_ready *p_lc_ready,
+				bool *p_active, u16 *p_hw_revision,
+				u16 *p_ini_file_version,
+				u8 *p_card_type)
+{
+	*p_slot_index = mlxsw_reg_mddq_slot_index_get(payload);
+	*p_provisioned = mlxsw_reg_mddq_slot_info_provisioned_get(payload);
+	*p_sr_valid = mlxsw_reg_mddq_slot_info_sr_valid_get(payload);
+	*p_lc_ready = mlxsw_reg_mddq_slot_info_lc_ready_get(payload);
+	*p_active = mlxsw_reg_mddq_slot_info_active_get(payload);
+	*p_hw_revision = mlxsw_reg_mddq_slot_info_hw_revision_get(payload);
+	*p_ini_file_version = mlxsw_reg_mddq_slot_info_ini_file_version_get(payload);
+	*p_card_type = mlxsw_reg_mddq_slot_info_card_type_get(payload);
+}
+
+/* reg_mddq_device_info_flash_owner
+ * If set, the device is the flash owner. Otherwise, a shared flash
+ * is used by this device (another device is the flash owner).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_flash_owner, 0x10, 30, 1);
+
+/* reg_mddq_device_info_device_index
+ * Device index. The first device is numbered 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_device_index, 0x10, 0, 8);
+
+/* reg_mddq_device_info_fw_major
+ * Major FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_major, 0x14, 16, 16);
+
+/* reg_mddq_device_info_fw_minor
+ * Minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_minor, 0x18, 16, 16);
+
+/* reg_mddq_device_info_fw_sub_minor
+ * Sub-minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_sub_minor, 0x18, 0, 16);
+
+static inline void
+mlxsw_reg_mddq_device_info_pack(char *payload, u8 slot_index,
+				u8 request_msg_seq)
+{
+	__mlxsw_reg_mddq_pack(payload, slot_index,
+			      MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO);
+	mlxsw_reg_mddq_request_msg_seq_set(payload, request_msg_seq);
+}
+
+static inline void
+mlxsw_reg_mddq_device_info_unpack(const char *payload, u8 *p_response_msg_seq,
+				  bool *p_data_valid, bool *p_flash_owner,
+				  u8 *p_device_index, u16 *p_fw_major,
+				  u16 *p_fw_minor, u16 *p_fw_sub_minor)
+{
+	*p_response_msg_seq = mlxsw_reg_mddq_response_msg_seq_get(payload);
+	*p_data_valid = mlxsw_reg_mddq_data_valid_get(payload);
+	if (p_flash_owner)
+		*p_flash_owner = mlxsw_reg_mddq_device_info_flash_owner_get(payload);
+	*p_device_index = mlxsw_reg_mddq_device_info_device_index_get(payload);
+	if (p_fw_major)
+		*p_fw_major = mlxsw_reg_mddq_device_info_fw_major_get(payload);
+	if (p_fw_minor)
+		*p_fw_minor = mlxsw_reg_mddq_device_info_fw_minor_get(payload);
+	if (p_fw_sub_minor)
+		*p_fw_sub_minor = mlxsw_reg_mddq_device_info_fw_sub_minor_get(payload);
+}
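The request_msg_seq/response_msg_seq fields defined earlier give device-info queries a simple pagination protocol: the caller starts at sequence 0, each response carries the sequence number to request next, and a response of 0 marks the last block. A sketch of the resulting polling loop (hypothetical caller; slot_index and mlxsw_core are assumed to be in scope, and the driver's actual enumeration code is not part of this hunk):

	char mddq_pl[MLXSW_REG_MDDQ_LEN];
	u8 msg_seq = 0;

	do {
		u8 device_index;
		bool data_valid;
		int err;

		mlxsw_reg_mddq_device_info_pack(mddq_pl, slot_index, msg_seq);
		err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
		if (err)
			return err;
		mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq,
						  &data_valid, NULL,
						  &device_index, NULL,
						  NULL, NULL);
		if (!data_valid)
			break;	/* no device populates this slot */
		/* ... record one downstream device here ... */
	} while (msg_seq);	/* last response carries msg_seq == 0 */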
+ * Access: RO + */ +MLXSW_ITEM_BUF(reg, mddq, slot_ascii_name, 0x10, + MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN); + +static inline void +mlxsw_reg_mddq_slot_name_pack(char *payload, u8 slot_index) +{ + __mlxsw_reg_mddq_pack(payload, slot_index, + MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME); +} + +static inline void +mlxsw_reg_mddq_slot_name_unpack(const char *payload, char *slot_ascii_name) +{ + mlxsw_reg_mddq_slot_ascii_name_memcpy_from(payload, slot_ascii_name); +} + +/* MDDC - Management DownStream Device Control Register + * ---------------------------------------------------- + * This register allows to control downstream devices and line cards. + */ +#define MLXSW_REG_MDDC_ID 0x9163 +#define MLXSW_REG_MDDC_LEN 0x30 + +MLXSW_REG_DEFINE(mddc, MLXSW_REG_MDDC_ID, MLXSW_REG_MDDC_LEN); + +/* reg_mddc_slot_index + * Slot index. 0 is reserved. + * Access: Index + */ +MLXSW_ITEM32(reg, mddc, slot_index, 0x00, 0, 4); + +/* reg_mddc_rst + * Reset request. + * Access: OP + */ +MLXSW_ITEM32(reg, mddc, rst, 0x04, 29, 1); + +/* reg_mddc_device_enable + * When set, FW is the manager and allowed to program the downstream device. + * Access: RW + */ +MLXSW_ITEM32(reg, mddc, device_enable, 0x04, 28, 1); + +static inline void mlxsw_reg_mddc_pack(char *payload, u8 slot_index, bool rst, + bool device_enable) +{ + MLXSW_REG_ZERO(mddc, payload); + mlxsw_reg_mddc_slot_index_set(payload, slot_index); + mlxsw_reg_mddc_rst_set(payload, rst); + mlxsw_reg_mddc_device_enable_set(payload, device_enable); } /* MFDE - Monitoring FW Debug Register @@ -12125,6 +12641,12 @@ static inline void mlxsw_reg_tidem_pack(char *payload, u8 underlay_ecn, MLXSW_REG_DEFINE(sbpr, MLXSW_REG_SBPR_ID, MLXSW_REG_SBPR_LEN); +/* reg_sbpr_desc + * When set, configures descriptor buffer. + * Access: Index + */ +MLXSW_ITEM32(reg, sbpr, desc, 0x00, 31, 1); + /* shared direstion enum for SBPR, SBCM, SBPM */ enum mlxsw_reg_sbxx_dir { MLXSW_REG_SBXX_DIR_INGRESS, @@ -12619,6 +13141,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(pmaos), MLXSW_REG(pplr), MLXSW_REG(pmtdb), + MLXSW_REG(pmecr), MLXSW_REG(pmpe), MLXSW_REG(pddr), MLXSW_REG(pmmp), @@ -12688,6 +13211,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mtptpt), MLXSW_REG(mfgd), MLXSW_REG(mgpir), + MLXSW_REG(mbct), + MLXSW_REG(mddq), + MLXSW_REG(mddc), MLXSW_REG(mfde), MLXSW_REG(tngcr), MLXSW_REG(tnumt), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 8eb05090ffec..ac6348e2ff1f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -89,6 +89,11 @@ static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { "." __stringify(MLXSW_SP_FWREV_MINOR) \ "." 
__stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2" +#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \ + "mellanox/lc_ini_bundle_" \ + __stringify(MLXSW_SP_FWREV_MINOR) "_" \ + __stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin" + static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; @@ -481,23 +486,22 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) } static int -mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, - struct mlxsw_sp_port_mapping *port_mapping) +mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp, + u16 local_port, char *pmlp_pl, + struct mlxsw_sp_port_mapping *port_mapping) { - char pmlp_pl[MLXSW_REG_PMLP_LEN]; bool separate_rxtx; + u8 first_lane; + u8 slot_index; u8 module; u8 width; - int err; int i; - mlxsw_reg_pmlp_pack(pmlp_pl, local_port); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); - if (err) - return err; module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); + slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0); width = mlxsw_reg_pmlp_width_get(pmlp_pl); separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl); + first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); if (width && !is_power_of_2(width)) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n", @@ -511,6 +515,11 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, local_port); return -EINVAL; } + if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n", + local_port); + return -EINVAL; + } if (separate_rxtx && mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) { @@ -518,7 +527,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, local_port); return -EINVAL; } - if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) { + if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n", local_port); return -EINVAL; @@ -526,6 +535,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, } port_mapping->module = module; + port_mapping->slot_index = slot_index; port_mapping->width = width; port_mapping->module_width = width; port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); @@ -533,17 +543,35 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, } static int +mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, + struct mlxsw_sp_port_mapping *port_mapping) +{ + char pmlp_pl[MLXSW_REG_PMLP_LEN]; + int err; + + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + if (err) + return err; + return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port, + pmlp_pl, port_mapping); +} + +static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port, const struct mlxsw_sp_port_mapping *port_mapping) { char pmlp_pl[MLXSW_REG_PMLP_LEN]; int i, err; - mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module); + mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index, + port_mapping->module); mlxsw_reg_pmlp_pack(pmlp_pl, local_port); mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); for (i = 0; i < port_mapping->width; i++) { + 
mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i, + port_mapping->slot_index); mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ } @@ -554,19 +582,20 @@ mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port, return 0; err_pmlp_write: - mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module); + mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index, + port_mapping->module); return err; } static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port, - u8 module) + u8 slot_index, u8 module) { char pmlp_pl[MLXSW_REG_PMLP_LEN]; mlxsw_reg_pmlp_pack(pmlp_pl, local_port); mlxsw_reg_pmlp_width_set(pmlp_pl, 0); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); - mlxsw_env_module_port_unmap(mlxsw_sp->core, module); + mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module); } static int mlxsw_sp_port_open(struct net_device *dev) @@ -576,6 +605,7 @@ static int mlxsw_sp_port_open(struct net_device *dev) int err; err = mlxsw_env_module_port_up(mlxsw_sp->core, + mlxsw_sp_port->mapping.slot_index, mlxsw_sp_port->mapping.module); if (err) return err; @@ -587,6 +617,7 @@ static int mlxsw_sp_port_open(struct net_device *dev) err_port_admin_status_set: mlxsw_env_module_port_down(mlxsw_sp->core, + mlxsw_sp_port->mapping.slot_index, mlxsw_sp_port->mapping.module); return err; } @@ -599,6 +630,7 @@ static int mlxsw_sp_port_stop(struct net_device *dev) netif_stop_queue(dev); mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); mlxsw_env_module_port_down(mlxsw_sp->core, + mlxsw_sp_port->mapping.slot_index, mlxsw_sp_port->mapping.module); return 0; } @@ -1445,12 +1477,13 @@ static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; u8 module = mlxsw_sp_port->mapping.module; u64 overheat_counter; int err; - err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module, - &overheat_counter); + err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index, + module, &overheat_counter); if (err) return err; @@ -1525,7 +1558,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port, } splittable = lanes > 1 && !split; - err = mlxsw_core_port_init(mlxsw_sp->core, local_port, + err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index, port_number, split, split_port_subnumber, splittable, lanes, mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac)); @@ -1775,13 +1808,16 @@ err_port_label_info_get: mlxsw_sp_port_swid_set(mlxsw_sp, local_port, MLXSW_PORT_SWID_DISABLED_PORT); err_port_swid_set: - mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module); + mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, + port_mapping->slot_index, + port_mapping->module); return err; } static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; u8 module = mlxsw_sp_port->mapping.module; cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); @@ -1804,7 +1840,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port) mlxsw_core_port_fini(mlxsw_sp->core, local_port); mlxsw_sp_port_swid_set(mlxsw_sp, local_port, MLXSW_PORT_SWID_DISABLED_PORT); - 
mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module); + mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module); } static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) @@ -1858,21 +1894,148 @@ static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port) return mlxsw_sp->ports[local_port] != NULL; } +static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp, + u16 local_port, bool enable) +{ + char pmecr_pl[MLXSW_REG_PMECR_LEN]; + + mlxsw_reg_pmecr_pack(pmecr_pl, local_port, + enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT : + MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl); +} + +struct mlxsw_sp_port_mapping_event { + struct list_head list; + char pmlp_pl[MLXSW_REG_PMLP_LEN]; +}; + +static void mlxsw_sp_port_mapping_events_work(struct work_struct *work) +{ + struct mlxsw_sp_port_mapping_event *event, *next_event; + struct mlxsw_sp_port_mapping_events *events; + struct mlxsw_sp_port_mapping port_mapping; + struct mlxsw_sp *mlxsw_sp; + struct devlink *devlink; + LIST_HEAD(event_queue); + u16 local_port; + int err; + + events = container_of(work, struct mlxsw_sp_port_mapping_events, work); + mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events); + devlink = priv_to_devlink(mlxsw_sp->core); + + spin_lock_bh(&events->queue_lock); + list_splice_init(&events->queue, &event_queue); + spin_unlock_bh(&events->queue_lock); + + list_for_each_entry_safe(event, next_event, &event_queue, list) { + local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl); + err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port, + event->pmlp_pl, &port_mapping); + if (err) + goto out; + + if (WARN_ON_ONCE(!port_mapping.width)) + goto out; + + devl_lock(devlink); + + if (!mlxsw_sp_port_created(mlxsw_sp, local_port)) + mlxsw_sp_port_create(mlxsw_sp, local_port, + false, &port_mapping); + else + WARN_ON_ONCE(1); + + devl_unlock(devlink); + + mlxsw_sp->port_mapping[local_port] = port_mapping; + +out: + kfree(event); + } +} + +static void +mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg, + char *pmlp_pl, void *priv) +{ + struct mlxsw_sp_port_mapping_events *events; + struct mlxsw_sp_port_mapping_event *event; + struct mlxsw_sp *mlxsw_sp = priv; + u16 local_port; + + local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl); + if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port))) + return; + + events = &mlxsw_sp->port_mapping_events; + event = kmalloc(sizeof(*event), GFP_ATOMIC); + if (!event) + return; + memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl)); + spin_lock(&events->queue_lock); + list_add_tail(&event->list, &events->queue); + spin_unlock(&events->queue_lock); + mlxsw_core_schedule_work(&events->work); +} + +static void +__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_port_mapping_event *event, *next_event; + struct mlxsw_sp_port_mapping_events *events; + + events = &mlxsw_sp->port_mapping_events; + + /* Caller needs to make sure that no new event is going to appear. 
*/ + cancel_work_sync(&events->work); + list_for_each_entry_safe(event, next_event, &events->queue, list) { + list_del(&event->list); + kfree(event); + } +} + static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) { + unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); int i; - for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) + for (i = 1; i < max_ports; i++) + mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false); + /* Make sure all scheduled events are processed */ + __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp); + + devl_lock(devlink); + for (i = 1; i < max_ports; i++) if (mlxsw_sp_port_created(mlxsw_sp, i)) mlxsw_sp_port_remove(mlxsw_sp, i); mlxsw_sp_cpu_port_remove(mlxsw_sp); + devl_unlock(devlink); kfree(mlxsw_sp->ports); mlxsw_sp->ports = NULL; } +static void +mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core, + bool (*selector)(void *priv, u16 local_port), + void *priv) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core); + int i; + + for (i = 1; i < max_ports; i++) + if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i)) + mlxsw_sp_port_remove(mlxsw_sp, i); +} + static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) { unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + struct mlxsw_sp_port_mapping_events *events; struct mlxsw_sp_port_mapping *port_mapping; size_t alloc_size; int i; @@ -1883,26 +2046,46 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) if (!mlxsw_sp->ports) return -ENOMEM; + events = &mlxsw_sp->port_mapping_events; + INIT_LIST_HEAD(&events->queue); + spin_lock_init(&events->queue_lock); + INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work); + + for (i = 1; i < max_ports; i++) { + err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true); + if (err) + goto err_event_enable; + } + + devl_lock(devlink); err = mlxsw_sp_cpu_port_create(mlxsw_sp); if (err) goto err_cpu_port_create; for (i = 1; i < max_ports; i++) { - port_mapping = mlxsw_sp->port_mapping[i]; - if (!port_mapping) + port_mapping = &mlxsw_sp->port_mapping[i]; + if (!port_mapping->width) continue; err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping); if (err) goto err_port_create; } + devl_unlock(devlink); return 0; err_port_create: for (i--; i >= 1; i--) if (mlxsw_sp_port_created(mlxsw_sp, i)) mlxsw_sp_port_remove(mlxsw_sp, i); + i = max_ports; mlxsw_sp_cpu_port_remove(mlxsw_sp); err_cpu_port_create: + devl_unlock(devlink); +err_event_enable: + for (i--; i >= 1; i--) + mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false); + /* Make sure all scheduled events are processed */ + __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp); kfree(mlxsw_sp->ports); mlxsw_sp->ports = NULL; return err; @@ -1911,12 +2094,12 @@ err_cpu_port_create: static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) { unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); - struct mlxsw_sp_port_mapping port_mapping; + struct mlxsw_sp_port_mapping *port_mapping; int i; int err; mlxsw_sp->port_mapping = kcalloc(max_ports, - sizeof(struct mlxsw_sp_port_mapping *), + sizeof(struct mlxsw_sp_port_mapping), GFP_KERNEL); if (!mlxsw_sp->port_mapping) return -ENOMEM; @@ -1925,36 +2108,20 @@ static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) if (mlxsw_core_port_is_xm(mlxsw_sp->core, i)) continue; - err = 
mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping); + port_mapping = &mlxsw_sp->port_mapping[i]; + err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping); if (err) goto err_port_module_info_get; - if (!port_mapping.width) - continue; - - mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping, - sizeof(port_mapping), - GFP_KERNEL); - if (!mlxsw_sp->port_mapping[i]) { - err = -ENOMEM; - goto err_port_module_info_dup; - } } return 0; err_port_module_info_get: -err_port_module_info_dup: - for (i--; i >= 1; i--) - kfree(mlxsw_sp->port_mapping[i]); kfree(mlxsw_sp->port_mapping); return err; } static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) { - int i; - - for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) - kfree(mlxsw_sp->port_mapping[i]); kfree(mlxsw_sp->port_mapping); } @@ -2004,8 +2171,8 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, for (i = 0; i < count; i++) { u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); - port_mapping = mlxsw_sp->port_mapping[local_port]; - if (!port_mapping || !mlxsw_sp_local_port_valid(local_port)) + port_mapping = &mlxsw_sp->port_mapping[local_port]; + if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port)) continue; mlxsw_sp_port_create(mlxsw_sp, local_port, false, port_mapping); @@ -2045,7 +2212,8 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port, return -EINVAL; } - mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, + mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index, + mlxsw_sp_port->mapping.module, mlxsw_sp_port->mapping.module_width / count, count); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); @@ -2080,6 +2248,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port, err_port_split_create: mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); + return err; } @@ -2109,7 +2278,8 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port, count = mlxsw_sp_port->mapping.module_width / mlxsw_sp_port->mapping.width; - mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, + mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index, + mlxsw_sp_port->mapping.module, mlxsw_sp_port->mapping.module_width / count, count); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); @@ -2300,6 +2470,11 @@ static const struct mlxsw_listener mlxsw_sp1_listener[] = { MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), }; +static const struct mlxsw_listener mlxsw_sp2_listener[] = { + /* Events */ + MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE), +}; + static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); @@ -2818,7 +2993,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); - struct devlink *devlink = priv_to_devlink(mlxsw_core); int err; mlxsw_sp->core = mlxsw_core; @@ -2979,9 +3153,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_sample_trigger_init; } - devl_lock(devlink); err = mlxsw_sp_ports_create(mlxsw_sp); - devl_unlock(devlink); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); goto err_ports_create; @@ -3094,6 +3266,8 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; mlxsw_sp->router_ops 
= &mlxsw_sp2_router_ops; + mlxsw_sp->listeners = mlxsw_sp2_listener; + mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); @@ -3124,6 +3298,8 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; + mlxsw_sp->listeners = mlxsw_sp2_listener; + mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); @@ -3154,6 +3330,8 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; + mlxsw_sp->listeners = mlxsw_sp2_listener; + mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); @@ -3162,12 +3340,8 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core, static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); - struct devlink *devlink = priv_to_devlink(mlxsw_core); - devl_lock(devlink); mlxsw_sp_ports_remove(mlxsw_sp); - devl_unlock(devlink); - rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); mlxsw_sp_port_module_info_fini(mlxsw_sp); mlxsw_sp_dpipe_fini(mlxsw_sp); @@ -3645,6 +3819,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = { .fini = mlxsw_sp_fini, .port_split = mlxsw_sp_port_split, .port_unsplit = mlxsw_sp_port_unsplit, + .ports_remove_selected = mlxsw_sp_ports_remove_selected, .sb_pool_get = mlxsw_sp_sb_pool_get, .sb_pool_set = mlxsw_sp_sb_pool_set, .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, @@ -3682,6 +3857,7 @@ static struct mlxsw_driver mlxsw_sp3_driver = { .fini = mlxsw_sp_fini, .port_split = mlxsw_sp_port_split, .port_unsplit = mlxsw_sp_port_unsplit, + .ports_remove_selected = mlxsw_sp_ports_remove_selected, .sb_pool_get = mlxsw_sp_sb_pool_get, .sb_pool_set = mlxsw_sp_sb_pool_set, .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, @@ -3717,6 +3893,7 @@ static struct mlxsw_driver mlxsw_sp4_driver = { .fini = mlxsw_sp_fini, .port_split = mlxsw_sp_port_split, .port_unsplit = mlxsw_sp_port_unsplit, + .ports_remove_selected = mlxsw_sp_ports_remove_selected, .sb_pool_get = mlxsw_sp_sb_pool_get, .sb_pool_set = mlxsw_sp_sb_pool_set, .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, @@ -5024,3 +5201,4 @@ MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table); MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME); +MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 20588e699588..2ad29ae1c640 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -145,11 +145,18 @@ struct mlxsw_sp_mall_entry; struct mlxsw_sp_port_mapping { u8 module; + u8 slot_index; u8 width; /* Number of lanes used by the port */ u8 module_width; /* Number of lanes in the module (static) */ u8 lane; }; +struct mlxsw_sp_port_mapping_events { + struct list_head queue; + spinlock_t queue_lock; /* protects queue */ + struct work_struct work; +}; + struct mlxsw_sp_parsing { 
refcount_t parsing_depth_ref; u16 parsing_depth; @@ -164,7 +171,8 @@ struct mlxsw_sp { unsigned char base_mac[ETH_ALEN]; const unsigned char *mac_mask; struct mlxsw_sp_upper *lags; - struct mlxsw_sp_port_mapping **port_mapping; + struct mlxsw_sp_port_mapping *port_mapping; + struct mlxsw_sp_port_mapping_events port_mapping_events; struct rhashtable sample_trigger_ht; struct mlxsw_sp_sb *sb; struct mlxsw_sp_bridge *bridge; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 98f26f596e30..c68fc8f7ca99 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -202,6 +202,21 @@ static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index, return 0; } +static int mlxsw_sp_sb_pr_desc_write(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_reg_sbxx_dir dir, + enum mlxsw_reg_sbpr_mode mode, + u32 size, bool infi_size) +{ + char sbpr_pl[MLXSW_REG_SBPR_LEN]; + + /* The FW default descriptor buffer configuration uses only pool 14 for + * descriptors. + */ + mlxsw_reg_sbpr_pack(sbpr_pl, 14, dir, mode, size, infi_size); + mlxsw_reg_sbpr_desc_set(sbpr_pl, true); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl); +} + static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port, u8 pg_buff, u32 min_buff, u32 max_buff, bool infi_max, u16 pool_index) @@ -775,6 +790,17 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, if (err) return err; } + + err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS, + MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true); + if (err) + return err; + + err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS, + MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true); + if (err) + return err; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 8b5d7f83b9b0..915dffb85a1c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -568,14 +568,14 @@ struct mlxsw_sp_port_stats { static u64 mlxsw_sp_port_get_transceiver_overheat_stats(struct mlxsw_sp_port *mlxsw_sp_port) { - struct mlxsw_sp_port_mapping port_mapping = mlxsw_sp_port->mapping; struct mlxsw_core *mlxsw_core = mlxsw_sp_port->mlxsw_sp->core; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; + u8 module = mlxsw_sp_port->mapping.module; u64 stats; int err; - err = mlxsw_env_module_overheat_counter_get(mlxsw_core, - port_mapping.module, - &stats); + err = mlxsw_env_module_overheat_counter_get(mlxsw_core, slot_index, + module, &stats); if (err) return mlxsw_sp_port->module_overheat_initial_val; @@ -1036,6 +1036,7 @@ static int mlxsw_sp_get_module_info(struct net_device *netdev, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; return mlxsw_env_get_module_info(netdev, mlxsw_sp->core, + mlxsw_sp_port->mapping.slot_index, mlxsw_sp_port->mapping.module, modinfo); } @@ -1045,10 +1046,11 @@ static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; + u8 module = mlxsw_sp_port->mapping.module; - return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, - mlxsw_sp_port->mapping.module, ee, - data); + return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, slot_index, + module, ee, data); } static int @@ 
-1058,10 +1060,11 @@ mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev, { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; u8 module = mlxsw_sp_port->mapping.module; - return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, module, page, - extack); + return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, slot_index, + module, page, extack); } static int @@ -1202,9 +1205,11 @@ static int mlxsw_sp_reset(struct net_device *dev, u32 *flags) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; u8 module = mlxsw_sp_port->mapping.module; - return mlxsw_env_reset_module(dev, mlxsw_sp->core, module, flags); + return mlxsw_env_reset_module(dev, mlxsw_sp->core, slot_index, + module, flags); } static int @@ -1214,10 +1219,11 @@ mlxsw_sp_get_module_power_mode(struct net_device *dev, { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; u8 module = mlxsw_sp_port->mapping.module; - return mlxsw_env_get_module_power_mode(mlxsw_sp->core, module, params, - extack); + return mlxsw_env_get_module_power_mode(mlxsw_sp->core, slot_index, + module, params, extack); } static int @@ -1227,10 +1233,11 @@ mlxsw_sp_set_module_power_mode(struct net_device *dev, { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; u8 module = mlxsw_sp_port->mapping.module; - return mlxsw_env_set_module_power_mode(mlxsw_sp->core, module, - params->policy, extack); + return mlxsw_env_set_module_power_mode(mlxsw_sp->core, slot_index, + module, params->policy, extack); } const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 79fd486e29e3..dc820d9f2696 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -21,6 +21,7 @@ #include <net/netevent.h> #include <net/neighbour.h> #include <net/arp.h> +#include <net/inet_dscp.h> #include <net/ip_fib.h> #include <net/ip6_fib.h> #include <net/nexthop.h> @@ -507,7 +508,7 @@ struct mlxsw_sp_fib4_entry { struct mlxsw_sp_fib_entry common; struct fib_info *fi; u32 tb_id; - u8 tos; + dscp_t dscp; u8 type; }; @@ -5559,7 +5560,7 @@ mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry, common); - return !fib4_entry->tos; + return !fib4_entry->dscp; } static bool @@ -5620,7 +5621,7 @@ mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp, fri.tb_id = fen_info->tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = fen_info->dst_len; - fri.tos = fen_info->tos; + fri.dscp = fen_info->dscp; fri.type = fen_info->type; fri.offload = false; fri.trap = false; @@ -5645,7 +5646,7 @@ mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp, fri.tb_id = fib4_entry->tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = dst_len; - fri.tos = fib4_entry->tos; + fri.dscp = fib4_entry->dscp; fri.type = fib4_entry->type; fri.offload = should_offload; fri.trap = !should_offload; @@ -5668,7 +5669,7 @@ mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp, fri.tb_id = 
fib4_entry->tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = dst_len; - fri.tos = fib4_entry->tos; + fri.dscp = fib4_entry->dscp; fri.type = fib4_entry->type; fri.offload = false; fri.trap = false; @@ -6250,7 +6251,7 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, fib_info_hold(fib4_entry->fi); fib4_entry->tb_id = fen_info->tb_id; fib4_entry->type = fen_info->type; - fib4_entry->tos = fen_info->tos; + fib4_entry->dscp = fen_info->dscp; fib_entry->fib_node = fib_node; @@ -6304,7 +6305,7 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp, fib4_entry = container_of(fib_node->fib_entry, struct mlxsw_sp_fib4_entry, common); if (fib4_entry->tb_id == fen_info->tb_id && - fib4_entry->tos == fen_info->tos && + fib4_entry->dscp == fen_info->dscp && fib4_entry->type == fen_info->type && fib4_entry->fi == fen_info->fi) return fib4_entry; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index b73466470f75..fe663b0ab708 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -423,7 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev, parms = mlxsw_sp_ipip_netdev_parms4(to_dev); ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp, - 0, 0, parms.link, tun->fwmark, 0); + 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0); rt = ip_route_output_key(tun->net, &fl4); if (IS_ERR(rt)) diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 9e070ab3ed76..d888498aed33 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -133,6 +133,12 @@ enum mlxsw_event_trap_id { MLXSW_TRAP_ID_PTP_ING_FIFO = 0x2D, /* PTP Egress FIFO has a new entry */ MLXSW_TRAP_ID_PTP_EGR_FIFO = 0x2E, + /* Downstream Device Status Change */ + MLXSW_TRAP_ID_DSDSC = 0x321, + /* Binary Code Transfer Operation Executed Event */ + MLXSW_TRAP_ID_BCTOE = 0x322, + /* Port mapping change */ + MLXSW_TRAP_ID_PMLPE = 0x32E, }; #endif /* _MLXSW_TRAP_H */ diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig index 93df3049cdc0..830363bafcce 100644 --- a/drivers/net/ethernet/micrel/Kconfig +++ b/drivers/net/ethernet/micrel/Kconfig @@ -28,6 +28,7 @@ config KS8842 config KS8851 tristate "Micrel KS8851 SPI" depends on SPI + depends on PTP_1588_CLOCK_OPTIONAL select MII select CRC32 select EEPROM_93CX6 @@ -39,6 +40,7 @@ config KS8851 config KS8851_MLL tristate "Micrel KS8851 MLL" depends on HAS_IOMEM + depends on PTP_1588_CLOCK_OPTIONAL select MII select CRC32 select EEPROM_93CX6 diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile index a9ffc719aa0e..fd2e0ebb2427 100644 --- a/drivers/net/ethernet/microchip/lan966x/Makefile +++ b/drivers/net/ethernet/microchip/lan966x/Makefile @@ -8,4 +8,4 @@ obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \ lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \ lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \ - lan966x_ptp.o + lan966x_ptp.o lan966x_fdma.o diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c new file mode 100644 index 000000000000..9e2a7323eaf0 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c @@ -0,0 +1,842 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + 
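For orientation: the new lan966x_fdma.c below drives extraction (RX) and injection (TX) through rings of DMA control blocks (DCBs), each carrying data blocks (DBs) and chained to its successor through a nextptr field. A minimal sketch of the ring arithmetic the file relies on, with hypothetical helper names that are not part of the patch:

/* FDMA_DCB_MAX (512) is a power of two, so a ring index wraps with a
 * mask rather than a modulo, and the hardware chain pointer for entry i
 * is the coherent DMA base address plus that entry's byte offset.
 */
static inline dma_addr_t fdma_dcb_dma_addr(dma_addr_t base, int i)
{
	return base + i * sizeof(struct lan966x_rx_dcb);
}

static inline int fdma_next_dcb_index(int i)
{
	return (i + 1) & (FDMA_DCB_MAX - 1);
}

lan966x_fdma_rx_add_dcb() and lan966x_fdma_napi_poll() below apply this wrap-and-offset scheme when relinking consumed DCBs to the tail of the ring.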
+static int lan966x_fdma_channel_active(struct lan966x *lan966x) +{ + return lan_rd(lan966x, FDMA_CH_ACTIVE); +} + +static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx, + struct lan966x_db *db) +{ + struct lan966x *lan966x = rx->lan966x; + dma_addr_t dma_addr; + struct page *page; + + page = dev_alloc_pages(rx->page_order); + if (unlikely(!page)) + return NULL; + + dma_addr = dma_map_page(lan966x->dev, page, 0, + PAGE_SIZE << rx->page_order, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(lan966x->dev, dma_addr))) + goto free_page; + + db->dataptr = dma_addr; + + return page; + +free_page: + __free_pages(page, rx->page_order); + return NULL; +} + +static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + struct lan966x_rx_dcb *dcb; + struct lan966x_db *db; + int i, j; + + for (i = 0; i < FDMA_DCB_MAX; ++i) { + dcb = &rx->dcbs[i]; + + for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) { + db = &dcb->db[j]; + dma_unmap_single(lan966x->dev, + (dma_addr_t)db->dataptr, + PAGE_SIZE << rx->page_order, + DMA_FROM_DEVICE); + __free_pages(rx->page[i][j], rx->page_order); + } + } +} + +static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx, + struct lan966x_rx_dcb *dcb, + u64 nextptr) +{ + struct lan966x_db *db; + int i; + + for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) { + db = &dcb->db[i]; + db->status = FDMA_DCB_STATUS_INTR; + } + + dcb->nextptr = FDMA_DCB_INVALID_DATA; + dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order); + + rx->last_entry->nextptr = nextptr; + rx->last_entry = dcb; +} + +static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + struct lan966x_rx_dcb *dcb; + struct lan966x_db *db; + struct page *page; + int i, j; + int size; + + /* calculate how many pages are needed to allocate the dcbs */ + size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + + rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL); + if (!rx->dcbs) + return -ENOMEM; + + rx->last_entry = rx->dcbs; + rx->db_index = 0; + rx->dcb_index = 0; + + /* Now for each dcb allocate the dbs */ + for (i = 0; i < FDMA_DCB_MAX; ++i) { + dcb = &rx->dcbs[i]; + dcb->info = 0; + + /* For each db allocate a page and map it to the DB dataptr. 
+		 */
+		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
+			db = &dcb->db[j];
+			page = lan966x_fdma_rx_alloc_page(rx, db);
+			if (!page)
+				return -ENOMEM;
+
+			db->status = 0;
+			rx->page[i][j] = page;
+		}
+
+		lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
+	}
+
+	return 0;
+}
+
+static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
+{
+	struct lan966x *lan966x = rx->lan966x;
+	u32 size;
+
+	/* Now it is possible to clean up the dcbs */
+	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+	size = ALIGN(size, PAGE_SIZE);
+	dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
+}
+
+static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
+{
+	struct lan966x *lan966x = rx->lan966x;
+	u32 mask;
+
+	/* When activating a channel, the address of the first DCB has to be
+	 * written before the channel is activated
+	 */
+	lan_wr(lower_32_bits((u64)rx->dma), lan966x,
+	       FDMA_DCB_LLP(rx->channel_id));
+	lan_wr(upper_32_bits((u64)rx->dma), lan966x,
+	       FDMA_DCB_LLP1(rx->channel_id));
+
+	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+	       FDMA_CH_CFG_CH_MEM_SET(1),
+	       lan966x, FDMA_CH_CFG(rx->channel_id));
+
+	/* Start fdma */
+	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
+		FDMA_PORT_CTRL_XTR_STOP,
+		lan966x, FDMA_PORT_CTRL(0));
+
+	/* Enable interrupts */
+	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+	mask |= BIT(rx->channel_id);
+	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+		FDMA_INTR_DB_ENA_INTR_DB_ENA,
+		lan966x, FDMA_INTR_DB_ENA);
+
+	/* Activate the channel */
+	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
+		FDMA_CH_ACTIVATE_CH_ACTIVATE,
+		lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
+{
+	struct lan966x *lan966x = rx->lan966x;
+	u32 val;
+
+	/* Disable the channel */
+	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
+		FDMA_CH_DISABLE_CH_DISABLE,
+		lan966x, FDMA_CH_DISABLE);
+
+	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+				  val, !(val & BIT(rx->channel_id)),
+				  READL_SLEEP_US, READL_TIMEOUT_US);
+
+	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
+		FDMA_CH_DB_DISCARD_DB_DISCARD,
+		lan966x, FDMA_CH_DB_DISCARD);
+}
+
+static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
+{
+	struct lan966x *lan966x = rx->lan966x;
+
+	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
+		FDMA_CH_RELOAD_CH_RELOAD,
+		lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
+				    struct lan966x_tx_dcb *dcb)
+{
+	dcb->nextptr = FDMA_DCB_INVALID_DATA;
+	dcb->info = 0;
+}
+
+static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
+{
+	struct lan966x *lan966x = tx->lan966x;
+	struct lan966x_tx_dcb *dcb;
+	struct lan966x_db *db;
+	int size;
+	int i, j;
+
+	tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
+			       GFP_KERNEL);
+	if (!tx->dcbs_buf)
+		return -ENOMEM;
+
+	/* calculate how many pages are needed to allocate the dcbs */
+	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+	size = ALIGN(size, PAGE_SIZE);
+	tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
+	if (!tx->dcbs)
+		goto out;
+
+	/* Now for each dcb allocate the db */
+	for (i = 0; i < FDMA_DCB_MAX; ++i) {
+		dcb = &tx->dcbs[i];
+
+		for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
+			db = &dcb->db[j];
+			db->dataptr = 0;
+			db->status = 0;
+		}
+
+		lan966x_fdma_tx_add_dcb(tx, dcb);
+	}
+
+	return 0;
+
+out:
+	kfree(tx->dcbs_buf);
+	return -ENOMEM;
+}
+
+static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
+{
+	struct lan966x *lan966x = tx->lan966x;
+	int size;
+
+	kfree(tx->dcbs_buf);
+
+	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+	size = ALIGN(size, PAGE_SIZE);
+	dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
+}
+
+static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
+{
+	struct lan966x *lan966x = tx->lan966x;
+	u32 mask;
+
+	/* When activating a channel, the address of the first DCB has to be
+	 * written before the channel is activated
+	 */
+	lan_wr(lower_32_bits((u64)tx->dma), lan966x,
+	       FDMA_DCB_LLP(tx->channel_id));
+	lan_wr(upper_32_bits((u64)tx->dma), lan966x,
+	       FDMA_DCB_LLP1(tx->channel_id));
+
+	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+	       FDMA_CH_CFG_CH_MEM_SET(1),
+	       lan966x, FDMA_CH_CFG(tx->channel_id));
+
+	/* Start fdma */
+	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
+		FDMA_PORT_CTRL_INJ_STOP,
+		lan966x, FDMA_PORT_CTRL(0));
+
+	/* Enable interrupts */
+	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+	mask |= BIT(tx->channel_id);
+	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+		FDMA_INTR_DB_ENA_INTR_DB_ENA,
+		lan966x, FDMA_INTR_DB_ENA);
+
+	/* Activate the channel */
+	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
+		FDMA_CH_ACTIVATE_CH_ACTIVATE,
+		lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
+{
+	struct lan966x *lan966x = tx->lan966x;
+	u32 val;
+
+	/* Disable the channel */
+	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
+		FDMA_CH_DISABLE_CH_DISABLE,
+		lan966x, FDMA_CH_DISABLE);
+
+	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+				  val, !(val & BIT(tx->channel_id)),
+				  READL_SLEEP_US, READL_TIMEOUT_US);
+
+	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
+		FDMA_CH_DB_DISCARD_DB_DISCARD,
+		lan966x, FDMA_CH_DB_DISCARD);
+
+	tx->activated = false;
+}
+
+static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
+{
+	struct lan966x *lan966x = tx->lan966x;
+
+	/* Write the registers to reload the channel */
+	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
+		FDMA_CH_RELOAD_CH_RELOAD,
+		lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
+{
+	struct lan966x_port *port;
+	int i;
+
+	for (i = 0; i < lan966x->num_phys_ports; ++i) {
+		port = lan966x->ports[i];
+		if (!port)
+			continue;
+
+		if (netif_queue_stopped(port->dev))
+			netif_wake_queue(port->dev);
+	}
+}
+
+static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
+{
+	struct lan966x_port *port;
+	int i;
+
+	for (i = 0; i < lan966x->num_phys_ports; ++i) {
+		port = lan966x->ports[i];
+		if (!port)
+			continue;
+
+		netif_stop_queue(port->dev);
+	}
+}
+
+static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
+{
+	struct lan966x_tx *tx = &lan966x->tx;
+	struct lan966x_tx_dcb_buf *dcb_buf;
+	struct lan966x_db *db;
+	unsigned long flags;
+	bool clear = false;
+	int i;
+
+	spin_lock_irqsave(&lan966x->tx_lock, flags);
+	for (i = 0; i < FDMA_DCB_MAX; ++i) {
+		dcb_buf = &tx->dcbs_buf[i];
+
+		if (!dcb_buf->used)
+			continue;
+
+		db = &tx->dcbs[i].db[0];
+		if (!(db->status & FDMA_DCB_STATUS_DONE))
+			continue;
+
+		dcb_buf->dev->stats.tx_packets++;
+		dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len;
+
+		dcb_buf->used = false;
+		dma_unmap_single(lan966x->dev,
+				 dcb_buf->dma_addr,
+				 dcb_buf->skb->len,
+				 DMA_TO_DEVICE);
+		if
(!dcb_buf->ptp) + dev_kfree_skb_any(dcb_buf->skb); + + clear = true; + } + + if (clear) + lan966x_fdma_wakeup_netdev(lan966x); + + spin_unlock_irqrestore(&lan966x->tx_lock, flags); +} + +static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx) +{ + struct lan966x_db *db; + + /* Check if there is any data */ + db = &rx->dcbs[rx->dcb_index].db[rx->db_index]; + if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE))) + return false; + + return true; +} + +static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + u64 src_port, timestamp; + struct lan966x_db *db; + struct sk_buff *skb; + struct page *page; + + /* Get the received frame and unmap it */ + db = &rx->dcbs[rx->dcb_index].db[rx->db_index]; + page = rx->page[rx->dcb_index][rx->db_index]; + skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order); + if (unlikely(!skb)) + goto unmap_page; + + dma_unmap_single(lan966x->dev, (dma_addr_t)db->dataptr, + FDMA_DCB_STATUS_BLOCKL(db->status), + DMA_FROM_DEVICE); + skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status)); + + lan966x_ifh_get_src_port(skb->data, &src_port); + lan966x_ifh_get_timestamp(skb->data, ×tamp); + + WARN_ON(src_port >= lan966x->num_phys_ports); + + skb->dev = lan966x->ports[src_port]->dev; + skb_pull(skb, IFH_LEN * sizeof(u32)); + + if (likely(!(skb->dev->features & NETIF_F_RXFCS))) + skb_trim(skb, skb->len - ETH_FCS_LEN); + + lan966x_ptp_rxtstamp(lan966x, skb, timestamp); + skb->protocol = eth_type_trans(skb, skb->dev); + + if (lan966x->bridge_mask & BIT(src_port)) { + skb->offload_fwd_mark = 1; + + skb_reset_network_header(skb); + if (!lan966x_hw_offload(lan966x, src_port, skb)) + skb->offload_fwd_mark = 0; + } + + skb->dev->stats.rx_bytes += skb->len; + skb->dev->stats.rx_packets++; + + return skb; + +unmap_page: + dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr, + FDMA_DCB_STATUS_BLOCKL(db->status), + DMA_FROM_DEVICE); + __free_pages(page, rx->page_order); + + return NULL; +} + +static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight) +{ + struct lan966x *lan966x = container_of(napi, struct lan966x, napi); + struct lan966x_rx *rx = &lan966x->rx; + int dcb_reload = rx->dcb_index; + struct lan966x_rx_dcb *old_dcb; + struct lan966x_db *db; + struct sk_buff *skb; + struct page *page; + int counter = 0; + u64 nextptr; + + lan966x_fdma_tx_clear_buf(lan966x, weight); + + /* Get all received skb */ + while (counter < weight) { + if (!lan966x_fdma_rx_more_frames(rx)) + break; + + skb = lan966x_fdma_rx_get_frame(rx); + + rx->page[rx->dcb_index][rx->db_index] = NULL; + rx->dcb_index++; + rx->dcb_index &= FDMA_DCB_MAX - 1; + + if (!skb) + break; + + napi_gro_receive(&lan966x->napi, skb); + counter++; + } + + /* Allocate new pages and map them */ + while (dcb_reload != rx->dcb_index) { + db = &rx->dcbs[dcb_reload].db[rx->db_index]; + page = lan966x_fdma_rx_alloc_page(rx, db); + if (unlikely(!page)) + break; + rx->page[dcb_reload][rx->db_index] = page; + + old_dcb = &rx->dcbs[dcb_reload]; + dcb_reload++; + dcb_reload &= FDMA_DCB_MAX - 1; + + nextptr = rx->dma + ((unsigned long)old_dcb - + (unsigned long)rx->dcbs); + lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr); + lan966x_fdma_rx_reload(rx); + } + + if (counter < weight && napi_complete_done(napi, counter)) + lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA); + + return counter; +} + +irqreturn_t lan966x_fdma_irq_handler(int irq, void *args) +{ + struct lan966x *lan966x = args; + u32 db, err, err_type; + + db = lan_rd(lan966x, FDMA_INTR_DB); + err = 
lan_rd(lan966x, FDMA_INTR_ERR);
+
+	if (db) {
+		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
+		lan_wr(db, lan966x, FDMA_INTR_DB);
+
+		napi_schedule(&lan966x->napi);
+	}
+
+	if (err) {
+		err_type = lan_rd(lan966x, FDMA_ERRORS);
+
+		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);
+
+		lan_wr(err, lan966x, FDMA_INTR_ERR);
+		lan_wr(err_type, lan966x, FDMA_ERRORS);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
+{
+	struct lan966x_tx_dcb_buf *dcb_buf;
+	int i;
+
+	for (i = 0; i < FDMA_DCB_MAX; ++i) {
+		dcb_buf = &tx->dcbs_buf[i];
+		if (!dcb_buf->used && i != tx->last_in_use)
+			return i;
+	}
+
+	return -1;
+}
+
+int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
+{
+	struct lan966x_port *port = netdev_priv(dev);
+	struct lan966x *lan966x = port->lan966x;
+	struct lan966x_tx_dcb_buf *next_dcb_buf;
+	struct lan966x_tx_dcb *next_dcb, *dcb;
+	struct lan966x_tx *tx = &lan966x->tx;
+	struct lan966x_db *next_db;
+	int needed_headroom;
+	int needed_tailroom;
+	dma_addr_t dma_addr;
+	int next_to_use;
+	int err;
+
+	/* Get next index */
+	next_to_use = lan966x_fdma_get_next_dcb(tx);
+	if (next_to_use < 0) {
+		netif_stop_queue(dev);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (skb_put_padto(skb, ETH_ZLEN)) {
+		dev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	/* skb processing */
+	needed_headroom = max_t(int, IFH_LEN * sizeof(u32) - skb_headroom(skb), 0);
+	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+				       GFP_ATOMIC);
+		if (unlikely(err)) {
+			dev->stats.tx_dropped++;
+			err = NETDEV_TX_OK;
+			goto release;
+		}
+	}
+
+	skb_tx_timestamp(skb);
+	skb_push(skb, IFH_LEN * sizeof(u32));
+	memcpy(skb->data, ifh, IFH_LEN * sizeof(u32));
+	skb_put(skb, 4);
+
+	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
+				  DMA_TO_DEVICE);
+	if (dma_mapping_error(lan966x->dev, dma_addr)) {
+		dev->stats.tx_dropped++;
+		err = NETDEV_TX_OK;
+		goto release;
+	}
+
+	/* Setup next dcb */
+	next_dcb = &tx->dcbs[next_to_use];
+	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
+
+	next_db = &next_dcb->db[0];
+	next_db->dataptr = dma_addr;
+	next_db->status = FDMA_DCB_STATUS_SOF |
+			  FDMA_DCB_STATUS_EOF |
+			  FDMA_DCB_STATUS_INTR |
+			  FDMA_DCB_STATUS_BLOCKO(0) |
+			  FDMA_DCB_STATUS_BLOCKL(skb->len);
+
+	/* Fill up the buffer */
+	next_dcb_buf = &tx->dcbs_buf[next_to_use];
+	next_dcb_buf->skb = skb;
+	next_dcb_buf->dma_addr = dma_addr;
+	next_dcb_buf->used = true;
+	next_dcb_buf->ptp = false;
+	next_dcb_buf->dev = dev;
+
+	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+		next_dcb_buf->ptp = true;
+
+	if (likely(lan966x->tx.activated)) {
+		/* Connect the current dcb to the next dcb */
+		dcb = &tx->dcbs[tx->last_in_use];
+		dcb->nextptr = tx->dma + (next_to_use *
+					  sizeof(struct lan966x_tx_dcb));
+
+		lan966x_fdma_tx_reload(tx);
+	} else {
+		/* Because this is the first use, just activate */
+		lan966x->tx.activated = true;
+		lan966x_fdma_tx_activate(tx);
+	}
+
+	/* Move to the next dcb because this one is now the last in use */
+	tx->last_in_use = next_to_use;
+
+	return NETDEV_TX_OK;
+
+release:
+	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+		lan966x_ptp_txtstamp_release(port, skb);
+
+	dev_kfree_skb_any(skb);
+	return err;
+}
+
+static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
+{
+	int max_mtu = 0;
+ int i; + + for (i = 0; i < lan966x->num_phys_ports; ++i) { + int mtu; + + if (!lan966x->ports[i]) + continue; + + mtu = lan966x->ports[i]->dev->mtu; + if (mtu > max_mtu) + max_mtu = mtu; + } + + return max_mtu; +} + +static int lan966x_qsys_sw_status(struct lan966x *lan966x) +{ + return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT)); +} + +static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) +{ + void *rx_dcbs, *tx_dcbs, *tx_dcbs_buf; + dma_addr_t rx_dma, tx_dma; + u32 size; + int err; + + /* Store these for later to free them */ + rx_dma = lan966x->rx.dma; + tx_dma = lan966x->tx.dma; + rx_dcbs = lan966x->rx.dcbs; + tx_dcbs = lan966x->tx.dcbs; + tx_dcbs_buf = lan966x->tx.dcbs_buf; + + napi_synchronize(&lan966x->napi); + napi_disable(&lan966x->napi); + lan966x_fdma_stop_netdev(lan966x); + + lan966x_fdma_rx_disable(&lan966x->rx); + lan966x_fdma_rx_free_pages(&lan966x->rx); + lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1; + err = lan966x_fdma_rx_alloc(&lan966x->rx); + if (err) + goto restore; + lan966x_fdma_rx_start(&lan966x->rx); + + size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma); + + lan966x_fdma_tx_disable(&lan966x->tx); + err = lan966x_fdma_tx_alloc(&lan966x->tx); + if (err) + goto restore_tx; + + size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + dma_free_coherent(lan966x->dev, size, tx_dcbs, tx_dma); + + kfree(tx_dcbs_buf); + + lan966x_fdma_wakeup_netdev(lan966x); + napi_enable(&lan966x->napi); + + return err; +restore: + lan966x->rx.dma = rx_dma; + lan966x->tx.dma = tx_dma; + lan966x_fdma_rx_start(&lan966x->rx); + +restore_tx: + lan966x->rx.dcbs = rx_dcbs; + lan966x->tx.dcbs = tx_dcbs; + lan966x->tx.dcbs_buf = tx_dcbs_buf; + + return err; +} + +int lan966x_fdma_change_mtu(struct lan966x *lan966x) +{ + int max_mtu; + int err; + u32 val; + + max_mtu = lan966x_fdma_get_max_mtu(lan966x); + max_mtu += IFH_LEN * sizeof(u32); + + if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 == + lan966x->rx.page_order) + return 0; + + /* Disable the CPU port */ + lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0), + QSYS_SW_PORT_MODE_PORT_ENA, + lan966x, QSYS_SW_PORT_MODE(CPU_PORT)); + + /* Flush the CPU queues */ + readx_poll_timeout(lan966x_qsys_sw_status, lan966x, + val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)), + READL_SLEEP_US, READL_TIMEOUT_US); + + /* Add a sleep in case there are frames between the queues and the CPU + * port + */ + usleep_range(1000, 2000); + + err = lan966x_fdma_reload(lan966x, max_mtu); + + /* Enable back the CPU port */ + lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1), + QSYS_SW_PORT_MODE_PORT_ENA, + lan966x, QSYS_SW_PORT_MODE(CPU_PORT)); + + return err; +} + +void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev) +{ + if (lan966x->fdma_ndev) + return; + + lan966x->fdma_ndev = dev; + netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll, + NAPI_POLL_WEIGHT); + napi_enable(&lan966x->napi); +} + +void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev) +{ + if (lan966x->fdma_ndev == dev) { + netif_napi_del(&lan966x->napi); + lan966x->fdma_ndev = NULL; + } +} + +int lan966x_fdma_init(struct lan966x *lan966x) +{ + int err; + + if (!lan966x->fdma) + return 0; + + lan966x->rx.lan966x = lan966x; + lan966x->rx.channel_id = FDMA_XTR_CHANNEL; + lan966x->tx.lan966x = lan966x; + lan966x->tx.channel_id = FDMA_INJ_CHANNEL; + lan966x->tx.last_in_use = -1; + + err = 
lan966x_fdma_rx_alloc(&lan966x->rx); + if (err) + return err; + + err = lan966x_fdma_tx_alloc(&lan966x->tx); + if (err) { + lan966x_fdma_rx_free(&lan966x->rx); + return err; + } + + lan966x_fdma_rx_start(&lan966x->rx); + + return 0; +} + +void lan966x_fdma_deinit(struct lan966x *lan966x) +{ + if (!lan966x->fdma) + return; + + lan966x_fdma_rx_disable(&lan966x->rx); + lan966x_fdma_tx_disable(&lan966x->tx); + + napi_synchronize(&lan966x->napi); + napi_disable(&lan966x->napi); + + lan966x_fdma_rx_free_pages(&lan966x->rx); + lan966x_fdma_rx_free(&lan966x->rx); + lan966x_fdma_tx_free(&lan966x->tx); +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c index ce5970bdcc6a..005e56ea5da1 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c @@ -346,7 +346,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row, lan966x_mac_process_raw_entry(&raw_entries[column], mac, &vid, &dest_idx); - WARN_ON(dest_idx > lan966x->num_phys_ports); + if (WARN_ON(dest_idx >= lan966x->num_phys_ports)) + continue; /* If the entry in SW is found, then there is nothing * to do @@ -392,7 +393,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row, lan966x_mac_process_raw_entry(&raw_entries[column], mac, &vid, &dest_idx); - WARN_ON(dest_idx > lan966x->num_phys_ports); + if (WARN_ON(dest_idx >= lan966x->num_phys_ports)) + continue; mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx); if (!mac_entry) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 1f8c67f0261b..138718f33dbd 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -24,9 +24,6 @@ #define XTR_NOT_READY 0x07000080U #define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3)) -#define READL_SLEEP_US 10 -#define READL_TIMEOUT_US 100000000 - #define IO_RANGES 2 static const struct of_device_id lan966x_match[] = { @@ -43,6 +40,7 @@ struct lan966x_main_io_resource { static const struct lan966x_main_io_resource lan966x_main_iomap[] = { { TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */ + { TARGET_FDMA, 0xc0400, 0 }, /* 0xe00c0400 */ { TARGET_ORG, 0, 1 }, /* 0xe2000000 */ { TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */ { TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */ @@ -343,7 +341,10 @@ static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev) } spin_lock(&lan966x->tx_lock); - err = lan966x_port_ifh_xmit(skb, ifh, dev); + if (port->lan966x->fdma) + err = lan966x_fdma_xmit(skb, ifh, dev); + else + err = lan966x_port_ifh_xmit(skb, ifh, dev); spin_unlock(&lan966x->tx_lock); return err; @@ -353,12 +354,24 @@ static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu) { struct lan966x_port *port = netdev_priv(dev); struct lan966x *lan966x = port->lan966x; + int old_mtu = dev->mtu; + int err; lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu), lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); dev->mtu = new_mtu; - return 0; + if (!lan966x->fdma) + return 0; + + err = lan966x_fdma_change_mtu(lan966x); + if (err) { + lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(old_mtu), + lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); + dev->mtu = old_mtu; + } + + return err; } static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr) @@ -432,8 +445,7 @@ bool lan966x_netdevice_check(const struct net_device *dev) return dev->netdev_ops == 
&lan966x_port_netdev_ops; } -static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, - struct sk_buff *skb) +bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb) { u32 val; @@ -446,6 +458,12 @@ static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, ANA_CPU_FWD_CFG_MLD_REDIR_ENA))) return true; + if (eth_type_vlan(skb->protocol)) { + skb = skb_vlan_untag(skb); + if (unlikely(!skb)) + return false; + } + if (skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->protocol == IPPROTO_IGMP) return false; @@ -514,7 +532,7 @@ static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval) } } -static void lan966x_ifh_get_src_port(void *ifh, u64 *src_port) +void lan966x_ifh_get_src_port(void *ifh, u64 *src_port) { packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1, IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0); @@ -526,7 +544,7 @@ static void lan966x_ifh_get_len(void *ifh, u64 *len) IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0); } -static void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp) +void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp) { packing(ifh, timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1, IFH_POS_TIMESTAMP, IFH_LEN * 4, UNPACK, 0); @@ -646,6 +664,9 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x) if (port->dev) unregister_netdev(port->dev); + if (lan966x->fdma && lan966x->fdma_ndev == port->dev) + lan966x_fdma_netdev_deinit(lan966x, port->dev); + if (port->phylink) { rtnl_lock(); lan966x_port_stop(port->dev); @@ -665,6 +686,15 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x) disable_irq(lan966x->ana_irq); lan966x->ana_irq = -ENXIO; } + + if (lan966x->fdma) + devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x); + + if (lan966x->ptp_irq) + devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x); + + if (lan966x->ptp_ext_irq) + devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x); } static int lan966x_probe_port(struct lan966x *lan966x, u32 p, @@ -793,12 +823,12 @@ static void lan966x_init(struct lan966x *lan966x) /* Do byte-swap and expect status after last data word * Extraction: Mode: manual extraction) | Byte_swap */ - lan_wr(QS_XTR_GRP_CFG_MODE_SET(1) | + lan_wr(QS_XTR_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) | QS_XTR_GRP_CFG_BYTE_SWAP_SET(1), lan966x, QS_XTR_GRP_CFG(0)); /* Injection: Mode: manual injection | Byte_swap */ - lan_wr(QS_INJ_GRP_CFG_MODE_SET(1) | + lan_wr(QS_INJ_GRP_CFG_MODE_SET(lan966x->fdma ? 
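/* Mode 1 is the manual, register-driven extraction/injection used
 * until now; mode 2, selected here when the FDMA block is present,
 * appears to hand the queue group over to the DMA engine instead.
 * The same 'lan966x->fdma ? 2 : 1' ternary is applied to both the
 * XTR group above and the INJ group at this point.
 */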
2 : 1) | QS_INJ_GRP_CFG_BYTE_SWAP_SET(1), lan966x, QS_INJ_GRP_CFG(0)); @@ -907,7 +937,7 @@ static int lan966x_ram_init(struct lan966x *lan966x) static int lan966x_reset_switch(struct lan966x *lan966x) { - struct reset_control *switch_reset, *phy_reset; + struct reset_control *switch_reset; int val = 0; int ret; @@ -916,13 +946,7 @@ static int lan966x_reset_switch(struct lan966x *lan966x) return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset), "Could not obtain switch reset"); - phy_reset = devm_reset_control_get_shared(lan966x->dev, "phy"); - if (IS_ERR(phy_reset)) - return dev_err_probe(lan966x->dev, PTR_ERR(phy_reset), - "Could not obtain phy reset\n"); - reset_control_reset(switch_reset); - reset_control_reset(phy_reset); lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG); lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT); @@ -1020,6 +1044,31 @@ static int lan966x_probe(struct platform_device *pdev) lan966x->ptp = 1; } + lan966x->fdma_irq = platform_get_irq_byname(pdev, "fdma"); + if (lan966x->fdma_irq > 0) { + err = devm_request_irq(&pdev->dev, lan966x->fdma_irq, + lan966x_fdma_irq_handler, 0, + "fdma irq", lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, "Unable to use fdma irq"); + + lan966x->fdma = true; + } + + if (lan966x->ptp) { + lan966x->ptp_ext_irq = platform_get_irq_byname(pdev, "ptp-ext"); + if (lan966x->ptp_ext_irq > 0) { + err = devm_request_threaded_irq(&pdev->dev, + lan966x->ptp_ext_irq, NULL, + lan966x_ptp_ext_irq_handler, + IRQF_ONESHOT, + "ptp-ext irq", lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, + "Unable to use ptp-ext irq"); + } + } + /* init switch */ lan966x_init(lan966x); lan966x_stats_init(lan966x); @@ -1058,8 +1107,15 @@ static int lan966x_probe(struct platform_device *pdev) if (err) goto cleanup_fdb; + err = lan966x_fdma_init(lan966x); + if (err) + goto cleanup_ptp; + return 0; +cleanup_ptp: + lan966x_ptp_deinit(lan966x); + cleanup_fdb: lan966x_fdb_deinit(lan966x); @@ -1079,6 +1135,7 @@ static int lan966x_remove(struct platform_device *pdev) { struct lan966x *lan966x = platform_get_drvdata(pdev); + lan966x_fdma_deinit(lan966x); lan966x_cleanup_ports(lan966x); cancel_delayed_work_sync(&lan966x->stats_work); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index ae282da1da74..3b86ddddc756 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -17,6 +17,9 @@ #define TABLE_UPDATE_SLEEP_US 10 #define TABLE_UPDATE_TIMEOUT_US 100000 +#define READL_SLEEP_US 10 +#define READL_TIMEOUT_US 100000000 + #define LAN966X_BUFFER_CELL_SZ 64 #define LAN966X_BUFFER_MEMORY (160 * 1024) #define LAN966X_BUFFER_MIN_SZ 60 @@ -53,11 +56,28 @@ #define LAN966X_PHC_COUNT 3 #define LAN966X_PHC_PORT 0 +#define LAN966X_PHC_PINS_NUM 7 #define IFH_REW_OP_NOOP 0x0 #define IFH_REW_OP_ONE_STEP_PTP 0x3 #define IFH_REW_OP_TWO_STEP_PTP 0x4 +#define FDMA_RX_DCB_MAX_DBS 1 +#define FDMA_TX_DCB_MAX_DBS 1 +#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0)) + +#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0)) +#define FDMA_DCB_STATUS_SOF BIT(16) +#define FDMA_DCB_STATUS_EOF BIT(17) +#define FDMA_DCB_STATUS_INTR BIT(18) +#define FDMA_DCB_STATUS_DONE BIT(19) +#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20)) +#define FDMA_DCB_INVALID_DATA 0x1 + +#define FDMA_XTR_CHANNEL 6 +#define FDMA_INJ_CHANNEL 0 +#define FDMA_DCB_MAX 512 + /* MAC table entry types. 
* ENTRYTYPE_NORMAL is subject to aging. * ENTRYTYPE_LOCKED is not subject to aging. @@ -73,6 +93,83 @@ enum macaccess_entry_type { struct lan966x_port; +struct lan966x_db { + u64 dataptr; + u64 status; +}; + +struct lan966x_rx_dcb { + u64 nextptr; + u64 info; + struct lan966x_db db[FDMA_RX_DCB_MAX_DBS]; +}; + +struct lan966x_tx_dcb { + u64 nextptr; + u64 info; + struct lan966x_db db[FDMA_TX_DCB_MAX_DBS]; +}; + +struct lan966x_rx { + struct lan966x *lan966x; + + /* Pointer to the array of hardware dcbs. */ + struct lan966x_rx_dcb *dcbs; + + /* Pointer to the last address in the dcbs. */ + struct lan966x_rx_dcb *last_entry; + + /* For each DB, there is a page */ + struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS]; + + /* Represents the db_index, it can have a value between 0 and + * FDMA_RX_DCB_MAX_DBS, once it reaches the value of FDMA_RX_DCB_MAX_DBS + * it means that the DCB can be reused. + */ + int db_index; + + /* Represents the index in the dcbs. It has a value between 0 and + * FDMA_DCB_MAX + */ + int dcb_index; + + /* Represents the dma address to the dcbs array */ + dma_addr_t dma; + + /* Represents the page order that is used to allocate the pages for the + * RX buffers. This value is calculated based on max MTU of the devices. + */ + u8 page_order; + + u8 channel_id; +}; + +struct lan966x_tx_dcb_buf { + struct net_device *dev; + struct sk_buff *skb; + dma_addr_t dma_addr; + bool used; + bool ptp; +}; + +struct lan966x_tx { + struct lan966x *lan966x; + + /* Pointer to the dcb list */ + struct lan966x_tx_dcb *dcbs; + u16 last_in_use; + + /* Represents the DMA address to the first entry of the dcb entries. */ + dma_addr_t dma; + + /* Array of dcbs that are given to the HW */ + struct lan966x_tx_dcb_buf *dcbs_buf; + + u8 channel_id; + + bool activated; +}; + struct lan966x_stat_layout { u32 offset; char name[ETH_GSTRING_LEN]; @@ -81,6 +178,7 @@ struct lan966x_stat_layout { struct lan966x_phc { struct ptp_clock *clock; struct ptp_clock_info info; + struct ptp_pin_desc pins[LAN966X_PHC_PINS_NUM]; struct hwtstamp_config hwtstamp_config; struct lan966x *lan966x; u8 index; @@ -134,6 +232,8 @@ struct lan966x { int xtr_irq; int ana_irq; int ptp_irq; + int fdma_irq; + int ptp_ext_irq; /* worqueue for fdb */ struct workqueue_struct *fdb_work; @@ -150,6 +250,13 @@ struct lan966x { spinlock_t ptp_ts_id_lock; /* lock for ts_id */ struct mutex ptp_lock; /* lock for ptp interface state */ u16 ptp_skbs; + + /* fdma */ + bool fdma; + struct net_device *fdma_ndev; + struct lan966x_rx rx; + struct lan966x_tx tx; + struct napi_struct napi; }; struct lan966x_port_config { @@ -195,6 +302,11 @@ bool lan966x_netdevice_check(const struct net_device *dev); void lan966x_register_notifier_blocks(void); void lan966x_unregister_notifier_blocks(void); +bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb); + +void lan966x_ifh_get_src_port(void *ifh, u64 *src_port); +void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp); + void lan966x_stats_get(struct net_device *dev, struct rtnl_link_stats64 *stats); int lan966x_stats_init(struct lan966x *lan966x); @@ -283,6 +395,15 @@ int lan966x_ptp_txtstamp_request(struct lan966x_port *port, void lan966x_ptp_txtstamp_release(struct lan966x_port *port, struct sk_buff *skb); irqreturn_t lan966x_ptp_irq_handler(int irq, void *args); +irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args); + +int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev); +int lan966x_fdma_change_mtu(struct lan966x *lan966x); +void 
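/* The FDMA structures above form conventional descriptor rings: each
 * DCB carries a nextptr to the following DCB plus one data block
 * (FDMA_RX_DCB_MAX_DBS == FDMA_TX_DCB_MAX_DBS == 1), and FDMA_DCB_MAX
 * (512) of them are chained per channel.  A hedged sketch of how the
 * RX indices described in the struct comments would advance (the real
 * walk lives in the FDMA RX path, not in this header):
 *
 *     rx->db_index++;
 *     if (rx->db_index >= FDMA_RX_DCB_MAX_DBS) {
 *             rx->db_index = 0;                   // DCB fully consumed
 *             rx->dcb_index++;                    // move to next DCB
 *             rx->dcb_index &= FDMA_DCB_MAX - 1;  // 512 is a power of 2
 *     }
 */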
lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev); +void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev); +int lan966x_fdma_init(struct lan966x *lan966x); +void lan966x_fdma_deinit(struct lan966x *lan966x); +irqreturn_t lan966x_fdma_irq_handler(int irq, void *args); static inline void __iomem *lan_addr(void __iomem *base[], int id, int tinst, int tcnt, diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c index 237555845a52..f141644e4372 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c @@ -393,6 +393,9 @@ void lan966x_port_init(struct lan966x_port *port) lan966x_port_config_down(port); + if (lan966x->fdma) + lan966x_fdma_netdev_init(lan966x, port->dev); + if (config->portmode != PHY_INTERFACE_MODE_QSGMII) return; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c index ae782778d6dd..3a621c5165bc 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c @@ -16,7 +16,7 @@ */ #define LAN966X_1PPB_FORMAT 3480517749LL -#define TOD_ACC_PIN 0x5 +#define TOD_ACC_PIN 0x7 enum { PTP_PIN_ACTION_IDLE = 0, @@ -29,10 +29,10 @@ enum { static u64 lan966x_ptp_get_nominal_value(void) { - u64 res = 0x304d2df1; - - res <<= 32; - return res; + /* This is the default value that for each system clock, the time of day + * is increased. It has the format 5.59 nanosecond. + */ + return 0x304d4873ecade305; } int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr) @@ -321,6 +321,63 @@ irqreturn_t lan966x_ptp_irq_handler(int irq, void *args) return IRQ_HANDLED; } +irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args) +{ + struct lan966x *lan966x = args; + struct lan966x_phc *phc; + unsigned long flags; + u64 time = 0; + time64_t s; + int pin, i; + s64 ns; + + if (!(lan_rd(lan966x, PTP_PIN_INTR))) + return IRQ_NONE; + + /* Go through all domains and see which pin generated the interrupt */ + for (i = 0; i < LAN966X_PHC_COUNT; ++i) { + struct ptp_clock_event ptp_event = {0}; + + phc = &lan966x->phc[i]; + pin = ptp_find_pin_unlocked(phc->clock, PTP_PF_EXTTS, 0); + if (pin == -1) + continue; + + if (!(lan_rd(lan966x, PTP_PIN_INTR) & BIT(pin))) + continue; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + + /* Enable to get the new interrupt. 
+ * By writing 1 it clears the bit + */ + lan_wr(BIT(pin), lan966x, PTP_PIN_INTR); + + /* Get current time */ + s = lan_rd(lan966x, PTP_TOD_SEC_MSB(pin)); + s <<= 32; + s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(pin)); + ns = lan_rd(lan966x, PTP_TOD_NSEC(pin)); + ns &= PTP_TOD_NSEC_TOD_NSEC; + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) { + s--; + ns &= 0xf; + ns += 999999984; + } + time = ktime_set(s, ns); + + ptp_event.index = pin; + ptp_event.timestamp = time; + ptp_event.type = PTP_CLOCK_EXTTS; + ptp_clock_event(phc->clock, &ptp_event); + } + + return IRQ_HANDLED; +} + static int lan966x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); @@ -493,6 +550,207 @@ static int lan966x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) return 0; } +static int lan966x_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + struct ptp_clock_info *info; + int i; + + /* Currently support only 1 channel */ + if (chan != 0) + return -1; + + switch (func) { + case PTP_PF_NONE: + case PTP_PF_PEROUT: + case PTP_PF_EXTTS: + break; + default: + return -1; + } + + /* The PTP pins are shared by all the PHC. So it is required to see if + * the pin is connected to another PHC. The pin is connected to another + * PHC if that pin already has a function on that PHC. + */ + for (i = 0; i < LAN966X_PHC_COUNT; ++i) { + info = &lan966x->phc[i].info; + + /* Ignore the check with ourself */ + if (ptp == info) + continue; + + if (info->pin_config[pin].func == PTP_PF_PEROUT || + info->pin_config[pin].func == PTP_PF_EXTTS) + return -1; + } + + return 0; +} + +static int lan966x_ptp_perout(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + struct timespec64 ts_phase, ts_period; + unsigned long flags; + s64 wf_high, wf_low; + bool pps = false; + int pin; + + if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE | + PTP_PEROUT_PHASE)) + return -EOPNOTSUPP; + + pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index); + if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM) + return -EINVAL; + + if (!on) { + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(pin)); + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + return 0; + } + + if (rq->perout.period.sec == 1 && + rq->perout.period.nsec == 0) + pps = true; + + if (rq->perout.flags & PTP_PEROUT_PHASE) { + ts_phase.tv_sec = rq->perout.phase.sec; + ts_phase.tv_nsec = rq->perout.phase.nsec; + } else { + ts_phase.tv_sec = rq->perout.start.sec; + ts_phase.tv_nsec = rq->perout.start.nsec; + } + + if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) { + dev_warn(lan966x->dev, + "Absolute time not supported!\n"); + return -EINVAL; + } + + if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) { + struct timespec64 ts_on; + + ts_on.tv_sec = rq->perout.on.sec; + ts_on.tv_nsec = rq->perout.on.nsec; + + wf_high = timespec64_to_ns(&ts_on); + } else { + wf_high = 5000; + } + + if (pps) { + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); 
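/* Worked numbers for the waveform registers: in the general path
 * further below, a 10 ms period with PTP_PEROUT_DUTY_CYCLE asking for
 * 2.5 ms of on-time ends up as
 *
 *     wf_high = 2500000;                  // ns the pin stays high
 *     wf_low  = 10000000 - 2500000;       // rest of the period
 *
 * while without a duty cycle wf_high falls back to the 5000 ns default
 * above.  In the 1 PPS case handled here, WFL instead carries the
 * nanosecond-only phase offset, and PIN_SYNC is set to 3, presumably
 * anchoring the edge to the one-second rollover.  Either way the
 * values must fit the 30-bit PIN_WFH/PIN_WFL fields, i.e. roughly
 * 1.07 s per register.
 */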
+ lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(ts_phase.tv_nsec), + lan966x, PTP_WF_LOW_PERIOD(pin)); + lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high), + lan966x, PTP_WF_HIGH_PERIOD(pin)); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(3), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(pin)); + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + return 0; + } + + ts_period.tv_sec = rq->perout.period.sec; + ts_period.tv_nsec = rq->perout.period.nsec; + + wf_low = timespec64_to_ns(&ts_period); + wf_low -= wf_high; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(wf_low), + lan966x, PTP_WF_LOW_PERIOD(pin)); + lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high), + lan966x, PTP_WF_HIGH_PERIOD(pin)); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(pin)); + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + return 0; +} + +static int lan966x_ptp_extts(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + unsigned long flags; + int pin; + u32 val; + + if (lan966x->ptp_ext_irq <= 0) + return -EOPNOTSUPP; + + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index); + if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM) + return -EINVAL; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) | + PTP_PIN_CFG_PIN_SYNC_SET(on ? 
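/* A hedged userspace sketch of driving this EXTTS path through the
 * standard <linux/ptp_clock.h> character-device UAPI (nothing here is
 * driver-specific; fd is an open /dev/ptpN):
 *
 *     struct ptp_pin_desc pd = { .index = 0, .func = PTP_PF_EXTTS };
 *     struct ptp_extts_request req = {
 *             .index = 0,
 *             .flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
 *     };
 *     struct ptp_extts_event ev;
 *
 *     ioctl(fd, PTP_PIN_SETFUNC, &pd);    // vetted by ->verify()
 *     ioctl(fd, PTP_EXTTS_REQUEST, &req); // ends up in this function
 *     read(fd, &ev, sizeof(ev));          // produced by the IRQ handler
 *
 * Since only the ENABLE/RISING_EDGE/STRICT flags are accepted above,
 * a falling-edge request is refused with -EOPNOTSUPP.
 */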
3 : 0) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SELECT_SET(pin), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_SYNC | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SELECT, + lan966x, PTP_PIN_CFG(pin)); + + val = lan_rd(lan966x, PTP_PIN_INTR_ENA); + if (on) + val |= BIT(pin); + else + val &= ~BIT(pin); + lan_wr(val, lan966x, PTP_PIN_INTR_ENA); + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + return 0; +} + +static int lan966x_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + return lan966x_ptp_perout(ptp, rq, on); + case PTP_CLK_REQ_EXTTS: + return lan966x_ptp_extts(ptp, rq, on); + default: + return -EOPNOTSUPP; + } + + return 0; +} + static struct ptp_clock_info lan966x_ptp_clock_info = { .owner = THIS_MODULE, .name = "lan966x ptp", @@ -501,6 +759,11 @@ static struct ptp_clock_info lan966x_ptp_clock_info = { .settime64 = lan966x_ptp_settime64, .adjtime = lan966x_ptp_adjtime, .adjfine = lan966x_ptp_adjfine, + .verify = lan966x_ptp_verify, + .enable = lan966x_ptp_enable, + .n_per_out = LAN966X_PHC_PINS_NUM, + .n_ext_ts = LAN966X_PHC_PINS_NUM, + .n_pins = LAN966X_PHC_PINS_NUM, }; static int lan966x_ptp_phc_init(struct lan966x *lan966x, @@ -508,8 +771,19 @@ static int lan966x_ptp_phc_init(struct lan966x *lan966x, struct ptp_clock_info *clock_info) { struct lan966x_phc *phc = &lan966x->phc[index]; + struct ptp_pin_desc *p; + int i; + + for (i = 0; i < LAN966X_PHC_PINS_NUM; i++) { + p = &phc->pins[i]; + + snprintf(p->name, sizeof(p->name), "pin%d", i); + p->index = i; + p->func = PTP_PF_NONE; + } phc->info = *clock_info; + phc->info.pin_config = &phc->pins[0]; phc->clock = ptp_clock_register(&phc->info, lan966x->dev); if (IS_ERR(phc->clock)) return PTR_ERR(phc->clock); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h index 0c0b3e173d53..8265ad89f0bc 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h @@ -17,6 +17,7 @@ enum lan966x_target { TARGET_CHIP_TOP = 5, TARGET_CPU = 6, TARGET_DEV = 13, + TARGET_FDMA = 21, TARGET_GCB = 27, TARGET_ORG = 36, TARGET_PTP = 41, @@ -578,6 +579,129 @@ enum lan966x_target { #define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\ FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x) +/* FDMA:FDMA:FDMA_CH_ACTIVATE */ +#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4) + +#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0) +#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\ + FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x) +#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\ + FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x) + +/* FDMA:FDMA:FDMA_CH_RELOAD */ +#define FDMA_CH_RELOAD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4) + +#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0) +#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\ + FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x) +#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\ + FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x) + +/* FDMA:FDMA:FDMA_CH_DISABLE */ +#define FDMA_CH_DISABLE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4) + +#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0) +#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\ + FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x) +#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\ + FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x) + +/* FDMA:FDMA:FDMA_CH_DB_DISCARD */ +#define FDMA_CH_DB_DISCARD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 16, 0, 1, 4) + +#define 
FDMA_CH_DB_DISCARD_DB_DISCARD GENMASK(7, 0) +#define FDMA_CH_DB_DISCARD_DB_DISCARD_SET(x)\ + FIELD_PREP(FDMA_CH_DB_DISCARD_DB_DISCARD, x) +#define FDMA_CH_DB_DISCARD_DB_DISCARD_GET(x)\ + FIELD_GET(FDMA_CH_DB_DISCARD_DB_DISCARD, x) + +/* FDMA:FDMA:FDMA_DCB_LLP */ +#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4) + +/* FDMA:FDMA:FDMA_DCB_LLP1 */ +#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4) + +/* FDMA:FDMA:FDMA_CH_ACTIVE */ +#define FDMA_CH_ACTIVE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 180, 0, 1, 4) + +/* FDMA:FDMA:FDMA_CH_CFG */ +#define FDMA_CH_CFG(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4) + +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(4) +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x) +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x) + +#define FDMA_CH_CFG_CH_INJ_PORT BIT(3) +#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x) +#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x) + +#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(2, 1) +#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x) +#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x) + +#define FDMA_CH_CFG_CH_MEM BIT(0) +#define FDMA_CH_CFG_CH_MEM_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_MEM, x) +#define FDMA_CH_CFG_CH_MEM_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_MEM, x) + +/* FDMA:FDMA:FDMA_PORT_CTRL */ +#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4) + +#define FDMA_PORT_CTRL_INJ_STOP BIT(4) +#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x) +#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x) + +#define FDMA_PORT_CTRL_XTR_STOP BIT(2) +#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, x) +#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x) + +/* FDMA:FDMA:FDMA_INTR_DB */ +#define FDMA_INTR_DB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4) + +/* FDMA:FDMA:FDMA_INTR_DB_ENA */ +#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4) + +#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0) +#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\ + FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x) +#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\ + FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x) + +/* FDMA:FDMA:FDMA_INTR_ERR */ +#define FDMA_INTR_ERR __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4) + +/* FDMA:FDMA:FDMA_ERRORS */ +#define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4) + +/* PTP:PTP_CFG:PTP_PIN_INTR */ +#define PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 0, 0, 1, 4) + +#define PTP_PIN_INTR_INTR_PTP GENMASK(7, 0) +#define PTP_PIN_INTR_INTR_PTP_SET(x)\ + FIELD_PREP(PTP_PIN_INTR_INTR_PTP, x) +#define PTP_PIN_INTR_INTR_PTP_GET(x)\ + FIELD_GET(PTP_PIN_INTR_INTR_PTP, x) + +/* PTP:PTP_CFG:PTP_PIN_INTR_ENA */ +#define PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 4, 0, 1, 4) + +#define PTP_PIN_INTR_ENA_INTR_ENA GENMASK(7, 0) +#define PTP_PIN_INTR_ENA_INTR_ENA_SET(x)\ + FIELD_PREP(PTP_PIN_INTR_ENA_INTR_ENA, x) +#define PTP_PIN_INTR_ENA_INTR_ENA_GET(x)\ + FIELD_GET(PTP_PIN_INTR_ENA_INTR_ENA, x) + /* PTP:PTP_CFG:PTP_DOM_CFG */ #define PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 12, 0, 1, 4) @@ -611,6 +735,12 @@ enum lan966x_target { #define PTP_PIN_CFG_PIN_SYNC_GET(x)\ 
FIELD_GET(PTP_PIN_CFG_PIN_SYNC, x) +#define PTP_PIN_CFG_PIN_SELECT GENMASK(23, 21) +#define PTP_PIN_CFG_PIN_SELECT_SET(x)\ + FIELD_PREP(PTP_PIN_CFG_PIN_SELECT, x) +#define PTP_PIN_CFG_PIN_SELECT_GET(x)\ + FIELD_GET(PTP_PIN_CFG_PIN_SELECT, x) + #define PTP_PIN_CFG_PIN_DOM GENMASK(17, 16) #define PTP_PIN_CFG_PIN_DOM_SET(x)\ FIELD_PREP(PTP_PIN_CFG_PIN_DOM, x) @@ -638,6 +768,22 @@ enum lan966x_target { #define PTP_TOD_NSEC_TOD_NSEC_GET(x)\ FIELD_GET(PTP_TOD_NSEC_TOD_NSEC, x) +/* PTP:PTP_PINS:WF_HIGH_PERIOD */ +#define PTP_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\ + 0, 1, 0, g, 8, 64, 24, 0, 1, 4) + +#define PTP_WF_HIGH_PERIOD_PIN_WFH(x) ((x) & GENMASK(29, 0)) +#define PTP_WF_HIGH_PERIOD_PIN_WFH_M GENMASK(29, 0) +#define PTP_WF_HIGH_PERIOD_PIN_WFH_X(x) ((x) & GENMASK(29, 0)) + +/* PTP:PTP_PINS:WF_LOW_PERIOD */ +#define PTP_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\ + 0, 1, 0, g, 8, 64, 28, 0, 1, 4) + +#define PTP_WF_LOW_PERIOD_PIN_WFL(x) ((x) & GENMASK(29, 0)) +#define PTP_WF_LOW_PERIOD_PIN_WFL_M GENMASK(29, 0) +#define PTP_WF_LOW_PERIOD_PIN_WFL_X(x) ((x) & GENMASK(29, 0)) + /* PTP:PTP_TS_FIFO:PTP_TWOSTEP_CTRL */ #define PTP_TWOSTEP_CTRL __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 0, 0, 1, 4) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c index e3555c94294d..df2bee678559 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c @@ -322,8 +322,7 @@ static int lan966x_port_prechangeupper(struct net_device *dev, if (netif_is_bridge_master(info->upper_dev) && !info->linking) switchdev_bridge_port_unoffload(port->dev, port, - &lan966x_switchdev_nb, - &lan966x_switchdev_blocking_nb); + NULL, NULL); return NOTIFY_DONE; } diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index e443bd8b2d09..0825a92599a5 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -551,7 +551,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1]; struct ocelot_port *ocelot_port = ocelot->ports[port]; struct ocelot_vcap_filter *filter; - int err; + int err = 0; u32 val; list_for_each_entry(filter, &block->rules, list) { @@ -570,7 +570,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, if (vlan_aware) err = ocelot_del_vlan_unaware_pvid(ocelot, port, ocelot_port->bridge); - else + else if (ocelot_port->bridge) err = ocelot_add_vlan_unaware_pvid(ocelot, port, ocelot_port->bridge); if (err) @@ -629,6 +629,13 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, { int err; + /* Ignore VID 0 added to our RX filter by the 8021q module, since + * that collides with OCELOT_STANDALONE_PVID and changes it from + * egress-untagged to egress-tagged. 
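 * (VID 0 on the wire is the priority-tagged case: the 802.1Q header
 * carries only PCP bits and implies no VLAN membership, so the add
 * can be accepted as a no-op without touching the VLAN table.)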
+ */ + if (!vid) + return 0; + err = ocelot_vlan_member_add(ocelot, port, vid, untagged); if (err) return err; @@ -651,6 +658,9 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid) bool del_pvid = false; int err; + if (!vid) + return 0; + if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid) del_pvid = true; @@ -2859,6 +2869,8 @@ static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port, val = BIT(port); ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC); + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4); + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6); } static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port, @@ -3216,6 +3228,7 @@ static void ocelot_detect_features(struct ocelot *ocelot) int ocelot_init(struct ocelot *ocelot) { + const struct ocelot_stat_layout *stat; char queue_name[32]; int i, ret; u32 port; @@ -3228,6 +3241,10 @@ int ocelot_init(struct ocelot *ocelot) } } + ocelot->num_stats = 0; + for_each_stat(ocelot, stat) + ocelot->num_stats++; + ocelot->stats = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports * ocelot->num_stats, sizeof(u64), GFP_KERNEL); diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index c8701ac955a8..1e74bdb215ec 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -672,12 +672,10 @@ static void is1_entry_set(struct ocelot *ocelot, int ix, { const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1]; struct ocelot_vcap_key_vlan *tag = &filter->vlan; - struct ocelot_vcap_u64 payload; struct vcap_data data; int row = ix / 2; u32 type; - memset(&payload, 0, sizeof(payload)); memset(&data, 0, sizeof(data)); /* Read row */ @@ -813,11 +811,9 @@ static void es0_entry_set(struct ocelot *ocelot, int ix, { const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0]; struct ocelot_vcap_key_vlan *tag = &filter->vlan; - struct ocelot_vcap_u64 payload; struct vcap_data data; int row = ix; - memset(&payload, 0, sizeof(payload)); memset(&data, 0, sizeof(data)); /* Read row */ diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 4f4a495a60ad..961f803aca19 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -190,6 +190,7 @@ static const struct ocelot_stat_layout ocelot_stats_layout[] = { { .name = "drop_green_prio_5", .offset = 0x8F, }, { .name = "drop_green_prio_6", .offset = 0x90, }, { .name = "drop_green_prio_7", .offset = 0x91, }, + OCELOT_STAT_END }; static void ocelot_pll5_init(struct ocelot *ocelot) @@ -227,7 +228,6 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops) ocelot->map = ocelot_regmap; ocelot->stats_layout = ocelot_stats_layout; - ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout); ocelot->num_mact_rows = 1024; ocelot->ops = ops; diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 50ac3ee2577a..fe5e77330f5f 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -579,7 +579,7 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size) int status; unsigned i; - if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) { + if (request_firmware(&fw, mgp->fw_name, dev) < 0) { dev_err(dev, "Unable to load %s firmware image via hotplug\n", mgp->fw_name); status = -EINVAL; @@ 
-2903,11 +2903,9 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb, status = myri10ge_xmit(curr, dev); if (status != 0) { dev_kfree_skb_any(curr); - if (segs != NULL) { - curr = segs; - segs = next; + skb_list_walk_safe(next, curr, next) { curr->next = NULL; - dev_kfree_skb_any(segs); + dev_kfree_skb_any(curr); } goto drop; } diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 82a22711ce45..50bca486a244 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -989,8 +989,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that made udelay() unreliable. - The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is - deprecated. */ #define eeprom_delay(ee_addr) readl(ee_addr) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index aa7c093f1f91..db4dfae8c01d 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -4351,7 +4351,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) } ll_config->tx_steering_type = TX_MULTIQ_STEERING; ll_config->intr_type = MSI_X; - ll_config->napi_weight = NEW_NAPI_WEIGHT; + ll_config->napi_weight = NAPI_POLL_WEIGHT; ll_config->rth_steering = RTH_STEERING; /* get the default configuration parameters */ diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h index 63f65193dd49..da9d2c191828 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h @@ -167,8 +167,6 @@ struct macInfo { struct vxge_config { int tx_pause_enable; int rx_pause_enable; - -#define NEW_NAPI_WEIGHT 64 int napi_weight; int intr_type; #define INTA 0 diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c index 84d66d138c3d..78368e71ce83 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c +++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c @@ -289,7 +289,7 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk, switch (sk->sk_family) { #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: - if (sk->sk_ipv6only || + if (ipv6_only_sock(sk) || ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) { req_sz = sizeof(struct nfp_crypto_req_add_v6); ipv6 = true; diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index bfd7d1c35076..1edcd9f86c9c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -83,6 +83,10 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, entry2->rule->match.dissector->used_keys; bool out; + if (entry1->netdev && entry2->netdev && + entry1->netdev != entry2->netdev) + return -EINVAL; + /* check the overlapped fields one by one, the unmasked part * should not conflict with each other. */ @@ -914,7 +918,7 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt, /* Check that the two tc flows are also compatible with * the nft entry. No need to check the pre_ct and post_ct * entries as that was already done during pre_merge. 
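 * (The device comparison itself did not disappear: it moved up into
 * nfp_ct_merge_check(), which now returns -EINVAL early when the two
 * flows arrived on different netdevs, as the first hunk of this
 * file's diff shows.)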
- * The nft entry does not have a netdev or chain populated, so + * The nft entry does not have a chain populated, so * skip this check. */ err = nfp_ct_merge_check(pre_ct_entry, nft_entry); @@ -999,8 +1003,6 @@ static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt, pre_ct_entry = ct_entry2; } - if (post_ct_entry->netdev != pre_ct_entry->netdev) - return -EINVAL; /* Checks that the chain_index of the filter matches the * chain_index of the GOTO action. */ @@ -1114,6 +1116,20 @@ err_tc_merge_tb_init: return ERR_PTR(err); } +static struct net_device *get_netdev_from_rule(struct flow_rule *rule) +{ + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) { + struct flow_match_meta match; + + flow_rule_match_meta(rule, &match); + if (match.key->ingress_ifindex & match.mask->ingress_ifindex) + return __dev_get_by_index(&init_net, + match.key->ingress_ifindex); + } + + return NULL; +} + static struct nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt, struct net_device *netdev, @@ -1154,6 +1170,9 @@ nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt, entry->rule->match.dissector = &nft_match->dissector; entry->rule->match.mask = &nft_match->mask; entry->rule->match.key = &nft_match->key; + + if (!netdev) + netdev = get_netdev_from_rule(entry->rule); } else { entry->rule->match.dissector = flow->rule->match.dissector; entry->rule->match.mask = flow->rule->match.mask; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index b412670d89b2..5528d12d1f48 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2259,8 +2259,12 @@ static void nfp_net_netdev_init(struct nfp_net *nn) if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) netdev->hw_features |= NETIF_F_RXHASH; if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) { - if (nn->cap & NFP_NET_CFG_CTRL_LSO) - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + if (nn->cap & NFP_NET_CFG_CTRL_LSO) { + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; + netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; + } netdev->udp_tunnel_nic_info = &nfp_udp_tunnels; nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index 3fdaaf8ed2ba..4627715a5e32 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -95,15 +95,17 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) { struct nfp_app *app = nfp_app_from_netdev(netdev); + u16 update = NFP_NET_VF_CFG_MB_UPD_VLAN; + bool is_proto_sup = true; unsigned int vf_offset; - u16 vlan_tci; + u32 vlan_tag; int err; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan"); if (err) return err; - if (vlan_proto != htons(ETH_P_8021Q)) + if (!eth_type_vlan(vlan_proto)) return -EOPNOTSUPP; if (vlan > 4095 || qos > 7) { @@ -112,14 +114,32 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, return -EINVAL; } + /* Check if fw supports or not */ + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto"); + if (err) + is_proto_sup = false; + + if (vlan_proto != htons(ETH_P_8021Q)) { + if (!is_proto_sup) + return -EOPNOTSUPP; + update |= NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO; + } + /* Write VLAN tag to VF entry in VF config symbol */ - 
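/* Layout of the 32-bit word assembled just below, following the
 * NFP_NET_VF_CFG_VLAN_* masks in nfp_net_sriov.h:
 *
 *     bits 31:16  VLAN_PROT   TPID, 0x8100 or 0x88a8 (new capability)
 *     bits 15:13  VLAN_QOS    priority, qos <= 7
 *     bits 11:0   VLAN_VID    VLAN id, vlan <= 4095
 *
 * which is also why the store below grows from writew() to writel():
 * a 16-bit write would truncate the TPID half.
 */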
vlan_tci = FIELD_PREP(NFP_NET_VF_CFG_VLAN_VID, vlan) | + vlan_tag = FIELD_PREP(NFP_NET_VF_CFG_VLAN_VID, vlan) | FIELD_PREP(NFP_NET_VF_CFG_VLAN_QOS, qos); + + /* vlan_tag of 0 means that the configuration should be cleared and in + * such circumstances setting the TPID has no meaning when + * configuring firmware. + */ + if (vlan_tag && is_proto_sup) + vlan_tag |= FIELD_PREP(NFP_NET_VF_CFG_VLAN_PROT, ntohs(vlan_proto)); + vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; - writew(vlan_tci, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN); + writel(vlan_tag, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN); - return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_VLAN, - "vlan"); + return nfp_net_sriov_update(app, vf, update, "vlan"); } int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) @@ -209,7 +229,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, { struct nfp_app *app = nfp_app_from_netdev(netdev); unsigned int vf_offset; - u16 vlan_tci; + u32 vlan_tag; u32 mac_hi; u16 mac_lo; u8 flags; @@ -225,7 +245,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, mac_lo = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO); flags = readb(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_CTRL); - vlan_tci = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN); + vlan_tag = readl(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN); memset(ivi, 0, sizeof(*ivi)); ivi->vf = vf; @@ -233,9 +253,10 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, put_unaligned_be32(mac_hi, &ivi->mac[0]); put_unaligned_be16(mac_lo, &ivi->mac[4]); - ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tci); - ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tci); - + ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tag); + ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tag); + if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto")) + ivi->vlan_proto = htons(FIELD_GET(NFP_NET_VF_CFG_VLAN_PROT, vlan_tag)); ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags); ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags); ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h index 786be58a907e..7b72cc083476 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h @@ -19,6 +19,7 @@ #define NFP_NET_VF_CFG_MB_CAP_SPOOF (0x1 << 2) #define NFP_NET_VF_CFG_MB_CAP_LINK_STATE (0x1 << 3) #define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4) +#define NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO (0x1 << 5) #define NFP_NET_VF_CFG_MB_RET 0x2 #define NFP_NET_VF_CFG_MB_UPD 0x4 #define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0) @@ -26,6 +27,7 @@ #define NFP_NET_VF_CFG_MB_UPD_SPOOF (0x1 << 2) #define NFP_NET_VF_CFG_MB_UPD_LINK_STATE (0x1 << 3) #define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4) +#define NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO (0x1 << 5) #define NFP_NET_VF_CFG_MB_VF_NUM 0x7 /* VF config entry @@ -43,6 +45,7 @@ #define NFP_NET_VF_CFG_LS_MODE_ENABLE 1 #define NFP_NET_VF_CFG_LS_MODE_DISABLE 2 #define NFP_NET_VF_CFG_VLAN 0x8 +#define NFP_NET_VF_CFG_VLAN_PROT 0xffff0000 #define NFP_NET_VF_CFG_VLAN_QOS 0xe000 #define NFP_NET_VF_CFG_VLAN_VID 0x0fff diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index 3d379e937184..ddb34bfb9bef 100644 --- 
a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -13,22 +13,36 @@ #include <linux/ctype.h> #include <linux/types.h> #include <linux/sizes.h> +#include <linux/stringify.h> #ifndef NFP_SUBSYS #define NFP_SUBSYS "nfp" #endif -#define nfp_err(cpp, fmt, args...) \ +#define string_format(x) __FILE__ ":" __stringify(__LINE__) ": " x + +#define __nfp_err(cpp, fmt, args...) \ dev_err(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) -#define nfp_warn(cpp, fmt, args...) \ +#define __nfp_warn(cpp, fmt, args...) \ dev_warn(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) -#define nfp_info(cpp, fmt, args...) \ +#define __nfp_info(cpp, fmt, args...) \ dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) -#define nfp_dbg(cpp, fmt, args...) \ +#define __nfp_dbg(cpp, fmt, args...) \ dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) +#define __nfp_printk(level, cpp, fmt, args...) \ + dev_printk(level, nfp_cpp_device(cpp)->parent, \ + NFP_SUBSYS ": " fmt, ## args) + +#define nfp_err(cpp, fmt, args...) \ + __nfp_err(cpp, string_format(fmt), ## args) +#define nfp_warn(cpp, fmt, args...) \ + __nfp_warn(cpp, string_format(fmt), ## args) +#define nfp_info(cpp, fmt, args...) \ + __nfp_info(cpp, string_format(fmt), ## args) +#define nfp_dbg(cpp, fmt, args...) \ + __nfp_dbg(cpp, string_format(fmt), ## args) #define nfp_printk(level, cpp, fmt, args...) \ - dev_printk(level, nfp_cpp_device(cpp)->parent, \ - NFP_SUBSYS ": " fmt, ## args) + __nfp_printk(level, cpp, string_format(fmt), ## args) #define PCI_64BIT_BAR_COUNT 3 diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 660013f716d4..5116badaf091 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -56,8 +56,8 @@ #include <asm/irq.h> -#define TX_WORK_PER_LOOP 64 -#define RX_WORK_PER_LOOP 64 +#define TX_WORK_PER_LOOP NAPI_POLL_WEIGHT +#define RX_WORK_PER_LOOP NAPI_POLL_WEIGHT /* * Hardware access: @@ -5876,7 +5876,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) else dev->netdev_ops = &nv_netdev_ops_optimized; - netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); + netif_napi_add(dev, &np->napi, nv_napi_poll, NAPI_POLL_WEIGHT); dev->ethtool_ops = &ops; dev->watchdog_timeo = NV_WATCHDOG_TIMEO; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 1dc40c537281..46da937ad27f 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -32,8 +32,6 @@ #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 #define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802 -#define PCH_GBE_TX_WEIGHT 64 -#define PCH_GBE_RX_WEIGHT 64 #define PCH_GBE_RX_BUFFER_WRITE 16 /* Initialize the wake-on-LAN settings */ @@ -1469,7 +1467,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, tx_desc->gbec_status, tx_desc->dma_status); unused = PCH_GBE_DESC_UNUSED(tx_ring); - thresh = tx_ring->count - PCH_GBE_TX_WEIGHT; + thresh = tx_ring->count - NAPI_POLL_WEIGHT; if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh)) { /* current marked clean, tx queue filling up, do extra clean */ int j, k; @@ -1482,13 +1480,13 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, /* current marked clean, scan for more that need cleaning. 
*/ k = i; - for (j = 0; j < PCH_GBE_TX_WEIGHT; j++) + for (j = 0; j < NAPI_POLL_WEIGHT; j++) { tx_desc = PCH_GBE_TX_DESC(*tx_ring, k); if (tx_desc->gbec_status != DSC_INIT16) break; /*found*/ if (++k >= tx_ring->count) k = 0; /*increment, wrap*/ } - if (j < PCH_GBE_TX_WEIGHT) { + if (j < NAPI_POLL_WEIGHT) { netdev_dbg(adapter->netdev, "clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n", unused, j, i, k, tx_ring->next_to_use, @@ -1547,7 +1545,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); /* weight of a sort for tx, to avoid endless transmit cleanup */ - if (cleaned_count++ == PCH_GBE_TX_WEIGHT) { + if (cleaned_count++ == NAPI_POLL_WEIGHT) { cleaned = false; break; } @@ -2519,7 +2517,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, netdev->netdev_ops = &pch_gbe_netdev_ops; netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD; netif_napi_add(netdev, &adapter->napi, - pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT); + pch_gbe_napi_poll, NAPI_POLL_WEIGHT); netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; netdev->features = netdev->hw_features; diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index 0d9c2fe0245d..3d2098f21bb7 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -30,8 +30,7 @@ qed-$(CONFIG_QED_OOO) += qed_ooo.o qed-$(CONFIG_QED_NVMETCP) += \ qed_nvmetcp.o \ - qed_nvmetcp_fw_funcs.o \ - qed_nvmetcp_ip_services.o + qed_nvmetcp_fw_funcs.o qed-$(CONFIG_QED_RDMA) += \ qed_iwarp.o \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index e3edca187ddf..5250d1d1e49c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -489,7 +489,7 @@ struct split_type_defs { #define STATIC_DEBUG_LINE_DWORDS 9 -#define NUM_COMMON_GLOBAL_PARAMS 11 +#define NUM_COMMON_GLOBAL_PARAMS 10 #define MAX_RECURSION_DEPTH 10 diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c deleted file mode 100644 index 96a2077fd315..000000000000 --- a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c +++ /dev/null @@ -1,238 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) -/* - * Copyright 2021 Marvell. All rights reserved. 
- */ - -#include <linux/types.h> -#include <asm/byteorder.h> -#include <asm/param.h> -#include <linux/delay.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <linux/etherdevice.h> -#include <linux/kernel.h> -#include <linux/stddef.h> -#include <linux/errno.h> - -#include <net/tcp.h> - -#include <linux/qed/qed_nvmetcp_ip_services_if.h> - -#define QED_IP_RESOL_TIMEOUT 4 - -int qed_route_ipv4(struct sockaddr_storage *local_addr, - struct sockaddr_storage *remote_addr, - struct sockaddr *hardware_address, - struct net_device **ndev) -{ - struct neighbour *neigh = NULL; - __be32 *loc_ip, *rem_ip; - struct rtable *rt; - int rc = -ENXIO; - int retry; - - loc_ip = &((struct sockaddr_in *)local_addr)->sin_addr.s_addr; - rem_ip = &((struct sockaddr_in *)remote_addr)->sin_addr.s_addr; - *ndev = NULL; - rt = ip_route_output(&init_net, *rem_ip, *loc_ip, 0/*tos*/, 0/*oif*/); - if (IS_ERR(rt)) { - pr_err("lookup route failed\n"); - rc = PTR_ERR(rt); - goto return_err; - } - - neigh = dst_neigh_lookup(&rt->dst, rem_ip); - if (!neigh) { - rc = -ENOMEM; - ip_rt_put(rt); - goto return_err; - } - - *ndev = rt->dst.dev; - ip_rt_put(rt); - - /* If not resolved, kick-off state machine towards resolution */ - if (!(neigh->nud_state & NUD_VALID)) - neigh_event_send(neigh, NULL); - - /* query neighbor until resolved or timeout */ - retry = QED_IP_RESOL_TIMEOUT; - while (!(neigh->nud_state & NUD_VALID) && retry > 0) { - msleep(1000); - retry--; - } - - if (neigh->nud_state & NUD_VALID) { - /* copy resolved MAC address */ - neigh_ha_snapshot(hardware_address->sa_data, neigh, *ndev); - hardware_address->sa_family = (*ndev)->type; - rc = 0; - } - - neigh_release(neigh); - if (!(*loc_ip)) { - *loc_ip = inet_select_addr(*ndev, *rem_ip, RT_SCOPE_UNIVERSE); - local_addr->ss_family = AF_INET; - } - -return_err: - - return rc; -} -EXPORT_SYMBOL(qed_route_ipv4); - -int qed_route_ipv6(struct sockaddr_storage *local_addr, - struct sockaddr_storage *remote_addr, - struct sockaddr *hardware_address, - struct net_device **ndev) -{ - struct neighbour *neigh = NULL; - struct dst_entry *dst; - struct flowi6 fl6; - int rc = -ENXIO; - int retry; - - memset(&fl6, 0, sizeof(fl6)); - fl6.saddr = ((struct sockaddr_in6 *)local_addr)->sin6_addr; - fl6.daddr = ((struct sockaddr_in6 *)remote_addr)->sin6_addr; - dst = ip6_route_output(&init_net, NULL, &fl6); - if (!dst || dst->error) { - if (dst) { - dst_release(dst); - pr_err("lookup route failed %d\n", dst->error); - } - - goto out; - } - - neigh = dst_neigh_lookup(dst, &fl6.daddr); - if (neigh) { - *ndev = ip6_dst_idev(dst)->dev; - - /* If not resolved, kick-off state machine towards resolution */ - if (!(neigh->nud_state & NUD_VALID)) - neigh_event_send(neigh, NULL); - - /* query neighbor until resolved or timeout */ - retry = QED_IP_RESOL_TIMEOUT; - while (!(neigh->nud_state & NUD_VALID) && retry > 0) { - msleep(1000); - retry--; - } - - if (neigh->nud_state & NUD_VALID) { - neigh_ha_snapshot((u8 *)hardware_address->sa_data, - neigh, *ndev); - hardware_address->sa_family = (*ndev)->type; - rc = 0; - } - - neigh_release(neigh); - - if (ipv6_addr_any(&fl6.saddr)) { - if (ipv6_dev_get_saddr(dev_net(*ndev), *ndev, - &fl6.daddr, 0, &fl6.saddr)) { - pr_err("Unable to find source IP address\n"); - goto out; - } - - local_addr->ss_family = AF_INET6; - ((struct sockaddr_in6 *)local_addr)->sin6_addr = - fl6.saddr; - } - } - - dst_release(dst); - -out: - - return rc; -} -EXPORT_SYMBOL(qed_route_ipv6); - -void qed_vlan_get_ndev(struct net_device **ndev, u16 *vlan_id) -{ - if 
(is_vlan_dev(*ndev)) { - *vlan_id = vlan_dev_vlan_id(*ndev); - *ndev = vlan_dev_real_dev(*ndev); - } -} -EXPORT_SYMBOL(qed_vlan_get_ndev); - -struct pci_dev *qed_validate_ndev(struct net_device *ndev) -{ - struct pci_dev *pdev = NULL; - struct net_device *upper; - - for_each_pci_dev(pdev) { - if (pdev && pdev->driver && - !strcmp(pdev->driver->name, "qede")) { - upper = pci_get_drvdata(pdev); - if (upper->ifindex == ndev->ifindex) - return pdev; - } - } - - return NULL; -} -EXPORT_SYMBOL(qed_validate_ndev); - -__be16 qed_get_in_port(struct sockaddr_storage *sa) -{ - return sa->ss_family == AF_INET - ? ((struct sockaddr_in *)sa)->sin_port - : ((struct sockaddr_in6 *)sa)->sin6_port; -} -EXPORT_SYMBOL(qed_get_in_port); - -int qed_fetch_tcp_port(struct sockaddr_storage local_ip_addr, - struct socket **sock, u16 *port) -{ - struct sockaddr_storage sa; - int rc = 0; - - rc = sock_create(local_ip_addr.ss_family, SOCK_STREAM, IPPROTO_TCP, - sock); - if (rc) { - pr_warn("failed to create socket: %d\n", rc); - goto err; - } - - (*sock)->sk->sk_allocation = GFP_KERNEL; - sk_set_memalloc((*sock)->sk); - - rc = kernel_bind(*sock, (struct sockaddr *)&local_ip_addr, - sizeof(local_ip_addr)); - - if (rc) { - pr_warn("failed to bind socket: %d\n", rc); - goto err_sock; - } - - rc = kernel_getsockname(*sock, (struct sockaddr *)&sa); - if (rc < 0) { - pr_warn("getsockname() failed: %d\n", rc); - goto err_sock; - } - - *port = ntohs(qed_get_in_port(&sa)); - - return 0; - -err_sock: - sock_release(*sock); - sock = NULL; -err: - - return rc; -} -EXPORT_SYMBOL(qed_fetch_tcp_port); - -void qed_return_tcp_port(struct socket *sock) -{ - if (sock && sock->sk) { - tcp_set_state(sock->sk, TCP_CLOSE); - sock_release(sock); - } -} -EXPORT_SYMBOL(qed_return_tcp_port); diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index b242000a77fd..b7cc36589f59 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -748,6 +748,9 @@ qede_build_skb(struct qede_rx_queue *rxq, buf = page_address(bd->data) + bd->page_offset; skb = build_skb(buf, rxq->rx_buf_seg_size); + if (unlikely(!skb)) + return NULL; + skb_reserve(skb, pad); skb_put(skb, len); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 39176e765767..c9c8225f04d6 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -496,19 +496,19 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb) if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags)) { - DP_ERR(edev, "Timestamping in progress\n"); + DP_VERBOSE(edev, QED_MSG_DEBUG, "Timestamping in progress\n"); edev->ptp_skip_txts++; return; } if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) { - DP_ERR(edev, - "Tx timestamping was not enabled, this packet will not be timestamped\n"); + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Tx timestamping was not enabled, this pkt will not be timestamped\n"); clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags); edev->ptp_skip_txts++; } else if (unlikely(ptp->tx_skb)) { - DP_ERR(edev, - "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n"); + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Device supports a single outstanding pkt to ts, It will not be ts\n"); clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags); edev->ptp_skip_txts++; } else { diff --git 
a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h index 63f0d2d0e87b..b202184eddd4 100644 --- a/drivers/net/ethernet/realtek/atp.h +++ b/drivers/net/ethernet/realtek/atp.h @@ -255,10 +255,6 @@ static inline void write_word_mode0(short ioaddr, unsigned short value) #define EE_DATA_WRITE 0x01 /* EEPROM chip data in. */ #define EE_DATA_READ 0x08 /* EEPROM chip data out. */ -/* Delay between EEPROM clock transitions. */ -#define eeprom_delay(ticks) \ -do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; } } while (0) - /* The EEPROM commands include the alway-set leading bit. */ #define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17) #define EE_READ(offset) (((6 << 6) + (offset)) << 17) diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index 8bd01c429f91..5ba98769b52b 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -8,7 +8,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \ ef100.o ef100_nic.o ef100_netdev.o \ ef100_ethtool.o ef100_rx.o ef100_tx.o sfc-$(CONFIG_SFC_MTD) += mtd.o -sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o +sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o ef100_sriov.o obj-$(CONFIG_SFC) += sfc.o diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 50d535981a35..c9ee5011803f 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -2256,7 +2256,7 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb, * guaranteed to satisfy the second as we only attempt TSO if * inner_network_header <= 208. */ - ip_tot_len = -EFX_TSO2_MAX_HDRLEN; + ip_tot_len = 0x10000 - EFX_TSO2_MAX_HDRLEN; EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN + (tcp->doff << 2u) > ip_tot_len); diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c index ffdb36715a49..173f0ecebc70 100644 --- a/drivers/net/ethernet/sfc/ef100.c +++ b/drivers/net/ethernet/sfc/ef100.c @@ -2,7 +2,7 @@ /**************************************************************************** * Driver for Solarflare network controllers and boards * Copyright 2005-2018 Solarflare Communications Inc. - * Copyright 2019-2020 Xilinx Inc. + * Copyright 2019-2022 Xilinx Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -17,6 +17,7 @@ #include "io.h" #include "ef100_nic.h" #include "ef100_netdev.h" +#include "ef100_sriov.h" #include "ef100_regs.h" #include "ef100.h" @@ -436,6 +437,10 @@ static void ef100_pci_remove(struct pci_dev *pci_dev) * blocks, so we have to do it before PCI removal. 
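 * The SR-IOV teardown added below obeys the same ordering rule: VFs
 * are disabled via efx_ef100_pci_sriov_disable() before ef100_remove()
 * tears the PF down.  The sriov_configure hook added further below is
 * the standard PCI one driven by the sriov_numvfs sysfs attribute; by
 * convention it returns the VF count on success and treats a count of
 * 0 as a request to disable SR-IOV.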
*/ unregister_netdevice_notifier(&efx->netdev_notifier); +#if defined(CONFIG_SFC_SRIOV) + if (!efx->type->is_vf) + efx_ef100_pci_sriov_disable(efx); +#endif ef100_remove(efx); efx_fini_io(efx); netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); @@ -524,6 +529,23 @@ fail: return rc; } +#ifdef CONFIG_SFC_SRIOV +static int ef100_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + struct efx_nic *efx = pci_get_drvdata(dev); + int rc; + + if (efx->type->sriov_configure) { + rc = efx->type->sriov_configure(efx, num_vfs); + if (rc) + return rc; + else + return num_vfs; + } + return -ENOENT; +} +#endif + /* PCI device ID table */ static const struct pci_device_id ef100_pci_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x0100), /* Riverhead PF */ @@ -538,6 +560,9 @@ struct pci_driver ef100_pci_driver = { .id_table = ef100_pci_table, .probe = ef100_pci_probe, .remove = ef100_pci_remove, +#ifdef CONFIG_SFC_SRIOV + .sriov_configure = ef100_pci_sriov_configure, +#endif .err_handler = &efx_err_handlers, }; diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index a07cbf45a326..b04911bc8c57 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -2,7 +2,7 @@ /**************************************************************************** * Driver for Solarflare network controllers and boards * Copyright 2018 Solarflare Communications Inc. - * Copyright 2019-2020 Xilinx Inc. + * Copyright 2019-2022 Xilinx Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -22,6 +22,7 @@ #include "mcdi_filters.h" #include "ef100_rx.h" #include "ef100_tx.h" +#include "ef100_sriov.h" #include "ef100_netdev.h" #include "rx_common.h" @@ -787,6 +788,9 @@ const struct efx_nic_type ef100_pf_nic_type = { .update_stats = ef100_update_stats, .pull_stats = efx_mcdi_mac_pull_stats, .stop_stats = efx_mcdi_mac_stop_stats, +#ifdef CONFIG_SFC_SRIOV + .sriov_configure = efx_ef100_sriov_configure, +#endif /* Per-type bar/size configuration not used on ef100. Location of * registers is defined by extended capabilities. diff --git a/drivers/net/ethernet/sfc/ef100_sriov.c b/drivers/net/ethernet/sfc/ef100_sriov.c new file mode 100644 index 000000000000..664578176bfe --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_sriov.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2020-2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#include "ef100_sriov.h" +#include "ef100_nic.h" + +static int efx_ef100_pci_sriov_enable(struct efx_nic *efx, int num_vfs) +{ + struct pci_dev *dev = efx->pci_dev; + int rc; + + efx->vf_count = num_vfs; + rc = pci_enable_sriov(dev, num_vfs); + if (rc) + goto fail; + + return 0; + +fail: + netif_err(efx, probe, efx->net_dev, "Failed to enable SRIOV VFs\n"); + efx->vf_count = 0; + return rc; +} + +int efx_ef100_pci_sriov_disable(struct efx_nic *efx) +{ + struct pci_dev *dev = efx->pci_dev; + unsigned int vfs_assigned; + + vfs_assigned = pci_vfs_assigned(dev); + if (vfs_assigned) { + netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " + "please detach them before disabling SR-IOV\n"); + return -EBUSY; + } + + pci_disable_sriov(dev); + + return 0; +} + +int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs) +{ + if (num_vfs == 0) + return efx_ef100_pci_sriov_disable(efx); + else + return efx_ef100_pci_sriov_enable(efx, num_vfs); +} diff --git a/drivers/net/ethernet/sfc/ef100_sriov.h b/drivers/net/ethernet/sfc/ef100_sriov.h new file mode 100644 index 000000000000..c48fccd46c57 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_sriov.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2020-2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#include "net_driver.h" + +int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs); +int efx_ef100_pci_sriov_disable(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index daf0c00c1242..c05a83da9e44 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -28,7 +28,6 @@ static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct ef100_enqueue_skb, __efx_enqueue_skb, tx_queue, skb); } -void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); void efx_xmit_done_single(struct efx_tx_queue *tx_queue); int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, void *type_data); diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index f9064532beb6..eec80b024195 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -51,28 +51,7 @@ MODULE_PARM_DESC(irq_adapt_high_thresh, */ static int napi_weight = 64; -/*************** - * Housekeeping - ***************/ - -int efx_channel_dummy_op_int(struct efx_channel *channel) -{ - return 0; -} - -void efx_channel_dummy_op_void(struct efx_channel *channel) -{ -} - -static const struct efx_channel_type efx_default_channel_type = { - .pre_probe = efx_channel_dummy_op_int, - .post_remove = efx_channel_dummy_op_void, - .get_name = efx_get_channel_name, - .copy = efx_copy_channel, - .want_txqs = efx_default_channel_want_txqs, - .keep_eventq = false, - .want_pio = true, -}; +static const struct efx_channel_type efx_default_channel_type; /************* * INTERRUPTS @@ -619,6 +598,7 @@ void efx_fini_channels(struct efx_nic *efx) /* Allocate and initialise a channel structure, copying parameters * (but not resources) from an old channel structure. 
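 * (This helper is only reached through the .copy hook of the default
 * channel type, which is why it can become static below.)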
*/ +static struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel) { struct efx_rx_queue *rx_queue; @@ -696,7 +676,8 @@ fail: return rc; } -void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len) +static void efx_get_channel_name(struct efx_channel *channel, char *buf, + size_t len) { struct efx_nic *efx = channel->efx; const char *type; @@ -786,6 +767,85 @@ void efx_remove_channels(struct efx_nic *efx) kfree(efx->xdp_tx_queues); } +static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, + struct efx_tx_queue *tx_queue) +{ + if (xdp_queue_number >= efx->xdp_tx_queue_count) + return -EINVAL; + + netif_dbg(efx, drv, efx->net_dev, + "Channel %u TXQ %u is XDP %u, HW %u\n", + tx_queue->channel->channel, tx_queue->label, + xdp_queue_number, tx_queue->queue); + efx->xdp_tx_queues[xdp_queue_number] = tx_queue; + return 0; +} + +static void efx_set_xdp_channels(struct efx_nic *efx) +{ + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + unsigned int next_queue = 0; + int xdp_queue_number = 0; + int rc; + + /* We need to mark which channels really have RX and TX + * queues, and adjust the TX queue numbers if we have separate + * RX-only and TX-only channels. + */ + efx_for_each_channel(channel, efx) { + if (channel->channel < efx->tx_channel_offset) + continue; + + if (efx_channel_is_xdp_tx(channel)) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->queue = next_queue++; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, + tx_queue); + if (rc == 0) + xdp_queue_number++; + } + } else { + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->queue = next_queue++; + netif_dbg(efx, drv, efx->net_dev, + "Channel %u TXQ %u is HW %u\n", + channel->channel, tx_queue->label, + tx_queue->queue); + } + + /* If XDP is borrowing queues from net stack, it must + * use the queue with no csum offload, which is the + * first one of the channel + * (note: tx_queue_by_type is not initialized yet) + */ + if (efx->xdp_txq_queues_mode == + EFX_XDP_TX_QUEUES_BORROWED) { + tx_queue = &channel->tx_queue[0]; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, + tx_queue); + if (rc == 0) + xdp_queue_number++; + } + } + } + WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number != efx->xdp_tx_queue_count); + WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number > efx->xdp_tx_queue_count); + + /* If we have more CPUs than assigned XDP TX queues, assign the already + * existing queues to the exceeding CPUs + */ + next_queue = 0; + while (xdp_queue_number < efx->xdp_tx_queue_count) { + tx_queue = efx->xdp_tx_queues[next_queue++]; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); + if (rc == 0) + xdp_queue_number++; + } +} + int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) { struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; @@ -857,6 +917,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) efx_init_napi_channel(efx->channel[i]); } + efx_set_xdp_channels(efx); out: /* Destroy unused channel structures */ for (i = 0; i < efx->n_channels; i++) { @@ -889,26 +950,9 @@ rollback: goto out; } -static inline int -efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, - struct efx_tx_queue *tx_queue) -{ - if (xdp_queue_number >= efx->xdp_tx_queue_count) - return -EINVAL; - - netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", - tx_queue->channel->channel, 
tx_queue->label, - xdp_queue_number, tx_queue->queue); - efx->xdp_tx_queues[xdp_queue_number] = tx_queue; - return 0; -} - int efx_set_channels(struct efx_nic *efx) { - struct efx_tx_queue *tx_queue; struct efx_channel *channel; - unsigned int next_queue = 0; - int xdp_queue_number; int rc; efx->tx_channel_offset = @@ -926,61 +970,14 @@ int efx_set_channels(struct efx_nic *efx) return -ENOMEM; } - /* We need to mark which channels really have RX and TX - * queues, and adjust the TX queue numbers if we have separate - * RX-only and TX-only channels. - */ - xdp_queue_number = 0; efx_for_each_channel(channel, efx) { if (channel->channel < efx->n_rx_channels) channel->rx_queue.core_index = channel->channel; else channel->rx_queue.core_index = -1; - - if (channel->channel >= efx->tx_channel_offset) { - if (efx_channel_is_xdp_tx(channel)) { - efx_for_each_channel_tx_queue(tx_queue, channel) { - tx_queue->queue = next_queue++; - rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); - if (rc == 0) - xdp_queue_number++; - } - } else { - efx_for_each_channel_tx_queue(tx_queue, channel) { - tx_queue->queue = next_queue++; - netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n", - channel->channel, tx_queue->label, - tx_queue->queue); - } - - /* If XDP is borrowing queues from net stack, it must use the queue - * with no csum offload, which is the first one of the channel - * (note: channel->tx_queue_by_type is not initialized yet) - */ - if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) { - tx_queue = &channel->tx_queue[0]; - rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); - if (rc == 0) - xdp_queue_number++; - } - } - } } - WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED && - xdp_queue_number != efx->xdp_tx_queue_count); - WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED && - xdp_queue_number > efx->xdp_tx_queue_count); - /* If we have more CPUs than assigned XDP TX queues, assign the already - * existing queues to the exceeding CPUs - */ - next_queue = 0; - while (xdp_queue_number < efx->xdp_tx_queue_count) { - tx_queue = efx->xdp_tx_queues[next_queue++]; - rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); - if (rc == 0) - xdp_queue_number++; - } + efx_set_xdp_channels(efx); rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); if (rc) @@ -988,7 +985,7 @@ int efx_set_channels(struct efx_nic *efx) return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); } -bool efx_default_channel_want_txqs(struct efx_channel *channel) +static bool efx_default_channel_want_txqs(struct efx_channel *channel) { return channel->channel - channel->efx->tx_channel_offset < channel->efx->n_tx_channels; @@ -1124,7 +1121,7 @@ void efx_start_channels(struct efx_nic *efx) struct efx_rx_queue *rx_queue; struct efx_channel *channel; - efx_for_each_channel(channel, efx) { + efx_for_each_channel_rev(channel, efx) { efx_for_each_channel_tx_queue(tx_queue, channel) { efx_init_tx_queue(tx_queue); atomic_inc(&efx->active_queues); @@ -1346,3 +1343,26 @@ void efx_fini_napi(struct efx_nic *efx) efx_for_each_channel(channel, efx) efx_fini_napi_channel(channel); } + +/*************** + * Housekeeping + ***************/ + +static int efx_channel_dummy_op_int(struct efx_channel *channel) +{ + return 0; +} + +void efx_channel_dummy_op_void(struct efx_channel *channel) +{ +} + +static const struct efx_channel_type efx_default_channel_type = { + .pre_probe = efx_channel_dummy_op_int, + .post_remove = efx_channel_dummy_op_void, + 
.get_name = efx_get_channel_name, + .copy = efx_copy_channel, + .want_txqs = efx_default_channel_want_txqs, + .keep_eventq = false, + .want_pio = true, +}; diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h index d77ec1f77fb1..64abb99a56b8 100644 --- a/drivers/net/ethernet/sfc/efx_channels.h +++ b/drivers/net/ethernet/sfc/efx_channels.h @@ -32,16 +32,13 @@ void efx_fini_eventq(struct efx_channel *channel); void efx_remove_eventq(struct efx_channel *channel); int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); -void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len); void efx_set_channel_names(struct efx_nic *efx); int efx_init_channels(struct efx_nic *efx); int efx_probe_channels(struct efx_nic *efx); int efx_set_channels(struct efx_nic *efx); -bool efx_default_channel_want_txqs(struct efx_channel *channel); void efx_remove_channel(struct efx_channel *channel); void efx_remove_channels(struct efx_nic *efx); void efx_fini_channels(struct efx_nic *efx); -struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel); void efx_start_channels(struct efx_nic *efx); void efx_stop_channels(struct efx_nic *efx); @@ -50,7 +47,6 @@ void efx_init_napi(struct efx_nic *efx); void efx_fini_napi_channel(struct efx_channel *channel); void efx_fini_napi(struct efx_nic *efx); -int efx_channel_dummy_op_int(struct efx_channel *channel); void efx_channel_dummy_op_void(struct efx_channel *channel); #endif diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index af37c990217e..f6577e74d6e6 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -51,8 +51,8 @@ static unsigned int efx_monitor_interval = 1 * HZ; /* Default stats update time */ #define STATS_PERIOD_MS_DEFAULT 1000 -const unsigned int efx_reset_type_max = RESET_TYPE_MAX; -const char *const efx_reset_type_names[] = { +static const unsigned int efx_reset_type_max = RESET_TYPE_MAX; +static const char *const efx_reset_type_names[] = { [RESET_TYPE_INVISIBLE] = "INVISIBLE", [RESET_TYPE_ALL] = "ALL", [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL", diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c index 0c6cc2191369..6bbdb5d2eebf 100644 --- a/drivers/net/ethernet/sfc/falcon/rx.c +++ b/drivers/net/ethernet/sfc/falcon/rx.c @@ -718,12 +718,14 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx, struct ef4_rx_queue *rx_queue) { unsigned int bufs_in_recycle_ring, page_ring_size; + struct iommu_domain __maybe_unused *domain; /* Set the RX recycle ring size */ #ifdef CONFIG_PPC64 bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU; #else - if (iommu_present(&pci_bus_type)) + domain = iommu_get_domain_for_dev(&efx->pci_dev->dev); + if (domain && domain->type != IOMMU_DOMAIN_IDENTITY) bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU; else bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU; diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 148dcd48b58d..9599123bc28d 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -16,6 +16,7 @@ #include "bitfield.h" #include "efx.h" #include "rx_common.h" +#include "tx_common.h" #include "nic.h" #include "farch_regs.h" #include "sriov.h" diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index d3fcbf930dba..ff617b1b38d3 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ 
b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -73,8 +73,8 @@ * \------------------------------ Resync (always set) * * The client writes its request into MC shared memory, and rings the - * doorbell. Each request is completed by either by the MC writting - * back into shared memory, or by writting out an event. + * doorbell. Each request is completed either by the MC writing + * back into shared memory, or by writing out an event. * * All MCDI commands support completion by shared memory response. Each * request may also contain additional data (accounted for by HEADER.LEN), diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index c75dc75e2857..318db906a154 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -612,11 +612,6 @@ extern const unsigned int efx_loopback_mode_max; #define LOOPBACK_MODE(efx) \ STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) -extern const char *const efx_reset_type_names[]; -extern const unsigned int efx_reset_type_max; -#define RESET_TYPE(type) \ - STRING_TABLE_LOOKUP(type, efx_reset_type) - enum efx_int_mode { /* Be careful if altering to correct macro below */ EFX_INT_MODE_MSIX = 0, diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index 1b22c7be0088..fa8b9aacca11 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -150,6 +150,9 @@ static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue) struct efx_nic *efx = rx_queue->efx; int i; + if (unlikely(!rx_queue->page_ring)) + return; + /* Unmap and release the pages in the recycle ring. Remove the ring. */ for (i = 0; i <= rx_queue->page_ptr_mask; i++) { struct page *page = rx_queue->page_ring[i]; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index d16e031e95f4..138bca611341 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -443,6 +443,9 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, if (unlikely(!tx_queue)) return -EINVAL; + if (!tx_queue->initialised) + return -EINVAL; + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu); @@ -524,7 +527,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, /* PTP "event" packet */ if (unlikely(efx_xmit_with_hwtstamp(skb)) && - unlikely(efx_ptp_is_ptp_tx(efx, skb))) { + ((efx_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) || + unlikely(efx_ptp_is_ptp_tx(efx, skb)))) { /* There may be existing transmits on the channel that are * waiting for this packet to trigger the doorbell write. * We need to send the packets at this point. 
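 * (With the widened condition above, any skb carrying a hardware
 * timestamp request takes this path once MAC TX timestamping is in use
 * and efx->ptp_data is set, not only frames classified as PTP.)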
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c index d530cde2b864..9bc8281b7f5b 100644 --- a/drivers/net/ethernet/sfc/tx_common.c +++ b/drivers/net/ethernet/sfc/tx_common.c @@ -101,6 +101,8 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, "shutting down TX queue %d\n", tx_queue->queue); + tx_queue->initialised = false; + if (!tx_queue->buffer) return; diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index d937af18973e..0c68c7f8056d 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -1585,7 +1585,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev->netdev_ops = &smsc9420_netdev_ops; dev->ethtool_ops = &smsc9420_ethtool_ops; - netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT); + netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_POLL_WEIGHT); result = register_netdev(dev); if (result) { diff --git a/drivers/net/ethernet/smsc/smsc9420.h b/drivers/net/ethernet/smsc/smsc9420.h index 409e82b2018a..876410a256c6 100644 --- a/drivers/net/ethernet/smsc/smsc9420.h +++ b/drivers/net/ethernet/smsc/smsc9420.h @@ -15,7 +15,6 @@ /* interrupt deassertion in multiples of 10us */ #define INT_DEAS_TIME (50) -#define NAPI_WEIGHT (64) #define SMSC_BAR (3) #ifdef __BIG_ENDIAN diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c index cd478d2cd871..00f6d347eaf7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c @@ -57,10 +57,6 @@ #define TSE_PCS_USE_SGMII_ENA BIT(0) #define TSE_PCS_IF_USE_SGMII 0x03 -#define SGMII_ADAPTER_CTRL_REG 0x00 -#define SGMII_ADAPTER_DISABLE 0x0001 -#define SGMII_ADAPTER_ENABLE 0x0000 - #define AUTONEGO_LINK_TIMER 20 static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs) @@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev, unsigned int speed) { void __iomem *tse_pcs_base = pcs->tse_pcs_base; - void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; u32 val; - writew(SGMII_ADAPTER_ENABLE, - sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); - pcs->autoneg = phy_dev->autoneg; if (phy_dev->autoneg == AUTONEG_ENABLE) { diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h index 442812c0a4bd..694ac25ef426 100644 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h @@ -10,6 +10,10 @@ #include <linux/phy.h> #include <linux/timer.h> +#define SGMII_ADAPTER_CTRL_REG 0x00 +#define SGMII_ADAPTER_ENABLE 0x0000 +#define SGMII_ADAPTER_DISABLE 0x0001 + struct tse_pcs { struct device *dev; void __iomem *tse_pcs_base; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index 84651207a1de..bd52fb7cf486 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -197,9 +197,9 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev) } if (of_machine_is_compatible("fsl,imx8mp")) { - /* Binding doc describes the propety: + /* Binding doc describes the property: is required by i.MX8MP. - is optinoal for i.MX8DXL. + is optional for i.MX8DXL. 
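+ A hypothetical example of the property (the exact phandle and any
+ argument cells are board-specific): intf_mode = <&gpr>;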
*/ dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode"); if (IS_ERR(dwmac->intf_regmap)) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index ecf759ee1c9f..017dbbda0c1c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -205,7 +205,7 @@ static const struct pci_device_id loongson_dwmac_id_table[] = { }; MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table); -struct pci_driver loongson_dwmac_driver = { +static struct pci_driver loongson_dwmac_driver = { .name = "dwmac-loongson-pci", .id_table = loongson_dwmac_id_table, .probe = loongson_dwmac_probe, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index b7c2579c963b..6b447d8f0bd8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -18,9 +18,6 @@ #include "altr_tse_pcs.h" -#define SGMII_ADAPTER_CTRL_REG 0x00 -#define SGMII_ADAPTER_DISABLE 0x0001 - #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2 @@ -62,14 +59,13 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) { struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv; void __iomem *splitter_base = dwmac->splitter_base; - void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base; void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base; struct device *dev = dwmac->dev; struct net_device *ndev = dev_get_drvdata(dev); struct phy_device *phy_dev = ndev->phydev; u32 val; - if ((tse_pcs_base) && (sgmii_adapter_base)) + if (sgmii_adapter_base) writew(SGMII_ADAPTER_DISABLE, sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); @@ -93,8 +89,11 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG); } - if (tse_pcs_base && sgmii_adapter_base) + if (phy_dev && sgmii_adapter_base) { + writew(SGMII_ADAPTER_ENABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed); + } } static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 22fea0f67245..92d32940aff0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -71,9 +71,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) writel(value, ioaddr + PTP_TCR); /* wait for present system time initialize to complete */ - return readl_poll_timeout(ioaddr + PTP_TCR, value, + return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value, !(value & PTP_TCR_TSINIT), - 10000, 100000); + 10, 100000); } static int config_addend(void __iomem *ioaddr, u32 addend) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 4a4b3651ab3e..7c834c02e084 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3643,11 +3643,9 @@ static int stmmac_open(struct net_device *dev) u32 chan; int ret; - ret = pm_runtime_get_sync(priv->device); - if (ret < 0) { - pm_runtime_put_noidle(priv->device); + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) return ret; - } if 
(priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI && @@ -5886,11 +5884,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr) struct stmmac_priv *priv = netdev_priv(ndev); int ret = 0; - ret = pm_runtime_get_sync(priv->device); - if (ret < 0) { - pm_runtime_put_noidle(priv->device); + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) return ret; - } ret = eth_mac_addr(ndev, addr); if (ret) @@ -6220,11 +6216,9 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi bool is_double = false; int ret; - ret = pm_runtime_get_sync(priv->device); - if (ret < 0) { - pm_runtime_put_noidle(priv->device); + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) return ret; - } if (be16_to_cpu(proto) == ETH_P_8021AD) is_double = true; @@ -6565,7 +6559,7 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) return -ENETDOWN; if (!stmmac_xdp_is_enabled(priv)) - return -ENXIO; + return -EINVAL; if (queue >= priv->plat->rx_queues_to_use || queue >= priv->plat->tx_queues_to_use) @@ -6576,7 +6570,7 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) ch = &priv->channel[queue]; if (!rx_q->xsk_pool && !tx_q->xsk_pool) - return -ENXIO; + return -EINVAL; if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { /* EQoS does not have per-DMA channel SW interrupt, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index a5d150c5f3d8..9bc625fccca0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -88,11 +88,9 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) u32 tmp, addr, value = MII_XGMAC_BUSY; int ret; - ret = pm_runtime_get_sync(priv->device); - if (ret < 0) { - pm_runtime_put_noidle(priv->device); + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) return ret; - } /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, @@ -156,11 +154,9 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, u32 addr, tmp, value = MII_XGMAC_BUSY; int ret; - ret = pm_runtime_get_sync(priv->device); - if (ret < 0) { - pm_runtime_put_noidle(priv->device); + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) return ret; - } /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, @@ -229,11 +225,9 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) int data = 0; u32 v; - data = pm_runtime_get_sync(priv->device); - if (data < 0) { - pm_runtime_put_noidle(priv->device); + data = pm_runtime_resume_and_get(priv->device); + if (data < 0) return data; - } value |= (phyaddr << priv->hw->mii.addr_shift) & priv->hw->mii.addr_mask; @@ -297,11 +291,9 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, u32 value = MII_BUSY; u32 v; - ret = pm_runtime_get_sync(priv->device); - if (ret < 0) { - pm_runtime_put_noidle(priv->device); + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) return ret; - } value |= (phyaddr << priv->hw->mii.addr_shift) & priv->hw->mii.addr_mask; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 5d29f336315b..11e1055e8260 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -431,8 
+431,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) plat->phylink_node = np; /* Get max speed of operation from device tree */ - if (of_property_read_u32(np, "max-speed", &plat->max_speed)) - plat->max_speed = -1; + of_property_read_u32(np, "max-speed", &plat->max_speed); plat->bus_id = of_alias_get_id(np, "ethernet"); if (plat->bus_id < 0) diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index affcf92cd3aa..fb30bc5d56cb 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -94,6 +94,7 @@ config TI_K3_AM65_CPSW_NUSS depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER select NET_DEVLINK select TI_DAVINCI_MDIO + select PHYLINK imply PHY_TI_GMII_SEL depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS help diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c index 72acdf802258..abc1e4276cf0 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c +++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c @@ -380,11 +380,9 @@ static int am65_cpsw_ethtool_op_begin(struct net_device *ndev) struct am65_cpsw_common *common = am65_ndev_to_common(ndev); int ret; - ret = pm_runtime_get_sync(common->dev); - if (ret < 0) { + ret = pm_runtime_resume_and_get(common->dev); + if (ret < 0) dev_err(common->dev, "ethtool begin failed %d\n", ret); - pm_runtime_put_noidle(common->dev); - } return ret; } diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index d2747e9db286..b7ebd741f284 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -173,11 +173,9 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev, if (!netif_running(ndev) || !vid) return 0; - ret = pm_runtime_get_sync(common->dev); - if (ret < 0) { - pm_runtime_put_noidle(common->dev); + ret = pm_runtime_resume_and_get(common->dev); + if (ret < 0) return ret; - } port_mask = BIT(port->port_id) | ALE_PORT_HOST; if (!vid) @@ -203,11 +201,9 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev, if (!netif_running(ndev) || !vid) return 0; - ret = pm_runtime_get_sync(common->dev); - if (ret < 0) { - pm_runtime_put_noidle(common->dev); + ret = pm_runtime_resume_and_get(common->dev); + if (ret < 0) return ret; - } dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid); ret = cpsw_ale_del_vlan(common->ale, vid, @@ -557,11 +553,9 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev) struct am65_cpsw_port *port = am65_ndev_to_port(ndev); int ret, i; - ret = pm_runtime_get_sync(common->dev); - if (ret < 0) { - pm_runtime_put_noidle(common->dev); + ret = pm_runtime_resume_and_get(common->dev); + if (ret < 0) return ret; - } /* Notify the stack of the actual queue counts. 
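 * netif_set_real_num_tx_queues() below publishes common->tx_ch_num so
 * the core never selects a TX queue this port does not actually have.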
*/ ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num); @@ -1214,11 +1208,9 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev, if (ret < 0) return ret; - ret = pm_runtime_get_sync(common->dev); - if (ret < 0) { - pm_runtime_put_noidle(common->dev); + ret = pm_runtime_resume_and_get(common->dev); + if (ret < 0) return ret; - } cpsw_ale_del_ucast(common->ale, ndev->dev_addr, HOST_PORT_NUM, 0, 0); @@ -2692,9 +2684,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) common->bus_freq = clk_get_rate(clk); pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret < 0) { - pm_runtime_put_noidle(dev); pm_runtime_disable(dev); return ret; } @@ -2789,11 +2780,9 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev) common = dev_get_drvdata(dev); - ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) { - pm_runtime_put_noidle(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) return ret; - } am65_cpsw_nuss_phylink_cleanup(common); am65_cpsw_unregister_devlink(common); diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c index ebcc6386cc34..aa32dd905e2b 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-qos.c +++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c @@ -8,10 +8,12 @@ #include <linux/pm_runtime.h> #include <linux/time.h> +#include <net/pkt_cls.h> #include "am65-cpsw-nuss.h" #include "am65-cpsw-qos.h" #include "am65-cpts.h" +#include "cpsw_ale.h" #define AM65_CPSW_REG_CTL 0x004 #define AM65_CPSW_PN_REG_CTL 0x004 @@ -588,12 +590,190 @@ static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data) return am65_cpsw_set_taprio(ndev, type_data); } +static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port, + struct netlink_ext_ack *extack, + struct flow_cls_offload *cls, + u64 rate_pkt_ps) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00}; + struct am65_cpsw_qos *qos = &port->qos; + struct flow_match_eth_addrs match; + int ret; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported keys used"); + return -EOPNOTSUPP; + } + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address"); + return -EOPNOTSUPP; + } + + flow_rule_match_eth_addrs(rule, &match); + + if (!is_zero_ether_addr(match.mask->src)) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on source MAC not supported"); + return -EOPNOTSUPP; + } + + if (is_broadcast_ether_addr(match.key->dst) && + is_broadcast_ether_addr(match.mask->dst)) { + ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps); + if (ret) + return ret; + + qos->ale_bc_ratelimit.cookie = cls->cookie; + qos->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps; + } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) && + ether_addr_equal_unaligned(match.mask->dst, mc_mac)) { + ret = cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, rate_pkt_ps); + if (ret) + return ret; + + qos->ale_mc_ratelimit.cookie = cls->cookie; + qos->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps; + } else { + NL_SET_ERR_MSG_MOD(extack, "Not supported matching key"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int 
am65_cpsw_qos_clsflower_policer_validate(const struct flow_action *action, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, act)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but action is not last"); + return -EOPNOTSUPP; + } + + if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps || + act->police.avrate || act->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when bytes per second/peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int am65_cpsw_qos_configure_clsflower(struct am65_cpsw_port *port, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; + const struct flow_action_entry *act; + int i, ret; + + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_POLICE: + ret = am65_cpsw_qos_clsflower_policer_validate(&rule->action, act, extack); + if (ret) + return ret; + + return am65_cpsw_qos_clsflower_add_policer(port, extack, cls, + act->police.rate_pkt_ps); + default: + NL_SET_ERR_MSG_MOD(extack, + "Action not supported"); + return -EOPNOTSUPP; + } + } + return -EOPNOTSUPP; +} + +static int am65_cpsw_qos_delete_clsflower(struct am65_cpsw_port *port, struct flow_cls_offload *cls) +{ + struct am65_cpsw_qos *qos = &port->qos; + + if (cls->cookie == qos->ale_bc_ratelimit.cookie) { + qos->ale_bc_ratelimit.cookie = 0; + qos->ale_bc_ratelimit.rate_packet_ps = 0; + cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, 0); + } + + if (cls->cookie == qos->ale_mc_ratelimit.cookie) { + qos->ale_mc_ratelimit.cookie = 0; + qos->ale_mc_ratelimit.rate_packet_ps = 0; + cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, 0); + } + + return 0; +} + +static int am65_cpsw_qos_setup_tc_clsflower(struct am65_cpsw_port *port, + struct flow_cls_offload *cls_flower) +{ + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return am65_cpsw_qos_configure_clsflower(port, cls_flower); + case FLOW_CLS_DESTROY: + return am65_cpsw_qos_delete_clsflower(port, cls_flower); + default: + return -EOPNOTSUPP; + } +} + +static int am65_cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ + struct am65_cpsw_port *port = cb_priv; + + if (!tc_cls_can_offload_and_chain0(port->ndev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return am65_cpsw_qos_setup_tc_clsflower(port, type_data); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(am65_cpsw_qos_block_cb_list); + +static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f) +{ + struct am65_cpsw_port *port = am65_ndev_to_port(ndev); + + return flow_block_cb_setup_simple(f, &am65_cpsw_qos_block_cb_list, + am65_cpsw_qos_setup_tc_block_cb, + port, port, true); +} + int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, void *type_data) { switch (type) { case 
TC_SETUP_QDISC_TAPRIO: return am65_cpsw_setup_taprio(ndev, type_data); + case TC_SETUP_BLOCK: + return am65_cpsw_qos_setup_tc_block(ndev, type_data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.h b/drivers/net/ethernet/ti/am65-cpsw-qos.h index e8f1b6b59e93..fb223b43b196 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-qos.h +++ b/drivers/net/ethernet/ti/am65-cpsw-qos.h @@ -14,11 +14,19 @@ struct am65_cpsw_est { struct tc_taprio_qopt_offload taprio; }; +struct am65_cpsw_ale_ratelimit { + unsigned long cookie; + u64 rate_packet_ps; +}; + struct am65_cpsw_qos { struct am65_cpsw_est *est_admin; struct am65_cpsw_est *est_oper; ktime_t link_down_time; int link_speed; + + struct am65_cpsw_ale_ratelimit ale_bc_ratelimit; + struct am65_cpsw_ale_ratelimit ale_mc_ratelimit; }; int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 03575c017500..662435e36805 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -335,7 +335,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) static unsigned int cpsw_rxbuf_total_len(unsigned int len) { - len += CPSW_HEADROOM; + len += CPSW_HEADROOM_NA; len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); return SKB_DATA_ALIGN(len); @@ -756,11 +756,9 @@ static int cpsw_ndo_open(struct net_device *ndev) int ret; u32 reg; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } netif_carrier_off(ndev); @@ -968,11 +966,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } if (cpsw->data.dual_emac) { vid = cpsw->slaves[priv->emac_port].port_vlan; @@ -1052,11 +1048,9 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, if (vid == cpsw->data.default_vlan) return 0; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } if (cpsw->data.dual_emac) { /* In dual EMAC, reserved VLAN id should not be used for @@ -1090,11 +1084,9 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, if (vid == cpsw->data.default_vlan) return 0; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } if (cpsw->data.dual_emac) { int i; @@ -1567,11 +1559,9 @@ static int cpsw_probe(struct platform_device *pdev) /* Need to enable clocks with runtime PM api to access module * registers */ - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - pm_runtime_put_noidle(dev); + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) goto clean_runtime_disable_ret; - } ret = cpsw_probe_dt(&cpsw->data, pdev); if (ret) @@ -1649,10 +1639,10 @@ static int cpsw_probe(struct platform_device *pdev) ndev->ethtool_ops = &cpsw_ethtool_ops; netif_napi_add(ndev, &cpsw->napi_rx, cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll, - CPSW_POLL_WEIGHT); + NAPI_POLL_WEIGHT); netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw->quirk_irq ? 
cpsw_tx_poll : cpsw_tx_mq_poll, - CPSW_POLL_WEIGHT); + NAPI_POLL_WEIGHT); /* register the network device */ SET_NETDEV_DEV(ndev, dev); @@ -1734,11 +1724,9 @@ static int cpsw_remove(struct platform_device *pdev) struct cpsw_common *cpsw = platform_get_drvdata(pdev); int i, ret; - ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) { - pm_runtime_put_noidle(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) return ret; - } for (i = 0; i < cpsw->data.slaves; i++) if (cpsw->slaves[i].ndev) diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 1ef0aaef5c61..231370e9a801 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -50,6 +50,8 @@ /* ALE_AGING_TIMER */ #define ALE_AGING_TIMER_MASK GENMASK(23, 0) +#define ALE_RATE_LIMIT_MIN_PPS 1000 + /** * struct ale_entry_fld - The ALE tbl entry field description * @start_bit: field start bit @@ -1136,6 +1138,50 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control) return tmp & BITMASK(info->bits); } +int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps) +{ + int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS; + u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS; + + if (ratelimit_pps && !val) { + dev_err(ale->params.dev, "ALE port:%d MC ratelimit min value 1000pps\n", port); + return -EINVAL; + } + + if (remainder) + dev_info(ale->params.dev, "ALE port:%d MC ratelimit set to %dpps (requested %d)\n", + port, ratelimit_pps - remainder, ratelimit_pps); + + cpsw_ale_control_set(ale, port, ALE_PORT_MCAST_LIMIT, val); + + dev_dbg(ale->params.dev, "ALE port:%d MC ratelimit set %d\n", + port, val * ALE_RATE_LIMIT_MIN_PPS); + return 0; +} + +int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps) +{ + int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS; + u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS; + + if (ratelimit_pps && !val) { + dev_err(ale->params.dev, "ALE port:%d BC ratelimit min value 1000pps\n", port); + return -EINVAL; + } + + if (remainder) + dev_info(ale->params.dev, "ALE port:%d BC ratelimit set to %dpps (requested %d)\n", + port, ratelimit_pps - remainder, ratelimit_pps); + + cpsw_ale_control_set(ale, port, ALE_PORT_BCAST_LIMIT, val); + + dev_dbg(ale->params.dev, "ALE port:%d BC ratelimit set %d\n", + port, val * ALE_RATE_LIMIT_MIN_PPS); + return 0; +} + static void cpsw_ale_timer(struct timer_list *t) { struct cpsw_ale *ale = from_timer(ale, t, timer); @@ -1199,6 +1245,26 @@ static void cpsw_ale_aging_stop(struct cpsw_ale *ale) void cpsw_ale_start(struct cpsw_ale *ale) { + unsigned long ale_prescale; + + /* configure Broadcast and Multicast Rate Limit + * number_of_packets = (Fclk / ALE_PRESCALE) * port.BCAST/MCAST_LIMIT + * ALE_PRESCALE width is 19bit and min value 0x10 + * port.BCAST/MCAST_LIMIT is 8bit + * + * To support multi-port configurations the ALE_PRESCALE is set to a 1ms interval, + * which allows port.BCAST/MCAST_LIMIT to be configured per port and achieves: + * min number_of_packets = 1000 when port.BCAST/MCAST_LIMIT = 1 + * max number_of_packets = 1000 * 255 = 255000 when port.BCAST/MCAST_LIMIT = 0xFF + */ + ale_prescale = ale->params.bus_freq / ALE_RATE_LIMIT_MIN_PPS; + writel((u32)ale_prescale, ale->params.ale_regs + ALE_PRESCALE); + + /* Allow MC/BC rate limiting globally. 
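+	 * (Worked example of the math above, assuming a 125 MHz bus clock:
+	 *  ALE_PRESCALE = 125000000 / 1000 = 125000, i.e. a 1ms window, so
+	 *  port.BCAST/MCAST_LIMIT = 2 allows 2 * 1000 = 2000 packets/s.)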
+ * The actual rate limit is enabled per port via port.BCAST/MCAST_LIMIT + */ + cpsw_ale_control_set(ale, 0, ALE_RATE_LIMIT, 1); + cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1); cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index 13fe47687fde..aba4572cfa3b 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -120,6 +120,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, int reg_mcast, int unreg_mcast); int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port); void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port); +int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps); +int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps); int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control); int cpsw_ale_control_set(struct cpsw_ale *ale, int port, diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index bd4b1528cf99..b33781ed760e 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -273,7 +273,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) static unsigned int cpsw_rxbuf_total_len(unsigned int len) { - len += CPSW_HEADROOM; + len += CPSW_HEADROOM_NA; len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); return SKB_DATA_ALIGN(len); @@ -449,11 +449,9 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, if (vid == cpsw->data.default_vlan) return 0; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } /* In dual EMAC, reserved VLAN id should not be used for * creating VLAN interfaces as this can break the dual @@ -498,6 +496,8 @@ static void cpsw_restore(struct cpsw_priv *priv) /* restore CBS offload */ cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv); + + cpsw_qos_clsflower_resume(priv); } static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw) @@ -829,11 +829,9 @@ static int cpsw_ndo_open(struct net_device *ndev) dev_info(priv->dev, "starting ndev. mode: %s\n", cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac"); - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } /* Notify the stack of the actual queue counts. */ ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num); @@ -985,11 +983,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } vid = cpsw->slaves[slave_no].port_vlan; flags = ALE_VLAN | ALE_SECURE; @@ -1024,11 +1020,9 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, if (vid == cpsw->data.default_vlan) return 0; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } /* reset the return code as pm_runtime_get_sync() can return * non-zero values as well. 
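 * pm_runtime_resume_and_get(), used throughout this series, wraps that
 * pattern: it returns 0 on success and, on failure, drops the usage
 * count with pm_runtime_put_noidle() before returning the error.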
@@ -1407,7 +1401,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw) cpsw->slaves[i].ndev = ndev; ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL; + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC; ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; @@ -1422,11 +1416,11 @@ static int cpsw_create_ports(struct cpsw_common *cpsw) netif_napi_add(ndev, &cpsw->napi_rx, cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll, - CPSW_POLL_WEIGHT); + NAPI_POLL_WEIGHT); netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll, - CPSW_POLL_WEIGHT); + NAPI_POLL_WEIGHT); } napi_ndev = ndev; @@ -1918,9 +1912,8 @@ static int cpsw_probe(struct platform_device *pdev) /* Need to enable clocks with runtime PM api to access module * registers */ - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret < 0) { - pm_runtime_put_noidle(dev); pm_runtime_disable(dev); return ret; } @@ -2045,11 +2038,9 @@ static int cpsw_remove(struct platform_device *pdev) struct cpsw_common *cpsw = platform_get_drvdata(pdev); int ret; - ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) { - pm_runtime_put_noidle(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) return ret; - } cpsw_unregister_notifiers(cpsw); cpsw_unregister_devlink(cpsw); diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 8f6817f346ba..758295c898ac 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -364,7 +364,7 @@ void cpsw_split_res(struct cpsw_common *cpsw) if (cpsw->tx_ch_num == rlim_ch_num) { max_rate = consumed_rate; } else if (!rlim_ch_num) { - ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num; + ch_budget = NAPI_POLL_WEIGHT / cpsw->tx_ch_num; bigest_rate = 0; max_rate = consumed_rate; } else { @@ -379,19 +379,19 @@ void cpsw_split_res(struct cpsw_common *cpsw) if (max_rate < consumed_rate) max_rate *= 10; - ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate; - ch_budget = (CPSW_POLL_WEIGHT - ch_budget) / + ch_budget = (consumed_rate * NAPI_POLL_WEIGHT) / max_rate; + ch_budget = (NAPI_POLL_WEIGHT - ch_budget) / (cpsw->tx_ch_num - rlim_ch_num); bigest_rate = (max_rate - consumed_rate) / (cpsw->tx_ch_num - rlim_ch_num); } /* split tx weight/budget */ - budget = CPSW_POLL_WEIGHT; + budget = NAPI_POLL_WEIGHT; for (i = 0; i < cpsw->tx_ch_num; i++) { ch_rate = cpdma_chan_get_rate(txv[i].ch); if (ch_rate) { - txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate; + txv[i].budget = (ch_rate * NAPI_POLL_WEIGHT) / max_rate; if (!txv[i].budget) txv[i].budget++; if (ch_rate > bigest_rate) { @@ -417,7 +417,7 @@ void cpsw_split_res(struct cpsw_common *cpsw) txv[bigest_rate_ch].budget += budget; /* split rx budget */ - budget = CPSW_POLL_WEIGHT; + budget = NAPI_POLL_WEIGHT; ch_budget = budget / cpsw->rx_ch_num; for (i = 0; i < cpsw->rx_ch_num; i++) { cpsw->rxv[i].budget = ch_budget; @@ -502,6 +502,7 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs, ale_params.ale_ageout = ale_ageout; ale_params.ale_ports = CPSW_ALE_PORTS_NUM; ale_params.dev_id = "cpsw"; + ale_params.bus_freq = cpsw->bus_freq_mhz * 1000000; cpsw->ale = cpsw_ale_create(&ale_params); if (IS_ERR(cpsw->ale)) { @@ -754,11 +755,9 @@ int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate) return -EINVAL; } - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = 
pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate); pm_runtime_put(cpsw->dev); @@ -970,11 +969,9 @@ static int cpsw_set_cbs(struct net_device *ndev, return -1; } - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } bw = qopt->enable ? qopt->idleslope : 0; ret = cpsw_set_fifo_rlimit(priv, fifo, bw); @@ -1008,11 +1005,9 @@ static int cpsw_set_mqprio(struct net_device *ndev, void *type_data) if (mqprio->mode != TC_MQPRIO_MODE_DCB) return -EINVAL; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } if (num_tc) { for (i = 0; i < 8; i++) { @@ -1048,6 +1043,8 @@ static int cpsw_set_mqprio(struct net_device *ndev, void *type_data) return 0; } +static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f); + int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, void *type_data) { @@ -1058,6 +1055,9 @@ int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, case TC_SETUP_QDISC_MQPRIO: return cpsw_set_mqprio(ndev, type_data); + case TC_SETUP_BLOCK: + return cpsw_qos_setup_tc_block(ndev, type_data); + default: return -EOPNOTSUPP; } @@ -1381,3 +1381,202 @@ drop: page_pool_recycle_direct(cpsw->page_pool[ch], page); return ret; } + +static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv, + struct netlink_ext_ack *extack, + struct flow_cls_offload *cls, + u64 rate_pkt_ps) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00}; + struct flow_match_eth_addrs match; + u32 port_id; + int ret; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported keys used"); + return -EOPNOTSUPP; + } + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address"); + return -EOPNOTSUPP; + } + + flow_rule_match_eth_addrs(rule, &match); + + if (!is_zero_ether_addr(match.mask->src)) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on source MAC not supported"); + return -EOPNOTSUPP; + } + + port_id = cpsw_slave_index(priv->cpsw, priv) + 1; + + if (is_broadcast_ether_addr(match.key->dst) && + is_broadcast_ether_addr(match.mask->dst)) { + ret = cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, rate_pkt_ps); + if (ret) + return ret; + + priv->ale_bc_ratelimit.cookie = cls->cookie; + priv->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps; + } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) && + ether_addr_equal_unaligned(match.mask->dst, mc_mac)) { + ret = cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, rate_pkt_ps); + if (ret) + return ret; + + priv->ale_mc_ratelimit.cookie = cls->cookie; + priv->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps; + } else { + NL_SET_ERR_MSG_MOD(extack, "Not supported matching key"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int cpsw_qos_clsflower_policer_validate(const struct flow_action *action, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when 
exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, act)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but action is not last"); + return -EOPNOTSUPP; + } + + if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps || + act->police.avrate || act->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when bytes per second/peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int cpsw_qos_configure_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; + const struct flow_action_entry *act; + int i, ret; + + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_POLICE: + ret = cpsw_qos_clsflower_policer_validate(&rule->action, act, extack); + if (ret) + return ret; + + return cpsw_qos_clsflower_add_policer(priv, extack, cls, + act->police.rate_pkt_ps); + default: + NL_SET_ERR_MSG_MOD(extack, "Action not supported"); + return -EOPNOTSUPP; + } + } + return -EOPNOTSUPP; +} + +static int cpsw_qos_delete_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls) +{ + u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1; + + if (cls->cookie == priv->ale_bc_ratelimit.cookie) { + priv->ale_bc_ratelimit.cookie = 0; + priv->ale_bc_ratelimit.rate_packet_ps = 0; + cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, 0); + } + + if (cls->cookie == priv->ale_mc_ratelimit.cookie) { + priv->ale_mc_ratelimit.cookie = 0; + priv->ale_mc_ratelimit.rate_packet_ps = 0; + cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, 0); + } + + return 0; +} + +static int cpsw_qos_setup_tc_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls_flower) +{ + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return cpsw_qos_configure_clsflower(priv, cls_flower); + case FLOW_CLS_DESTROY: + return cpsw_qos_delete_clsflower(priv, cls_flower); + default: + return -EOPNOTSUPP; + } +} + +static int cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ + struct cpsw_priv *priv = cb_priv; + int ret; + + if (!tc_cls_can_offload_and_chain0(priv->ndev, type_data)) + return -EOPNOTSUPP; + + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + pm_runtime_put_noidle(priv->dev); + return ret; + } + + switch (type) { + case TC_SETUP_CLSFLOWER: + ret = cpsw_qos_setup_tc_clsflower(priv, type_data); + break; + default: + ret = -EOPNOTSUPP; + } + + pm_runtime_put(priv->dev); + return ret; +} + +static LIST_HEAD(cpsw_qos_block_cb_list); + +static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + return flow_block_cb_setup_simple(f, &cpsw_qos_block_cb_list, + cpsw_qos_setup_tc_block_cb, + priv, priv, true); +} + +void cpsw_qos_clsflower_resume(struct cpsw_priv *priv) +{ + u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1; + + if (priv->ale_bc_ratelimit.cookie) + cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, + priv->ale_bc_ratelimit.rate_packet_ps); + + if (priv->ale_mc_ratelimit.cookie) + 
cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, + priv->ale_mc_ratelimit.rate_packet_ps); +} diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index 74555970730c..34230145ca0b 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -89,7 +89,6 @@ do { \ #define CPDMA_TXCP 0x40 #define CPDMA_RXCP 0x60 -#define CPSW_POLL_WEIGHT 64 #define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4 #define CPSW_MIN_PACKET_SIZE_VLAN (VLAN_ETH_ZLEN) #define CPSW_MIN_PACKET_SIZE (ETH_ZLEN) @@ -364,6 +363,11 @@ struct cpsw_common { u8 base_mac[ETH_ALEN]; }; +struct cpsw_ale_ratelimit { + unsigned long cookie; + u64 rate_packet_ps; +}; + struct cpsw_priv { struct net_device *ndev; struct device *dev; @@ -384,6 +388,8 @@ struct cpsw_priv { struct cpsw_common *cpsw; int offload_fwd_mark; u32 tx_packet_min; + struct cpsw_ale_ratelimit ale_bc_ratelimit; + struct cpsw_ale_ratelimit ale_mc_ratelimit; }; #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw) @@ -411,7 +417,6 @@ struct __aligned(sizeof(long)) cpsw_meta_xdp { /* The buf includes headroom compatible with both skb and xdpf */ #define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN) -#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long)) static inline int cpsw_is_xdpf_handle(void *handle) { @@ -462,6 +467,7 @@ int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, bool cpsw_shp_is_off(struct cpsw_priv *priv); void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv); void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv); +void cpsw_qos_clsflower_resume(struct cpsw_priv *priv); /* ethtool */ u32 cpsw_get_msglevel(struct net_device *ndev); diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 4b6aed78d392..2a3e4e842fa5 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -113,7 +113,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; #define EMAC_DEF_RX_NUM_DESC (128) #define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */ #define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */ -#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */ /* Buffer descriptor parameters */ #define EMAC_DEF_TX_MAX_SERVICE (32) /* TX max service BD's */ @@ -1422,9 +1421,8 @@ static int emac_dev_open(struct net_device *ndev) struct phy_device *phydev = NULL; struct device *phy = NULL; - ret = pm_runtime_get_sync(&priv->pdev->dev); + ret = pm_runtime_resume_and_get(&priv->pdev->dev); if (ret < 0) { - pm_runtime_put_noidle(&priv->pdev->dev); dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n", __func__, ret); return ret; @@ -1661,9 +1659,8 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) u32 stats_clear_mask; int err; - err = pm_runtime_get_sync(&priv->pdev->dev); + err = pm_runtime_resume_and_get(&priv->pdev->dev); if (err < 0) { - pm_runtime_put_noidle(&priv->pdev->dev); dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n", __func__, err); return &ndev->stats; @@ -1951,12 +1948,11 @@ static int davinci_emac_probe(struct platform_device *pdev) ndev->netdev_ops = &emac_netdev_ops; ndev->ethtool_ops = &ethtool_ops; - netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); + netif_napi_add(ndev, &priv->napi, emac_poll, NAPI_POLL_WEIGHT); pm_runtime_enable(&pdev->dev); - rc = pm_runtime_get_sync(&pdev->dev); + rc =
pm_runtime_resume_and_get(&pdev->dev); if (rc < 0) { - pm_runtime_put_noidle(&pdev->dev); dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n", __func__, rc); goto err_napi_del; diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index fce2626e34fa..ea3772618043 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -134,11 +134,9 @@ static int davinci_mdio_reset(struct mii_bus *bus) u32 phy_mask, ver; int ret; - ret = pm_runtime_get_sync(data->dev); - if (ret < 0) { - pm_runtime_put_noidle(data->dev); + ret = pm_runtime_resume_and_get(data->dev); + if (ret < 0) return ret; - } /* wait for scan logic to settle */ msleep(PHY_MAX_ADDR * data->access_time); @@ -232,11 +230,9 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) return -EINVAL; - ret = pm_runtime_get_sync(data->dev); - if (ret < 0) { - pm_runtime_put_noidle(data->dev); + ret = pm_runtime_resume_and_get(data->dev); + if (ret < 0) return ret; - } reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) | (phy_id << 16)); @@ -276,11 +272,9 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) return -EINVAL; - ret = pm_runtime_get_sync(data->dev); - if (ret < 0) { - pm_runtime_put_noidle(data->dev); + ret = pm_runtime_resume_and_get(data->dev); + if (ret < 0) return ret; - } reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) | (phy_id << 16) | (phy_data & USERACCESS_DATA)); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 16507bff652a..21b0e961eab5 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -24,7 +24,6 @@ #include "netcp.h" #define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD) -#define NETCP_NAPI_WEIGHT 64 #define NETCP_TX_TIMEOUT (5 * HZ) #define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN) #define NETCP_MIN_PACKET_SIZE ETH_ZLEN @@ -2096,8 +2095,8 @@ static int netcp_create_interface(struct netcp_device *netcp_device, } /* NAPI register */ - netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT); - netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT); + netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NAPI_POLL_WEIGHT); + netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NAPI_POLL_WEIGHT); /* Register the network device */ ndev->dev_id = 0; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index f47b8358669d..c09cd961edbb 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -2270,7 +2270,7 @@ spider_net_setup_netdev(struct spider_net_card *card) timer_setup(&card->aneg_timer, spider_net_link_phy, 0); netif_napi_add(netdev, &card->napi, - spider_net_poll, SPIDER_NET_NAPI_WEIGHT); + spider_net_poll, NAPI_POLL_WEIGHT); spider_net_setup_netdev_ops(netdev); diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h index 05b1a0736835..51948e2b3a34 100644 --- a/drivers/net/ethernet/toshiba/spider_net.h +++ b/drivers/net/ethernet/toshiba/spider_net.h @@ -44,7 +44,6 @@ extern char spider_net_driver_name[]; #define SPIDER_NET_RX_CSUM_DEFAULT 1 #define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ -#define SPIDER_NET_NAPI_WEIGHT 64 #define SPIDER_NET_FIRMWARE_SEQS 6 #define SPIDER_NET_FIRMWARE_SEQWORDS 1024 diff --git 
a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index be2b992f24d9..ff0c102cb578 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2846,8 +2846,7 @@ static int velocity_probe(struct device *dev, int irq, netdev->netdev_ops = &velocity_netdev_ops; netdev->ethtool_ops = &velocity_ethtool_ops; - netif_napi_add(netdev, &vptr->napi, velocity_poll, - VELOCITY_NAPI_WEIGHT); + netif_napi_add(netdev, &vptr->napi, velocity_poll, NAPI_POLL_WEIGHT); netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX; diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h index d3f960cc7c6e..c02a9654dce6 100644 --- a/drivers/net/ethernet/via/via-velocity.h +++ b/drivers/net/ethernet/via/via-velocity.h @@ -23,7 +23,6 @@ #define VELOCITY_VERSION "1.15" #define VELOCITY_IO_SIZE 256 -#define VELOCITY_NAPI_WEIGHT 64 #define PKT_BUF_SZ 1540 diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 869e362e09c1..3f6b9dfca095 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1515,7 +1515,7 @@ static int temac_probe(struct platform_device *pdev) of_node_put(dma_np); return PTR_ERR(lp->sdma_regs); } - if (of_get_property(dma_np, "little-endian", NULL)) { + if (of_property_read_bool(dma_np, "little-endian")) { lp->dma_in = temac_dma_in32_le; lp->dma_out = temac_dma_out32_le; } else { diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 0f9c88dd1a4a..d5c1e5c4a508 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -433,8 +433,6 @@ struct axienet_local { struct net_device *ndev; struct device *dev; - struct device_node *phy_node; - struct phylink *phylink; struct phylink_config phylink_config; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index c7eb05e4a6bf..d6fc3f7acdf0 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -2064,25 +2064,33 @@ static int axienet_probe(struct platform_device *pdev) if (ret) goto cleanup_clk; - lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); - if (lp->phy_node) { - ret = axienet_mdio_setup(lp); - if (ret) - dev_warn(&pdev->dev, - "error registering MDIO bus: %d\n", ret); - } + ret = axienet_mdio_setup(lp); + if (ret) + dev_warn(&pdev->dev, + "error registering MDIO bus: %d\n", ret); + if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { - if (!lp->phy_node) { - dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n"); + np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0); + if (!np) { + /* Deprecated: Always use "pcs-handle" for pcs_phy. + * Falling back to "phy-handle" here is only for + * backward compatibility with old device trees. 
+ */ + np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + } + if (!np) { + dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n"); ret = -EINVAL; goto cleanup_mdio; } - lp->pcs_phy = of_mdio_find_device(lp->phy_node); + lp->pcs_phy = of_mdio_find_device(np); if (!lp->pcs_phy) { ret = -EPROBE_DEFER; + of_node_put(np); goto cleanup_mdio; } + of_node_put(np); lp->pcs.ops = &axienet_pcs_ops; lp->pcs.poll = true; } @@ -2125,8 +2133,6 @@ cleanup_mdio: put_device(&lp->pcs_phy->dev); if (lp->mii_bus) axienet_mdio_teardown(lp); - of_node_put(lp->phy_node); - cleanup_clk: clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); clk_disable_unprepare(lp->axi_clk); @@ -2155,9 +2161,6 @@ static int axienet_remove(struct platform_device *pdev) clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); clk_disable_unprepare(lp->axi_clk); - of_node_put(lp->phy_node); - lp->phy_node = NULL; - free_netdev(ndev); return 0; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 57a24f62e353..7a86ae82fcc1 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1,11 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* - * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device. +/* Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device. * * This is a new flat driver which is based on the original emac_lite * driver from John Williams <john.williams@xilinx.com>. * - * 2007 - 2013 (c) Xilinx, Inc. + * Copyright (c) 2007 - 2013 Xilinx, Inc. */ #include <linux/module.h> @@ -91,13 +90,7 @@ #define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */ #define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */ - - #define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */ -#define ALIGNMENT 4 - -/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */ -#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((uintptr_t)adr)) % ALIGNMENT) #ifdef __BIG_ENDIAN #define xemaclite_readl ioread32be @@ -124,7 +117,6 @@ * @last_link: last link status */ struct net_local { - struct net_device *ndev; bool tx_ping_pong; @@ -133,7 +125,7 @@ struct net_local { u32 next_rx_buf_to_use; void __iomem *base_addr; - spinlock_t reset_lock; + spinlock_t reset_lock; /* serialize xmit and tx_timeout execution */ struct sk_buff *deferred_skb; struct phy_device *phy_dev; @@ -144,7 +136,6 @@ struct net_local { int last_link; }; - /*************************/ /* EmacLite driver calls */ /*************************/ @@ -207,7 +198,7 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata) * address in the EmacLite device. */ static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr, - unsigned length) + unsigned int length) { const u16 *from_u16_ptr; u32 align_buffer; @@ -265,7 +256,7 @@ static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr, * to a 16-bit aligned buffer. 
*/ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr, - unsigned length) + unsigned int length) { u16 *to_u16_ptr, *from_u16_ptr; u32 *from_u32_ptr; @@ -330,7 +321,6 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) == 0) { - /* Switch to next buffer if configured */ if (drvdata->tx_ping_pong != 0) drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET; @@ -346,8 +336,9 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) != 0) return -1; /* Buffers were full, return failure */ - } else + } else { return -1; /* Buffer was full, return failure */ + } /* Write the frame to the buffer */ xemaclite_aligned_write(data, (u32 __force *)addr, byte_count); @@ -423,7 +414,6 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) * or an IP packet or an ARP packet */ if (proto_type > ETH_DATA_LEN) { - if (proto_type == ETH_P_IP) { length = ((ntohl(xemaclite_readl(addr + XEL_HEADER_IP_LENGTH_OFFSET + @@ -433,23 +423,25 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) length = min_t(u16, length, ETH_DATA_LEN); length += ETH_HLEN + ETH_FCS_LEN; - } else if (proto_type == ETH_P_ARP) + } else if (proto_type == ETH_P_ARP) { length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN; - else + } else { /* Field contains type other than IP or ARP, use max * frame size and let user parse it */ length = ETH_FRAME_LEN + ETH_FCS_LEN; - } else + } + } else { /* Use the length in the frame, plus the header and trailer */ length = proto_type + ETH_HLEN + ETH_FCS_LEN; + } if (WARN_ON(length > maxlen)) length = maxlen; /* Read from the EmacLite device */ xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET), - data, length); + data, length); /* Acknowledge the frame */ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); @@ -599,11 +591,10 @@ static void xemaclite_rx_handler(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); struct sk_buff *skb; - unsigned int align; u32 len; len = ETH_FRAME_LEN + ETH_FCS_LEN; - skb = netdev_alloc_skb(dev, len + ALIGNMENT); + skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN); if (!skb) { /* Couldn't get memory. */ dev->stats.rx_dropped++; @@ -611,16 +602,7 @@ static void xemaclite_rx_handler(struct net_device *dev) return; } - /* A new skb should have the data halfword aligned, but this code is - * here just in case that isn't true. 
Calculate how many - * bytes we should reserve to get the data to start on a word - * boundary - */ - align = BUFFER_ALIGN(skb->data); - if (align) - skb_reserve(skb, align); - - skb_reserve(skb, 2); + skb_reserve(skb, NET_IP_ALIGN); len = xemaclite_recv_data(lp, (u8 *)skb->data, len); @@ -671,8 +653,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) /* Check if the Transmission for the first buffer is completed */ tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && - (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { - + (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET); @@ -682,8 +663,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) /* Check if the Transmission for the second buffer is completed */ tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && - (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { - + (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); @@ -840,6 +820,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) of_address_to_resource(npp, 0, &res); if (lp->ndev->mem_start != res.start) { struct phy_device *phydev; + phydev = of_phy_find_device(lp->phy_node); if (!phydev) dev_info(dev, diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c index 1f382777aa5a..9abbdb71e629 100644 --- a/drivers/net/ethernet/xscale/ptp_ixp46x.c +++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c @@ -271,7 +271,7 @@ static int ptp_ixp_probe(struct platform_device *pdev) ixp_clock.master_irq = platform_get_irq(pdev, 0); ixp_clock.slave_irq = platform_get_irq(pdev, 1); if (IS_ERR(ixp_clock.regs) || - !ixp_clock.master_irq || !ixp_clock.slave_irq) + ixp_clock.master_irq < 0 || ixp_clock.slave_irq < 0) return -ENXIO; ixp_clock.caps = ptp_ixp_caps; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 7db6c135ac6c..2495a5719e1c 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -533,14 +533,16 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk, } } + skb_gro_pull(skb, gh_len); + skb_gro_postpull_rcsum(skb, gh, gh_len); type = gh->proto_type; + if (likely(type == htons(ETH_P_TEB))) + return call_gro_receive(eth_gro_receive, head, skb); ptype = gro_find_receive_by_type(type); if (!ptype) goto out; - skb_gro_pull(skb, gh_len); - skb_gro_postpull_rcsum(skb, gh, gh_len); pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); flush = 0; @@ -563,6 +565,10 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb, gh_len = geneve_hlen(gh); type = gh->proto_type; + /* since skb->encapsulation is set, eth_gro_complete() sets the inner mac header */ + if (likely(type == htons(ETH_P_TEB))) + return eth_gro_complete(skb, nhoff + gh_len); + ptype = gro_find_complete_by_type(type); if (ptype) err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig index 441da03c23ee..a9c44f08199d 100644 --- a/drivers/net/hamradio/Kconfig +++ b/drivers/net/hamradio/Kconfig @@ -45,40 +45,6 @@ config BPQETHER useful if some other computer on your local network has a direct amateur radio connection. 
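Note on the ptp_ixp46x hunk above: platform_get_irq() returns the IRQ number on success and a negative errno on failure (0 is not a valid success value), so the old "!ixp_clock.master_irq" test could never observe an error. A minimal sketch of the corrected pattern, where example_probe() is a hypothetical stand-in and not a function from this series:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int irq;

	/* platform_get_irq() reports failure as a negative errno,
	 * never as 0, so propagate it instead of testing "!irq". */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* ... request_irq(irq, ...) and the rest of probe ... */
	return 0;
}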
-config DMASCC - tristate "High-speed (DMA) SCC driver for AX.25" - depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API - depends on VIRT_TO_BUS - help - This is a driver for high-speed SCC boards, i.e. those supporting - DMA on one port. You usually use those boards to connect your - computer to an amateur radio modem (such as the WA4DSY 56kbps - modem), in order to send and receive AX.25 packet radio network - traffic. - - Currently, this driver supports Ottawa PI/PI2, Paccomm/Gracilis - PackeTwin, and S5SCC/DMA boards. They are detected automatically. - If you have one of these cards, say Y here and read the AX25-HOWTO, - available from <http://www.tldp.org/docs.html#howto>. - - This driver can operate multiple boards simultaneously. If you - compile it as a module (by saying M instead of Y), it will be called - dmascc. If you don't pass any parameter to the driver, all - possible I/O addresses are probed. This could irritate other devices - that are currently not in use. You may specify the list of addresses - to be probed by "dmascc.io=addr1,addr2,..." (when compiled into the - kernel image) or "io=addr1,addr2,..." (when loaded as a module). The - network interfaces will be called dmascc0 and dmascc1 for the board - detected first, dmascc2 and dmascc3 for the second one, and so on. - - Before you configure each interface with ifconfig, you MUST set - certain parameters, such as channel access timing, clock mode, and - DMA channel. This is accomplished with a small utility program, - dmascc_cfg, available at - <http://www.linux-ax25.org/wiki/Ax25-tools>. Please be sure to - get at least version 1.27 of dmascc_cfg, as older versions will not - work with the current driver. - config SCC tristate "Z8530 SCC driver" depends on ISA && AX25 && ISA_DMA_API diff --git a/drivers/net/hamradio/Makefile b/drivers/net/hamradio/Makefile index 7a1518d763e3..25fc400369ba 100644 --- a/drivers/net/hamradio/Makefile +++ b/drivers/net/hamradio/Makefile @@ -11,7 +11,6 @@ # Christoph Hellwig <hch@infradead.org> # -obj-$(CONFIG_DMASCC) += dmascc.o obj-$(CONFIG_SCC) += scc.o obj-$(CONFIG_MKISS) += mkiss.o obj-$(CONFIG_6PACK) += 6pack.o diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c deleted file mode 100644 index a2a12208e3ad..000000000000 --- a/drivers/net/hamradio/dmascc.c +++ /dev/null @@ -1,1450 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Driver for high-speed SCC boards (those with DMA support) - * Copyright (C) 1997-2000 Klaus Kudielka - * - * S5SCC/DMA support by Janko Koleznik S52HI - */ - - -#include <linux/module.h> -#include <linux/bitops.h> -#include <linux/delay.h> -#include <linux/errno.h> -#include <linux/if_arp.h> -#include <linux/in.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/netdevice.h> -#include <linux/slab.h> -#include <linux/rtnetlink.h> -#include <linux/sockios.h> -#include <linux/workqueue.h> -#include <linux/atomic.h> -#include <asm/dma.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <linux/uaccess.h> -#include <linux/jiffies.h> -#include <net/ax25.h> -#include "z8530.h" - - -/* Number of buffers per channel */ - -#define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */ -#define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 
2 recommended) */ -#define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */ - - -/* Cards supported */ - -#define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \ - 0, 8, 1843200, 3686400 } -#define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \ - 0, 8, 3686400, 7372800 } -#define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \ - 0, 4, 6144000, 6144000 } -#define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \ - 0, 8, 4915200, 9830400 } - -#define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 } - -#define TMR_0_HZ 25600 /* Frequency of timer 0 */ - -#define TYPE_PI 0 -#define TYPE_PI2 1 -#define TYPE_TWIN 2 -#define TYPE_S5 3 -#define NUM_TYPES 4 - -#define MAX_NUM_DEVS 32 - - -/* SCC chips supported */ - -#define Z8530 0 -#define Z85C30 1 -#define Z85230 2 - -#define CHIPNAMES { "Z8530", "Z85C30", "Z85230" } - - -/* I/O registers */ - -/* 8530 registers relative to card base */ -#define SCCB_CMD 0x00 -#define SCCB_DATA 0x01 -#define SCCA_CMD 0x02 -#define SCCA_DATA 0x03 - -/* 8253/8254 registers relative to card base */ -#define TMR_CNT0 0x00 -#define TMR_CNT1 0x01 -#define TMR_CNT2 0x02 -#define TMR_CTRL 0x03 - -/* Additional PI/PI2 registers relative to card base */ -#define PI_DREQ_MASK 0x04 - -/* Additional PackeTwin registers relative to card base */ -#define TWIN_INT_REG 0x08 -#define TWIN_CLR_TMR1 0x09 -#define TWIN_CLR_TMR2 0x0a -#define TWIN_SPARE_1 0x0b -#define TWIN_DMA_CFG 0x08 -#define TWIN_SERIAL_CFG 0x09 -#define TWIN_DMA_CLR_FF 0x0a -#define TWIN_SPARE_2 0x0b - - -/* PackeTwin I/O register values */ - -/* INT_REG */ -#define TWIN_SCC_MSK 0x01 -#define TWIN_TMR1_MSK 0x02 -#define TWIN_TMR2_MSK 0x04 -#define TWIN_INT_MSK 0x07 - -/* SERIAL_CFG */ -#define TWIN_DTRA_ON 0x01 -#define TWIN_DTRB_ON 0x02 -#define TWIN_EXTCLKA 0x04 -#define TWIN_EXTCLKB 0x08 -#define TWIN_LOOPA_ON 0x10 -#define TWIN_LOOPB_ON 0x20 -#define TWIN_EI 0x80 - -/* DMA_CFG */ -#define TWIN_DMA_HDX_T1 0x08 -#define TWIN_DMA_HDX_R1 0x0a -#define TWIN_DMA_HDX_T3 0x14 -#define TWIN_DMA_HDX_R3 0x16 -#define TWIN_DMA_FDX_T3R1 0x1b -#define TWIN_DMA_FDX_T1R3 0x1d - - -/* Status values */ - -#define IDLE 0 -#define TX_HEAD 1 -#define TX_DATA 2 -#define TX_PAUSE 3 -#define TX_TAIL 4 -#define RTS_OFF 5 -#define WAIT 6 -#define DCD_ON 7 -#define RX_ON 8 -#define DCD_OFF 9 - - -/* Ioctls */ - -#define SIOCGSCCPARAM SIOCDEVPRIVATE -#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1) - - -/* Data types */ - -struct scc_param { - int pclk_hz; /* frequency of BRG input (don't change) */ - int brg_tc; /* BRG terminal count; BRG disabled if < 0 */ - int nrzi; /* 0 (nrz), 1 (nrzi) */ - int clocks; /* see dmascc_cfg documentation */ - int txdelay; /* [1/TMR_0_HZ] */ - int txtimeout; /* [1/HZ] */ - int txtail; /* [1/TMR_0_HZ] */ - int waittime; /* [1/TMR_0_HZ] */ - int slottime; /* [1/TMR_0_HZ] */ - int persist; /* 1 ... 
256 */ - int dma; /* -1 (disable), 0, 1, 3 */ - int txpause; /* [1/TMR_0_HZ] */ - int rtsoff; /* [1/TMR_0_HZ] */ - int dcdon; /* [1/TMR_0_HZ] */ - int dcdoff; /* [1/TMR_0_HZ] */ -}; - -struct scc_hardware { - char *name; - int io_region; - int io_delta; - int io_size; - int num_devs; - int scc_offset; - int tmr_offset; - int tmr_hz; - int pclk_hz; -}; - -struct scc_priv { - int type; - int chip; - struct net_device *dev; - struct scc_info *info; - - int channel; - int card_base, scc_cmd, scc_data; - int tmr_cnt, tmr_ctrl, tmr_mode; - struct scc_param param; - char rx_buf[NUM_RX_BUF][BUF_SIZE]; - int rx_len[NUM_RX_BUF]; - int rx_ptr; - struct work_struct rx_work; - int rx_head, rx_tail, rx_count; - int rx_over; - char tx_buf[NUM_TX_BUF][BUF_SIZE]; - int tx_len[NUM_TX_BUF]; - int tx_ptr; - int tx_head, tx_tail, tx_count; - int state; - unsigned long tx_start; - int rr0; - spinlock_t *register_lock; /* Per scc_info */ - spinlock_t ring_lock; -}; - -struct scc_info { - int irq_used; - int twin_serial_cfg; - struct net_device *dev[2]; - struct scc_priv priv[2]; - struct scc_info *next; - spinlock_t register_lock; /* Per device register lock */ -}; - - -/* Function declarations */ -static int setup_adapter(int card_base, int type, int n) __init; - -static void write_scc(struct scc_priv *priv, int reg, int val); -static void write_scc_data(struct scc_priv *priv, int val, int fast); -static int read_scc(struct scc_priv *priv, int reg); -static int read_scc_data(struct scc_priv *priv); - -static int scc_open(struct net_device *dev); -static int scc_close(struct net_device *dev); -static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, - void __user *data, int cmd); -static int scc_send_packet(struct sk_buff *skb, struct net_device *dev); -static int scc_set_mac_address(struct net_device *dev, void *sa); - -static inline void tx_on(struct scc_priv *priv); -static inline void rx_on(struct scc_priv *priv); -static inline void rx_off(struct scc_priv *priv); -static void start_timer(struct scc_priv *priv, int t, int r15); -static inline unsigned char random(void); - -static inline void z8530_isr(struct scc_info *info); -static irqreturn_t scc_isr(int irq, void *dev_id); -static void rx_isr(struct scc_priv *priv); -static void special_condition(struct scc_priv *priv, int rc); -static void rx_bh(struct work_struct *); -static void tx_isr(struct scc_priv *priv); -static void es_isr(struct scc_priv *priv); -static void tm_isr(struct scc_priv *priv); - - -/* Initialization variables */ - -static int io[MAX_NUM_DEVS] __initdata = { 0, }; - -/* Beware! hw[] is also used in dmascc_exit(). 
*/ -static struct scc_hardware hw[NUM_TYPES] = HARDWARE; - - -/* Global variables */ - -static struct scc_info *first; -static unsigned long rand; - - -MODULE_AUTHOR("Klaus Kudielka"); -MODULE_DESCRIPTION("Driver for high-speed SCC boards"); -module_param_hw_array(io, int, ioport, NULL, 0); -MODULE_LICENSE("GPL"); - -static void __exit dmascc_exit(void) -{ - int i; - struct scc_info *info; - - while (first) { - info = first; - - /* Unregister devices */ - for (i = 0; i < 2; i++) - unregister_netdev(info->dev[i]); - - /* Reset board */ - if (info->priv[0].type == TYPE_TWIN) - outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG); - write_scc(&info->priv[0], R9, FHWRES); - release_region(info->dev[0]->base_addr, - hw[info->priv[0].type].io_size); - - for (i = 0; i < 2; i++) - free_netdev(info->dev[i]); - - /* Free memory */ - first = info->next; - kfree(info); - } -} - -static int __init dmascc_init(void) -{ - int h, i, j, n; - int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS], - t1[MAX_NUM_DEVS]; - unsigned t_val; - unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS], - counting[MAX_NUM_DEVS]; - - /* Initialize random number generator */ - rand = jiffies; - /* Cards found = 0 */ - n = 0; - /* Warning message */ - if (!io[0]) - printk(KERN_INFO "dmascc: autoprobing (dangerous)\n"); - - /* Run autodetection for each card type */ - for (h = 0; h < NUM_TYPES; h++) { - - if (io[0]) { - /* User-specified I/O address regions */ - for (i = 0; i < hw[h].num_devs; i++) - base[i] = 0; - for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) { - j = (io[i] - - hw[h].io_region) / hw[h].io_delta; - if (j >= 0 && j < hw[h].num_devs && - hw[h].io_region + - j * hw[h].io_delta == io[i]) { - base[j] = io[i]; - } - } - } else { - /* Default I/O address regions */ - for (i = 0; i < hw[h].num_devs; i++) { - base[i] = - hw[h].io_region + i * hw[h].io_delta; - } - } - - /* Check valid I/O address regions */ - for (i = 0; i < hw[h].num_devs; i++) - if (base[i]) { - if (!request_region - (base[i], hw[h].io_size, "dmascc")) - base[i] = 0; - else { - tcmd[i] = - base[i] + hw[h].tmr_offset + - TMR_CTRL; - t0[i] = - base[i] + hw[h].tmr_offset + - TMR_CNT0; - t1[i] = - base[i] + hw[h].tmr_offset + - TMR_CNT1; - } - } - - /* Start timers */ - for (i = 0; i < hw[h].num_devs; i++) - if (base[i]) { - /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */ - outb(0x36, tcmd[i]); - outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF, - t0[i]); - outb((hw[h].tmr_hz / TMR_0_HZ) >> 8, - t0[i]); - /* Timer 1: LSB+MSB, Mode 0, HZ/10 */ - outb(0x70, tcmd[i]); - outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]); - outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]); - start[i] = jiffies; - delay[i] = 0; - counting[i] = 1; - /* Timer 2: LSB+MSB, Mode 0 */ - outb(0xb0, tcmd[i]); - } - time = jiffies; - /* Wait until counter registers are loaded */ - udelay(2000000 / TMR_0_HZ); - - /* Timing loop */ - while (time_is_after_jiffies(time + 13)) { - for (i = 0; i < hw[h].num_devs; i++) - if (base[i] && counting[i]) { - /* Read back Timer 1: latch; read LSB; read MSB */ - outb(0x40, tcmd[i]); - t_val = - inb(t1[i]) + (inb(t1[i]) << 8); - /* Also check whether counter did wrap */ - if (t_val == 0 || - t_val > TMR_0_HZ / HZ * 10) - counting[i] = 0; - delay[i] = jiffies - start[i]; - } - } - - /* Evaluate measurements */ - for (i = 0; i < hw[h].num_devs; i++) - if (base[i]) { - if ((delay[i] >= 9 && delay[i] <= 11) && - /* Ok, we have found an adapter */ - (setup_adapter(base[i], h, n) == 0)) - n++; - else - release_region(base[i], - hw[h].io_size); - } - - } /* NUM_TYPES */ - - /* If 
any adapter was successfully initialized, return ok */ - if (n) - return 0; - - /* If no adapter found, return error */ - printk(KERN_INFO "dmascc: no adapters found\n"); - return -EIO; -} - -module_init(dmascc_init); -module_exit(dmascc_exit); - -static void __init dev_setup(struct net_device *dev) -{ - dev->type = ARPHRD_AX25; - dev->hard_header_len = AX25_MAX_HEADER_LEN; - dev->mtu = 1500; - dev->addr_len = AX25_ADDR_LEN; - dev->tx_queue_len = 64; - memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); - dev_addr_set(dev, (u8 *)&ax25_defaddr); -} - -static const struct net_device_ops scc_netdev_ops = { - .ndo_open = scc_open, - .ndo_stop = scc_close, - .ndo_start_xmit = scc_send_packet, - .ndo_siocdevprivate = scc_siocdevprivate, - .ndo_set_mac_address = scc_set_mac_address, -}; - -static int __init setup_adapter(int card_base, int type, int n) -{ - int i, irq, chip, err; - struct scc_info *info; - struct net_device *dev; - struct scc_priv *priv; - unsigned long time; - unsigned int irqs; - int tmr_base = card_base + hw[type].tmr_offset; - int scc_base = card_base + hw[type].scc_offset; - char *chipnames[] = CHIPNAMES; - - /* Initialize what is necessary for write_scc and write_scc_data */ - info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA); - if (!info) { - err = -ENOMEM; - goto out; - } - - info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup); - if (!info->dev[0]) { - printk(KERN_ERR "dmascc: " - "could not allocate memory for %s at %#3x\n", - hw[type].name, card_base); - err = -ENOMEM; - goto out1; - } - - info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup); - if (!info->dev[1]) { - printk(KERN_ERR "dmascc: " - "could not allocate memory for %s at %#3x\n", - hw[type].name, card_base); - err = -ENOMEM; - goto out2; - } - spin_lock_init(&info->register_lock); - - priv = &info->priv[0]; - priv->type = type; - priv->card_base = card_base; - priv->scc_cmd = scc_base + SCCA_CMD; - priv->scc_data = scc_base + SCCA_DATA; - priv->register_lock = &info->register_lock; - - /* Reset SCC */ - write_scc(priv, R9, FHWRES | MIE | NV); - - /* Determine type of chip by enabling SDLC/HDLC enhancements */ - write_scc(priv, R15, SHDLCE); - if (!read_scc(priv, R15)) { - /* WR7' not present. This is an ordinary Z8530 SCC. */ - chip = Z8530; - } else { - /* Put one character in TX FIFO */ - write_scc_data(priv, 0, 0); - if (read_scc(priv, R0) & Tx_BUF_EMP) { - /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */ - chip = Z85230; - } else { - /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. 
*/ - chip = Z85C30; - } - } - write_scc(priv, R15, 0); - - /* Start IRQ auto-detection */ - irqs = probe_irq_on(); - - /* Enable interrupts */ - if (type == TYPE_TWIN) { - outb(0, card_base + TWIN_DMA_CFG); - inb(card_base + TWIN_CLR_TMR1); - inb(card_base + TWIN_CLR_TMR2); - info->twin_serial_cfg = TWIN_EI; - outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG); - } else { - write_scc(priv, R15, CTSIE); - write_scc(priv, R0, RES_EXT_INT); - write_scc(priv, R1, EXT_INT_ENAB); - } - - /* Start timer */ - outb(1, tmr_base + TMR_CNT1); - outb(0, tmr_base + TMR_CNT1); - - /* Wait and detect IRQ */ - time = jiffies; - while (time_is_after_jiffies(time + 2 + HZ / TMR_0_HZ)); - irq = probe_irq_off(irqs); - - /* Clear pending interrupt, disable interrupts */ - if (type == TYPE_TWIN) { - inb(card_base + TWIN_CLR_TMR1); - } else { - write_scc(priv, R1, 0); - write_scc(priv, R15, 0); - write_scc(priv, R0, RES_EXT_INT); - } - - if (irq <= 0) { - printk(KERN_ERR - "dmascc: could not find irq of %s at %#3x (irq=%d)\n", - hw[type].name, card_base, irq); - err = -ENODEV; - goto out3; - } - - /* Set up data structures */ - for (i = 0; i < 2; i++) { - dev = info->dev[i]; - priv = &info->priv[i]; - priv->type = type; - priv->chip = chip; - priv->dev = dev; - priv->info = info; - priv->channel = i; - spin_lock_init(&priv->ring_lock); - priv->register_lock = &info->register_lock; - priv->card_base = card_base; - priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD); - priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA); - priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1); - priv->tmr_ctrl = tmr_base + TMR_CTRL; - priv->tmr_mode = i ? 0xb0 : 0x70; - priv->param.pclk_hz = hw[type].pclk_hz; - priv->param.brg_tc = -1; - priv->param.clocks = TCTRxCP | RCRTxCP; - priv->param.persist = 256; - priv->param.dma = -1; - INIT_WORK(&priv->rx_work, rx_bh); - dev->ml_priv = priv; - snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i); - dev->base_addr = card_base; - dev->irq = irq; - dev->netdev_ops = &scc_netdev_ops; - dev->header_ops = &ax25_header_ops; - } - if (register_netdev(info->dev[0])) { - printk(KERN_ERR "dmascc: could not register %s\n", - info->dev[0]->name); - err = -ENODEV; - goto out3; - } - if (register_netdev(info->dev[1])) { - printk(KERN_ERR "dmascc: could not register %s\n", - info->dev[1]->name); - err = -ENODEV; - goto out4; - } - - - info->next = first; - first = info; - printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n", - hw[type].name, chipnames[chip], card_base, irq); - return 0; - - out4: - unregister_netdev(info->dev[0]); - out3: - if (info->priv[0].type == TYPE_TWIN) - outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG); - write_scc(&info->priv[0], R9, FHWRES); - free_netdev(info->dev[1]); - out2: - free_netdev(info->dev[0]); - out1: - kfree(info); - out: - return err; -} - - -/* Driver functions */ - -static void write_scc(struct scc_priv *priv, int reg, int val) -{ - unsigned long flags; - switch (priv->type) { - case TYPE_S5: - if (reg) - outb(reg, priv->scc_cmd); - outb(val, priv->scc_cmd); - return; - case TYPE_TWIN: - if (reg) - outb_p(reg, priv->scc_cmd); - outb_p(val, priv->scc_cmd); - return; - default: - spin_lock_irqsave(priv->register_lock, flags); - outb_p(0, priv->card_base + PI_DREQ_MASK); - if (reg) - outb_p(reg, priv->scc_cmd); - outb_p(val, priv->scc_cmd); - outb(1, priv->card_base + PI_DREQ_MASK); - spin_unlock_irqrestore(priv->register_lock, flags); - return; - } -} - - -static void write_scc_data(struct scc_priv *priv, int val, int fast) -{ - 
unsigned long flags; - switch (priv->type) { - case TYPE_S5: - outb(val, priv->scc_data); - return; - case TYPE_TWIN: - outb_p(val, priv->scc_data); - return; - default: - if (fast) - outb_p(val, priv->scc_data); - else { - spin_lock_irqsave(priv->register_lock, flags); - outb_p(0, priv->card_base + PI_DREQ_MASK); - outb_p(val, priv->scc_data); - outb(1, priv->card_base + PI_DREQ_MASK); - spin_unlock_irqrestore(priv->register_lock, flags); - } - return; - } -} - - -static int read_scc(struct scc_priv *priv, int reg) -{ - int rc; - unsigned long flags; - switch (priv->type) { - case TYPE_S5: - if (reg) - outb(reg, priv->scc_cmd); - return inb(priv->scc_cmd); - case TYPE_TWIN: - if (reg) - outb_p(reg, priv->scc_cmd); - return inb_p(priv->scc_cmd); - default: - spin_lock_irqsave(priv->register_lock, flags); - outb_p(0, priv->card_base + PI_DREQ_MASK); - if (reg) - outb_p(reg, priv->scc_cmd); - rc = inb_p(priv->scc_cmd); - outb(1, priv->card_base + PI_DREQ_MASK); - spin_unlock_irqrestore(priv->register_lock, flags); - return rc; - } -} - - -static int read_scc_data(struct scc_priv *priv) -{ - int rc; - unsigned long flags; - switch (priv->type) { - case TYPE_S5: - return inb(priv->scc_data); - case TYPE_TWIN: - return inb_p(priv->scc_data); - default: - spin_lock_irqsave(priv->register_lock, flags); - outb_p(0, priv->card_base + PI_DREQ_MASK); - rc = inb_p(priv->scc_data); - outb(1, priv->card_base + PI_DREQ_MASK); - spin_unlock_irqrestore(priv->register_lock, flags); - return rc; - } -} - - -static int scc_open(struct net_device *dev) -{ - struct scc_priv *priv = dev->ml_priv; - struct scc_info *info = priv->info; - int card_base = priv->card_base; - - /* Request IRQ if not already used by other channel */ - if (!info->irq_used) { - if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) { - return -EAGAIN; - } - } - info->irq_used++; - - /* Request DMA if required */ - if (priv->param.dma >= 0) { - if (request_dma(priv->param.dma, "dmascc")) { - if (--info->irq_used == 0) - free_irq(dev->irq, info); - return -EAGAIN; - } else { - unsigned long flags = claim_dma_lock(); - clear_dma_ff(priv->param.dma); - release_dma_lock(flags); - } - } - - /* Initialize local variables */ - priv->rx_ptr = 0; - priv->rx_over = 0; - priv->rx_head = priv->rx_tail = priv->rx_count = 0; - priv->state = IDLE; - priv->tx_head = priv->tx_tail = priv->tx_count = 0; - priv->tx_ptr = 0; - - /* Reset channel */ - write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV); - /* X1 clock, SDLC mode */ - write_scc(priv, R4, SDLC | X1CLK); - /* DMA */ - write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN); - /* 8 bit RX char, RX disable */ - write_scc(priv, R3, Rx8); - /* 8 bit TX char, TX disable */ - write_scc(priv, R5, Tx8); - /* SDLC address field */ - write_scc(priv, R6, 0); - /* SDLC flag */ - write_scc(priv, R7, FLAG); - switch (priv->chip) { - case Z85C30: - /* Select WR7' */ - write_scc(priv, R15, SHDLCE); - /* Auto EOM reset */ - write_scc(priv, R7, AUTOEOM); - write_scc(priv, R15, 0); - break; - case Z85230: - /* Select WR7' */ - write_scc(priv, R15, SHDLCE); - /* The following bits are set (see 2.5.2.1): - - Automatic EOM reset - - Interrupt request if RX FIFO is half full - This bit should be ignored in DMA mode (according to the - documentation), but actually isn't. The receiver doesn't work if - it is set. Thus, we have to clear it in DMA mode. - - Interrupt/DMA request if TX FIFO is completely empty - a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30 - compatibility). 
- b) If cleared, DMA requests may follow each other very quickly, - filling up the TX FIFO. - Advantage: TX works even in case of high bus latency. - Disadvantage: Edge-triggered DMA request circuitry may miss - a request. No more data is delivered, resulting - in a TX FIFO underrun. - Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared. - The PackeTwin doesn't. I don't know about the PI, but let's - assume it behaves like the PI2. - */ - if (priv->param.dma >= 0) { - if (priv->type == TYPE_TWIN) - write_scc(priv, R7, AUTOEOM | TXFIFOE); - else - write_scc(priv, R7, AUTOEOM); - } else { - write_scc(priv, R7, AUTOEOM | RXFIFOH); - } - write_scc(priv, R15, 0); - break; - } - /* Preset CRC, NRZ(I) encoding */ - write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ)); - - /* Configure baud rate generator */ - if (priv->param.brg_tc >= 0) { - /* Program BR generator */ - write_scc(priv, R12, priv->param.brg_tc & 0xFF); - write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF); - /* BRG source = SYS CLK; enable BRG; DTR REQ function (required by - PackeTwin, not connected on the PI2); set DPLL source to BRG */ - write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL); - /* Enable DPLL */ - write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL); - } else { - /* Disable BR generator */ - write_scc(priv, R14, DTRREQ | BRSRC); - } - - /* Configure clocks */ - if (priv->type == TYPE_TWIN) { - /* Disable external TX clock receiver */ - outb((info->twin_serial_cfg &= - ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)), - card_base + TWIN_SERIAL_CFG); - } - write_scc(priv, R11, priv->param.clocks); - if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) { - /* Enable external TX clock receiver */ - outb((info->twin_serial_cfg |= - (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)), - card_base + TWIN_SERIAL_CFG); - } - - /* Configure PackeTwin */ - if (priv->type == TYPE_TWIN) { - /* Assert DTR, enable interrupts */ - outb((info->twin_serial_cfg |= TWIN_EI | - (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)), - card_base + TWIN_SERIAL_CFG); - } - - /* Read current status */ - priv->rr0 = read_scc(priv, R0); - /* Enable DCD interrupt */ - write_scc(priv, R15, DCDIE); - - netif_start_queue(dev); - - return 0; -} - - -static int scc_close(struct net_device *dev) -{ - struct scc_priv *priv = dev->ml_priv; - struct scc_info *info = priv->info; - int card_base = priv->card_base; - - netif_stop_queue(dev); - - if (priv->type == TYPE_TWIN) { - /* Drop DTR */ - outb((info->twin_serial_cfg &= - (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)), - card_base + TWIN_SERIAL_CFG); - } - - /* Reset channel, free DMA and IRQ */ - write_scc(priv, R9, (priv->channel ? 
CHRB : CHRA) | MIE | NV); - if (priv->param.dma >= 0) { - if (priv->type == TYPE_TWIN) - outb(0, card_base + TWIN_DMA_CFG); - free_dma(priv->param.dma); - } - if (--info->irq_used == 0) - free_irq(dev->irq, info); - - return 0; -} - - -static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) -{ - struct scc_priv *priv = dev->ml_priv; - - switch (cmd) { - case SIOCGSCCPARAM: - if (copy_to_user(data, &priv->param, sizeof(struct scc_param))) - return -EFAULT; - return 0; - case SIOCSSCCPARAM: - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (netif_running(dev)) - return -EAGAIN; - if (copy_from_user(&priv->param, data, - sizeof(struct scc_param))) - return -EFAULT; - return 0; - default: - return -EOPNOTSUPP; - } -} - - -static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) -{ - struct scc_priv *priv = dev->ml_priv; - unsigned long flags; - int i; - - if (skb->protocol == htons(ETH_P_IP)) - return ax25_ip_xmit(skb); - - /* Temporarily stop the scheduler feeding us packets */ - netif_stop_queue(dev); - - /* Transfer data to DMA buffer */ - i = priv->tx_head; - skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1); - priv->tx_len[i] = skb->len - 1; - - /* Clear interrupts while we touch our circular buffers */ - - spin_lock_irqsave(&priv->ring_lock, flags); - /* Move the ring buffer's head */ - priv->tx_head = (i + 1) % NUM_TX_BUF; - priv->tx_count++; - - /* If we just filled up the last buffer, leave queue stopped. - The higher layers must wait until we have a DMA buffer - to accept the data. */ - if (priv->tx_count < NUM_TX_BUF) - netif_wake_queue(dev); - - /* Set new TX state */ - if (priv->state == IDLE) { - /* Assert RTS, start timer */ - priv->state = TX_HEAD; - priv->tx_start = jiffies; - write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8); - write_scc(priv, R15, 0); - start_timer(priv, priv->param.txdelay, 0); - } - - /* Turn interrupts back on and free buffer */ - spin_unlock_irqrestore(&priv->ring_lock, flags); - dev_kfree_skb(skb); - - return NETDEV_TX_OK; -} - - -static int scc_set_mac_address(struct net_device *dev, void *sa) -{ - dev_addr_set(dev, ((struct sockaddr *)sa)->sa_data); - return 0; -} - - -static inline void tx_on(struct scc_priv *priv) -{ - int i, n; - unsigned long flags; - - if (priv->param.dma >= 0) { - n = (priv->chip == Z85230) ? 3 : 1; - /* Program DMA controller */ - flags = claim_dma_lock(); - set_dma_mode(priv->param.dma, DMA_MODE_WRITE); - set_dma_addr(priv->param.dma, - virt_to_bus(priv->tx_buf[priv->tx_tail]) + n); - set_dma_count(priv->param.dma, - priv->tx_len[priv->tx_tail] - n); - release_dma_lock(flags); - /* Enable TX underrun interrupt */ - write_scc(priv, R15, TxUIE); - /* Configure DREQ */ - if (priv->type == TYPE_TWIN) - outb((priv->param.dma == - 1) ? 
TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3, - priv->card_base + TWIN_DMA_CFG); - else - write_scc(priv, R1, - EXT_INT_ENAB | WT_FN_RDYFN | - WT_RDY_ENAB); - /* Write first byte(s) */ - spin_lock_irqsave(priv->register_lock, flags); - for (i = 0; i < n; i++) - write_scc_data(priv, - priv->tx_buf[priv->tx_tail][i], 1); - enable_dma(priv->param.dma); - spin_unlock_irqrestore(priv->register_lock, flags); - } else { - write_scc(priv, R15, TxUIE); - write_scc(priv, R1, - EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB); - tx_isr(priv); - } - /* Reset EOM latch if we do not have the AUTOEOM feature */ - if (priv->chip == Z8530) - write_scc(priv, R0, RES_EOM_L); -} - - -static inline void rx_on(struct scc_priv *priv) -{ - unsigned long flags; - - /* Clear RX FIFO */ - while (read_scc(priv, R0) & Rx_CH_AV) - read_scc_data(priv); - priv->rx_over = 0; - if (priv->param.dma >= 0) { - /* Program DMA controller */ - flags = claim_dma_lock(); - set_dma_mode(priv->param.dma, DMA_MODE_READ); - set_dma_addr(priv->param.dma, - virt_to_bus(priv->rx_buf[priv->rx_head])); - set_dma_count(priv->param.dma, BUF_SIZE); - release_dma_lock(flags); - enable_dma(priv->param.dma); - /* Configure PackeTwin DMA */ - if (priv->type == TYPE_TWIN) { - outb((priv->param.dma == - 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3, - priv->card_base + TWIN_DMA_CFG); - } - /* Sp. cond. intr. only, ext int enable, RX DMA enable */ - write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx | - WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB); - } else { - /* Reset current frame */ - priv->rx_ptr = 0; - /* Intr. on all Rx characters and Sp. cond., ext int enable */ - write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT | - WT_FN_RDYFN); - } - write_scc(priv, R0, ERR_RES); - write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB); -} - - -static inline void rx_off(struct scc_priv *priv) -{ - /* Disable receiver */ - write_scc(priv, R3, Rx8); - /* Disable DREQ / RX interrupt */ - if (priv->param.dma >= 0 && priv->type == TYPE_TWIN) - outb(0, priv->card_base + TWIN_DMA_CFG); - else - write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN); - /* Disable DMA */ - if (priv->param.dma >= 0) - disable_dma(priv->param.dma); -} - - -static void start_timer(struct scc_priv *priv, int t, int r15) -{ - outb(priv->tmr_mode, priv->tmr_ctrl); - if (t == 0) { - tm_isr(priv); - } else if (t > 0) { - outb(t & 0xFF, priv->tmr_cnt); - outb((t >> 8) & 0xFF, priv->tmr_cnt); - if (priv->type != TYPE_TWIN) { - write_scc(priv, R15, r15 | CTSIE); - priv->rr0 |= CTS; - } - } -} - - -static inline unsigned char random(void) -{ - /* See "Numerical Recipes in C", second edition, p. 284 */ - rand = rand * 1664525L + 1013904223L; - return (unsigned char) (rand >> 24); -} - -static inline void z8530_isr(struct scc_info *info) -{ - int is, i = 100; - - while ((is = read_scc(&info->priv[0], R3)) && i--) { - if (is & CHARxIP) { - rx_isr(&info->priv[0]); - } else if (is & CHATxIP) { - tx_isr(&info->priv[0]); - } else if (is & CHAEXT) { - es_isr(&info->priv[0]); - } else if (is & CHBRxIP) { - rx_isr(&info->priv[1]); - } else if (is & CHBTxIP) { - tx_isr(&info->priv[1]); - } else { - es_isr(&info->priv[1]); - } - write_scc(&info->priv[0], R0, RES_H_IUS); - i++; - } - if (i < 0) { - printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n", - is); - } - /* Ok, no interrupts pending from this 8530. The INT line should - be inactive now. 
*/ -} - - -static irqreturn_t scc_isr(int irq, void *dev_id) -{ - struct scc_info *info = dev_id; - - spin_lock(info->priv[0].register_lock); - /* At this point interrupts are enabled, and the interrupt under service - is already acknowledged, but masked off. - - Interrupt processing: We loop until we know that the IRQ line is - low. If another positive edge occurs afterwards during the ISR, - another interrupt will be triggered by the interrupt controller - as soon as the IRQ level is enabled again (see asm/irq.h). - - Bottom-half handlers will be processed after scc_isr(). This is - important, since we only have small ringbuffers and want new data - to be fetched/delivered immediately. */ - - if (info->priv[0].type == TYPE_TWIN) { - int is, card_base = info->priv[0].card_base; - while ((is = ~inb(card_base + TWIN_INT_REG)) & - TWIN_INT_MSK) { - if (is & TWIN_SCC_MSK) { - z8530_isr(info); - } else if (is & TWIN_TMR1_MSK) { - inb(card_base + TWIN_CLR_TMR1); - tm_isr(&info->priv[0]); - } else { - inb(card_base + TWIN_CLR_TMR2); - tm_isr(&info->priv[1]); - } - } - } else - z8530_isr(info); - spin_unlock(info->priv[0].register_lock); - return IRQ_HANDLED; -} - - -static void rx_isr(struct scc_priv *priv) -{ - if (priv->param.dma >= 0) { - /* Check special condition and perform error reset. See 2.4.7.5. */ - special_condition(priv, read_scc(priv, R1)); - write_scc(priv, R0, ERR_RES); - } else { - /* Check special condition for each character. Error reset not necessary. - Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */ - int rc; - while (read_scc(priv, R0) & Rx_CH_AV) { - rc = read_scc(priv, R1); - if (priv->rx_ptr < BUF_SIZE) - priv->rx_buf[priv->rx_head][priv-> - rx_ptr++] = - read_scc_data(priv); - else { - priv->rx_over = 2; - read_scc_data(priv); - } - special_condition(priv, rc); - } - } -} - - -static void special_condition(struct scc_priv *priv, int rc) -{ - int cb; - unsigned long flags; - - /* See Figure 2-15. Only overrun and EOF need to be checked. */ - - if (rc & Rx_OVR) { - /* Receiver overrun */ - priv->rx_over = 1; - if (priv->param.dma < 0) - write_scc(priv, R0, ERR_RES); - } else if (rc & END_FR) { - /* End of frame. 
Get byte count */ - if (priv->param.dma >= 0) { - flags = claim_dma_lock(); - cb = BUF_SIZE - get_dma_residue(priv->param.dma) - - 2; - release_dma_lock(flags); - } else { - cb = priv->rx_ptr - 2; - } - if (priv->rx_over) { - /* We had an overrun */ - priv->dev->stats.rx_errors++; - if (priv->rx_over == 2) - priv->dev->stats.rx_length_errors++; - else - priv->dev->stats.rx_fifo_errors++; - priv->rx_over = 0; - } else if (rc & CRC_ERR) { - /* Count invalid CRC only if packet length >= minimum */ - if (cb >= 15) { - priv->dev->stats.rx_errors++; - priv->dev->stats.rx_crc_errors++; - } - } else { - if (cb >= 15) { - if (priv->rx_count < NUM_RX_BUF - 1) { - /* Put good frame in FIFO */ - priv->rx_len[priv->rx_head] = cb; - priv->rx_head = - (priv->rx_head + - 1) % NUM_RX_BUF; - priv->rx_count++; - schedule_work(&priv->rx_work); - } else { - priv->dev->stats.rx_errors++; - priv->dev->stats.rx_over_errors++; - } - } - } - /* Get ready for new frame */ - if (priv->param.dma >= 0) { - flags = claim_dma_lock(); - set_dma_addr(priv->param.dma, - virt_to_bus(priv->rx_buf[priv->rx_head])); - set_dma_count(priv->param.dma, BUF_SIZE); - release_dma_lock(flags); - } else { - priv->rx_ptr = 0; - } - } -} - - -static void rx_bh(struct work_struct *ugli_api) -{ - struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work); - int i = priv->rx_tail; - int cb; - unsigned long flags; - struct sk_buff *skb; - unsigned char *data; - - spin_lock_irqsave(&priv->ring_lock, flags); - while (priv->rx_count) { - spin_unlock_irqrestore(&priv->ring_lock, flags); - cb = priv->rx_len[i]; - /* Allocate buffer */ - skb = dev_alloc_skb(cb + 1); - if (skb == NULL) { - /* Drop packet */ - priv->dev->stats.rx_dropped++; - } else { - /* Fill buffer */ - data = skb_put(skb, cb + 1); - data[0] = 0; - memcpy(&data[1], priv->rx_buf[i], cb); - skb->protocol = ax25_type_trans(skb, priv->dev); - netif_rx(skb); - priv->dev->stats.rx_packets++; - priv->dev->stats.rx_bytes += cb; - } - spin_lock_irqsave(&priv->ring_lock, flags); - /* Move tail */ - priv->rx_tail = i = (i + 1) % NUM_RX_BUF; - priv->rx_count--; - } - spin_unlock_irqrestore(&priv->ring_lock, flags); -} - - -static void tx_isr(struct scc_priv *priv) -{ - int i = priv->tx_tail, p = priv->tx_ptr; - - /* Suspend TX interrupts if we don't want to send anything. - See Figure 2-22. */ - if (p == priv->tx_len[i]) { - write_scc(priv, R0, RES_Tx_P); - return; - } - - /* Write characters */ - while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) { - write_scc_data(priv, priv->tx_buf[i][p++], 0); - } - - /* Reset EOM latch of Z8530 */ - if (!priv->tx_ptr && p && priv->chip == Z8530) - write_scc(priv, R0, RES_EOM_L); - - priv->tx_ptr = p; -} - - -static void es_isr(struct scc_priv *priv) -{ - int i, rr0, drr0, res; - unsigned long flags; - - /* Read status, reset interrupt bit (open latches) */ - rr0 = read_scc(priv, R0); - write_scc(priv, R0, RES_EXT_INT); - drr0 = priv->rr0 ^ rr0; - priv->rr0 = rr0; - - /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since - it might have already been cleared again by AUTOEOM. 
*/ - if (priv->state == TX_DATA) { - /* Get remaining bytes */ - i = priv->tx_tail; - if (priv->param.dma >= 0) { - disable_dma(priv->param.dma); - flags = claim_dma_lock(); - res = get_dma_residue(priv->param.dma); - release_dma_lock(flags); - } else { - res = priv->tx_len[i] - priv->tx_ptr; - priv->tx_ptr = 0; - } - /* Disable DREQ / TX interrupt */ - if (priv->param.dma >= 0 && priv->type == TYPE_TWIN) - outb(0, priv->card_base + TWIN_DMA_CFG); - else - write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN); - if (res) { - /* Update packet statistics */ - priv->dev->stats.tx_errors++; - priv->dev->stats.tx_fifo_errors++; - /* Other underrun interrupts may already be waiting */ - write_scc(priv, R0, RES_EXT_INT); - write_scc(priv, R0, RES_EXT_INT); - } else { - /* Update packet statistics */ - priv->dev->stats.tx_packets++; - priv->dev->stats.tx_bytes += priv->tx_len[i]; - /* Remove frame from FIFO */ - priv->tx_tail = (i + 1) % NUM_TX_BUF; - priv->tx_count--; - /* Inform upper layers */ - netif_wake_queue(priv->dev); - } - /* Switch state */ - write_scc(priv, R15, 0); - if (priv->tx_count && - time_is_after_jiffies(priv->tx_start + priv->param.txtimeout)) { - priv->state = TX_PAUSE; - start_timer(priv, priv->param.txpause, 0); - } else { - priv->state = TX_TAIL; - start_timer(priv, priv->param.txtail, 0); - } - } - - /* DCD transition */ - if (drr0 & DCD) { - if (rr0 & DCD) { - switch (priv->state) { - case IDLE: - case WAIT: - priv->state = DCD_ON; - write_scc(priv, R15, 0); - start_timer(priv, priv->param.dcdon, 0); - } - } else { - switch (priv->state) { - case RX_ON: - rx_off(priv); - priv->state = DCD_OFF; - write_scc(priv, R15, 0); - start_timer(priv, priv->param.dcdoff, 0); - } - } - } - - /* CTS transition */ - if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN) - tm_isr(priv); - -} - - -static void tm_isr(struct scc_priv *priv) -{ - switch (priv->state) { - case TX_HEAD: - case TX_PAUSE: - tx_on(priv); - priv->state = TX_DATA; - break; - case TX_TAIL: - write_scc(priv, R5, TxCRC_ENAB | Tx8); - priv->state = RTS_OFF; - if (priv->type != TYPE_TWIN) - write_scc(priv, R15, 0); - start_timer(priv, priv->param.rtsoff, 0); - break; - case RTS_OFF: - write_scc(priv, R15, DCDIE); - priv->rr0 = read_scc(priv, R0); - if (priv->rr0 & DCD) { - priv->dev->stats.collisions++; - rx_on(priv); - priv->state = RX_ON; - } else { - priv->state = WAIT; - start_timer(priv, priv->param.waittime, DCDIE); - } - break; - case WAIT: - if (priv->tx_count) { - priv->state = TX_HEAD; - priv->tx_start = jiffies; - write_scc(priv, R5, - TxCRC_ENAB | RTS | TxENAB | Tx8); - write_scc(priv, R15, 0); - start_timer(priv, priv->param.txdelay, 0); - } else { - priv->state = IDLE; - if (priv->type != TYPE_TWIN) - write_scc(priv, R15, DCDIE); - } - break; - case DCD_ON: - case DCD_OFF: - write_scc(priv, R15, DCDIE); - priv->rr0 = read_scc(priv, R0); - if (priv->rr0 & DCD) { - rx_on(priv); - priv->state = RX_ON; - } else { - priv->state = WAIT; - start_timer(priv, - random() / priv->param.persist * - priv->param.slottime, DCDIE); - } - break; - } -} diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 16105292b140..74e845fa2e07 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -1355,7 +1355,9 @@ static int rr_close(struct net_device *dev) rrpriv->fw_running = 0; + spin_unlock_irqrestore(&rrpriv->lock, flags); del_timer_sync(&rrpriv->timer); + spin_lock_irqsave(&rrpriv->lock, flags); writel(0, ®s->TxPi); writel(0, ®s->IpRxPi); diff --git 
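The rr_close() hunk above is a deadlock fix: del_timer_sync() waits for the timer handler to finish, and that handler takes rrpriv->lock, so the caller must drop the lock around the wait. A minimal sketch of the pattern, using placeholder names rather than the rrunner structures:

#include <linux/spinlock.h>
#include <linux/timer.h>

struct example_priv {			/* placeholder, not struct rr_private */
	spinlock_t lock;
	struct timer_list timer;
	int running;
};

static void example_close(struct example_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	priv->running = 0;
	/* The timer handler also takes priv->lock; waiting for it with
	 * the lock held would deadlock, so release it around the wait. */
	spin_unlock_irqrestore(&priv->lock, flags);
	del_timer_sync(&priv->timer);
	spin_lock_irqsave(&priv->lock, flags);
	/* remaining teardown still runs under the lock */
	spin_unlock_irqrestore(&priv->lock, flags);
}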
a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index cf69da0e296c..25b38a374e3c 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -15,6 +15,7 @@ #include <linux/list.h> #include <linux/hyperv.h> #include <linux/rndis.h> +#include <linux/jhash.h> /* RSS related */ #define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 /* query only */ @@ -237,6 +238,7 @@ int netvsc_recv_callback(struct net_device *net, void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); +void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev); u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan, struct xdp_buff *xdp); unsigned int netvsc_xdp_fraglen(unsigned int len); @@ -246,6 +248,8 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netvsc_device *nvdev); int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog); int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf); +int netvsc_ndoxdp_xmit(struct net_device *ndev, int n, + struct xdp_frame **frames, u32 flags); int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev, @@ -942,12 +946,21 @@ struct nvsc_rsc { #define NVSC_RSC_CSUM_INFO BIT(1) /* valid/present bit for 'csum_info' */ #define NVSC_RSC_HASH_INFO BIT(2) /* valid/present bit for 'hash_info' */ -struct netvsc_stats { +struct netvsc_stats_tx { + u64 packets; + u64 bytes; + u64 xdp_xmit; + struct u64_stats_sync syncp; +}; + +struct netvsc_stats_rx { u64 packets; u64 bytes; u64 broadcast; u64 multicast; u64 xdp_drop; + u64 xdp_redirect; + u64 xdp_tx; struct u64_stats_sync syncp; }; @@ -1046,6 +1059,55 @@ struct net_device_context { struct netvsc_device_info *saved_netvsc_dev_info; }; +/* Azure hosts don't support non-TCP port numbers in hashing for fragmented + * packets. We can use ethtool to change UDP hash level when necessary. 
+ */ +static inline u32 netvsc_get_hash(struct sk_buff *skb, + const struct net_device_context *ndc) +{ + struct flow_keys flow; + u32 hash, pkt_proto = 0; + static u32 hashrnd __read_mostly; + + net_get_random_once(&hashrnd, sizeof(hashrnd)); + + if (!skb_flow_dissect_flow_keys(skb, &flow, 0)) + return 0; + + switch (flow.basic.ip_proto) { + case IPPROTO_TCP: + if (flow.basic.n_proto == htons(ETH_P_IP)) + pkt_proto = HV_TCP4_L4HASH; + else if (flow.basic.n_proto == htons(ETH_P_IPV6)) + pkt_proto = HV_TCP6_L4HASH; + + break; + + case IPPROTO_UDP: + if (flow.basic.n_proto == htons(ETH_P_IP)) + pkt_proto = HV_UDP4_L4HASH; + else if (flow.basic.n_proto == htons(ETH_P_IPV6)) + pkt_proto = HV_UDP6_L4HASH; + + break; + } + + if (pkt_proto & ndc->l4_hash) { + return skb_get_hash(skb); + } else { + if (flow.basic.n_proto == htons(ETH_P_IP)) + hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd); + else if (flow.basic.n_proto == htons(ETH_P_IPV6)) + hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd); + else + return 0; + + __skb_set_sw_hash(skb, hash, false); + } + + return hash; +} + /* Per channel data */ struct netvsc_channel { struct vmbus_channel *channel; @@ -1060,9 +1122,10 @@ struct netvsc_channel { struct bpf_prog __rcu *bpf_prog; struct xdp_rxq_info xdp_rxq; + bool xdp_flush; - struct netvsc_stats tx_stats; - struct netvsc_stats rx_stats; + struct netvsc_stats_tx tx_stats; + struct netvsc_stats_rx rx_stats; }; /* Per netvsc device */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 9442f751ad3a..6e42cb03e226 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -20,6 +20,7 @@ #include <linux/vmalloc.h> #include <linux/rtnetlink.h> #include <linux/prefetch.h> +#include <linux/filter.h> #include <asm/sync_bitops.h> #include <asm/mshyperv.h> @@ -792,9 +793,9 @@ static void netvsc_send_tx_complete(struct net_device *ndev, int queue_sends; u64 cmd_rqst; - cmd_rqst = channel->request_addr_callback(channel, (u64)desc->trans_id); + cmd_rqst = channel->request_addr_callback(channel, desc->trans_id); if (cmd_rqst == VMBUS_RQST_ERROR) { - netdev_err(ndev, "Incorrect transaction id\n"); + netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id); return; } @@ -805,7 +806,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev, struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)skb->cb; u32 send_index = packet->send_buf_index; - struct netvsc_stats *tx_stats; + struct netvsc_stats_tx *tx_stats; if (send_index != NETVSC_INVALID_INDEX) netvsc_free_send_slot(net_device, send_index); @@ -854,9 +855,9 @@ static void netvsc_send_completion(struct net_device *ndev, /* First check if this is a VMBUS completion without data payload */ if (!msglen) { cmd_rqst = incoming_channel->request_addr_callback(incoming_channel, - (u64)desc->trans_id); + desc->trans_id); if (cmd_rqst == VMBUS_RQST_ERROR) { - netdev_err(ndev, "Invalid transaction id\n"); + netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id); return; } @@ -1670,12 +1671,17 @@ int netvsc_poll(struct napi_struct *napi, int budget) if (!nvchan->desc) nvchan->desc = hv_pkt_iter_first(channel); + nvchan->xdp_flush = false; + while (nvchan->desc && work_done < budget) { work_done += netvsc_process_raw_pkt(device, nvchan, net_device, ndev, nvchan->desc, budget); nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); } + if (nvchan->xdp_flush) + xdp_do_flush(); + /* Send any pending receive completions */ ret = send_recv_completions(ndev, net_device, nvchan); diff 
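The netvsc_poll() hunk just above batches XDP redirects: xdp_do_redirect() only queues a frame, and a single xdp_do_flush() per poll pass pushes the whole batch out. A rough sketch of that shape; the receive helper is a placeholder for the driver's per-descriptor path, not a real netvsc function:

#include <linux/filter.h>

/* Placeholder: consume one descriptor, return packets processed and
 * set *need_flush when a packet got an XDP_REDIRECT verdict. */
int example_rx_one(bool *need_flush);

static int example_poll_body(int budget)
{
	bool need_flush = false;
	int work_done = 0, n;

	while (work_done < budget) {
		n = example_rx_one(&need_flush);
		if (!n)
			break;		/* ring empty */
		work_done += n;
	}

	if (need_flush)
		xdp_do_flush();		/* one flush for all queued redirects */

	return work_done;
}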
--git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c index 7856905414eb..4a9522689fa4 100644 --- a/drivers/net/hyperv/netvsc_bpf.c +++ b/drivers/net/hyperv/netvsc_bpf.c @@ -10,6 +10,7 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> +#include <linux/netpoll.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/kernel.h> @@ -23,11 +24,13 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan, struct xdp_buff *xdp) { + struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats; void *data = nvchan->rsc.data[0]; u32 len = nvchan->rsc.len[0]; struct page *page = NULL; struct bpf_prog *prog; u32 act = XDP_PASS; + bool drop = true; xdp->data_hard_start = NULL; @@ -60,9 +63,34 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan, switch (act) { case XDP_PASS: case XDP_TX: + drop = false; + break; + case XDP_DROP: break; + case XDP_REDIRECT: + if (!xdp_do_redirect(ndev, xdp, prog)) { + nvchan->xdp_flush = true; + drop = false; + + u64_stats_update_begin(&rx_stats->syncp); + + rx_stats->xdp_redirect++; + rx_stats->packets++; + rx_stats->bytes += nvchan->rsc.pktlen; + + u64_stats_update_end(&rx_stats->syncp); + + break; + } else { + u64_stats_update_begin(&rx_stats->syncp); + rx_stats->xdp_drop++; + u64_stats_update_end(&rx_stats->syncp); + } + + fallthrough; + case XDP_ABORTED: trace_xdp_exception(ndev, prog, act); break; @@ -74,7 +102,7 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan, out: rcu_read_unlock(); - if (page && act != XDP_PASS && act != XDP_TX) { + if (page && drop) { __free_page(page); xdp->data_hard_start = NULL; } @@ -137,7 +165,6 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog, int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog) { struct netdev_bpf xdp; - bpf_op_t ndo_bpf; int ret; ASSERT_RTNL(); @@ -145,8 +172,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog) if (!vf_netdev) return 0; - ndo_bpf = vf_netdev->netdev_ops->ndo_bpf; - if (!ndo_bpf) + if (!vf_netdev->netdev_ops->ndo_bpf) return 0; memset(&xdp, 0, sizeof(xdp)); @@ -157,7 +183,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog) xdp.command = XDP_SETUP_PROG; xdp.prog = prog; - ret = ndo_bpf(vf_netdev, &xdp); + ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp); if (ret && prog) bpf_prog_put(prog); @@ -199,3 +225,68 @@ int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf) return -EINVAL; } } + +static int netvsc_ndoxdp_xmit_fm(struct net_device *ndev, + struct xdp_frame *frame, u16 q_idx) +{ + struct sk_buff *skb; + + skb = xdp_build_skb_from_frame(frame, ndev); + if (unlikely(!skb)) + return -ENOMEM; + + netvsc_get_hash(skb, netdev_priv(ndev)); + + skb_record_rx_queue(skb, q_idx); + + netvsc_xdp_xmit(skb, ndev); + + return 0; +} + +int netvsc_ndoxdp_xmit(struct net_device *ndev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + const struct net_device_ops *vf_ops; + struct netvsc_stats_tx *tx_stats; + struct netvsc_device *nvsc_dev; + struct net_device *vf_netdev; + int i, count = 0; + u16 q_idx; + + /* Don't transmit if netvsc_device is gone */ + nvsc_dev = rcu_dereference_bh(ndev_ctx->nvdev); + if (unlikely(!nvsc_dev || nvsc_dev->destroy)) + return 0; + + /* If VF is present and up then redirect packets to it. + * Skip the VF if it is marked down or has no carrier. 
+ * If netpoll is in use, then VF cannot be used either. + */ + vf_netdev = rcu_dereference_bh(ndev_ctx->vf_netdev); + if (vf_netdev && netif_running(vf_netdev) && + netif_carrier_ok(vf_netdev) && !netpoll_tx_running(ndev) && + vf_netdev->netdev_ops->ndo_xdp_xmit && + ndev_ctx->data_path_is_vf) { + vf_ops = vf_netdev->netdev_ops; + return vf_ops->ndo_xdp_xmit(vf_netdev, n, frames, flags); + } + + q_idx = smp_processor_id() % ndev->real_num_tx_queues; + + for (i = 0; i < n; i++) { + if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx)) + break; + + count++; + } + + tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats; + + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->xdp_xmit += count; + u64_stats_update_end(&tx_stats->syncp); + + return count; +} diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index fde1c492ca02..27f6bbca6619 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -242,56 +242,6 @@ static inline void *init_ppi_data(struct rndis_message *msg, return ppi + 1; } -/* Azure hosts don't support non-TCP port numbers in hashing for fragmented - * packets. We can use ethtool to change UDP hash level when necessary. - */ -static inline u32 netvsc_get_hash( - struct sk_buff *skb, - const struct net_device_context *ndc) -{ - struct flow_keys flow; - u32 hash, pkt_proto = 0; - static u32 hashrnd __read_mostly; - - net_get_random_once(&hashrnd, sizeof(hashrnd)); - - if (!skb_flow_dissect_flow_keys(skb, &flow, 0)) - return 0; - - switch (flow.basic.ip_proto) { - case IPPROTO_TCP: - if (flow.basic.n_proto == htons(ETH_P_IP)) - pkt_proto = HV_TCP4_L4HASH; - else if (flow.basic.n_proto == htons(ETH_P_IPV6)) - pkt_proto = HV_TCP6_L4HASH; - - break; - - case IPPROTO_UDP: - if (flow.basic.n_proto == htons(ETH_P_IP)) - pkt_proto = HV_UDP4_L4HASH; - else if (flow.basic.n_proto == htons(ETH_P_IPV6)) - pkt_proto = HV_UDP6_L4HASH; - - break; - } - - if (pkt_proto & ndc->l4_hash) { - return skb_get_hash(skb); - } else { - if (flow.basic.n_proto == htons(ETH_P_IP)) - hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd); - else if (flow.basic.n_proto == htons(ETH_P_IPV6)) - hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd); - else - return 0; - - __skb_set_sw_hash(skb, hash, false); - } - - return hash; -} - static inline int netvsc_get_tx_queue(struct net_device *ndev, struct sk_buff *skb, int old_idx) { @@ -804,7 +754,7 @@ void netvsc_linkstatus_callback(struct net_device *net, } /* This function should only be called after skb_record_rx_queue() */ -static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev) +void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev) { int rc; @@ -925,7 +875,7 @@ int netvsc_recv_callback(struct net_device *net, struct vmbus_channel *channel = nvchan->channel; u16 q_idx = channel->offermsg.offer.sub_channel_index; struct sk_buff *skb; - struct netvsc_stats *rx_stats = &nvchan->rx_stats; + struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats; struct xdp_buff xdp; u32 act; @@ -934,6 +884,9 @@ int netvsc_recv_callback(struct net_device *net, act = netvsc_run_xdp(net, nvchan, &xdp); + if (act == XDP_REDIRECT) + return NVSP_STAT_SUCCESS; + if (act != XDP_PASS && act != XDP_TX) { u64_stats_update_begin(&rx_stats->syncp); rx_stats->xdp_drop++; @@ -958,6 +911,9 @@ int netvsc_recv_callback(struct net_device *net, * statistics will not work correctly.
*/ u64_stats_update_begin(&rx_stats->syncp); + if (act == XDP_TX) + rx_stats->xdp_tx++; + rx_stats->packets++; rx_stats->bytes += nvchan->rsc.pktlen; @@ -1353,28 +1309,29 @@ static void netvsc_get_pcpu_stats(struct net_device *net, /* fetch percpu stats of netvsc */ for (i = 0; i < nvdev->num_chn; i++) { const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; - const struct netvsc_stats *stats; + const struct netvsc_stats_tx *tx_stats; + const struct netvsc_stats_rx *rx_stats; struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[nvchan->channel->target_cpu]; u64 packets, bytes; unsigned int start; - stats = &nvchan->tx_stats; + tx_stats = &nvchan->tx_stats; do { - start = u64_stats_fetch_begin_irq(&stats->syncp); - packets = stats->packets; - bytes = stats->bytes; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + start = u64_stats_fetch_begin_irq(&tx_stats->syncp); + packets = tx_stats->packets; + bytes = tx_stats->bytes; + } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); this_tot->tx_bytes += bytes; this_tot->tx_packets += packets; - stats = &nvchan->rx_stats; + rx_stats = &nvchan->rx_stats; do { - start = u64_stats_fetch_begin_irq(&stats->syncp); - packets = stats->packets; - bytes = stats->bytes; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + start = u64_stats_fetch_begin_irq(&rx_stats->syncp); + packets = rx_stats->packets; + bytes = rx_stats->bytes; + } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); this_tot->rx_bytes += bytes; this_tot->rx_packets += packets; @@ -1406,27 +1363,28 @@ static void netvsc_get_stats64(struct net_device *net, for (i = 0; i < nvdev->num_chn; i++) { const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; - const struct netvsc_stats *stats; + const struct netvsc_stats_tx *tx_stats; + const struct netvsc_stats_rx *rx_stats; u64 packets, bytes, multicast; unsigned int start; - stats = &nvchan->tx_stats; + tx_stats = &nvchan->tx_stats; do { - start = u64_stats_fetch_begin_irq(&stats->syncp); - packets = stats->packets; - bytes = stats->bytes; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + start = u64_stats_fetch_begin_irq(&tx_stats->syncp); + packets = tx_stats->packets; + bytes = tx_stats->bytes; + } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); t->tx_bytes += bytes; t->tx_packets += packets; - stats = &nvchan->rx_stats; + rx_stats = &nvchan->rx_stats; do { - start = u64_stats_fetch_begin_irq(&stats->syncp); - packets = stats->packets; - bytes = stats->bytes; - multicast = stats->multicast + stats->broadcast; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + start = u64_stats_fetch_begin_irq(&rx_stats->syncp); + packets = rx_stats->packets; + bytes = rx_stats->bytes; + multicast = rx_stats->multicast + rx_stats->broadcast; + } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); t->rx_bytes += bytes; t->rx_packets += packets; @@ -1515,8 +1473,8 @@ static const struct { /* statistics per queue (rx/tx packets/bytes) */ #define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats)) -/* 5 statistics per queue (rx/tx packets/bytes, rx xdp_drop) */ -#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 5) +/* 8 statistics per queue (rx/tx packets/bytes, XDP actions) */ +#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 8) static int netvsc_get_sset_count(struct net_device *dev, int string_set) { @@ -1543,12 +1501,16 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, struct net_device_context *ndc = netdev_priv(dev); struct 
netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); const void *nds = &ndc->eth_stats; - const struct netvsc_stats *qstats; + const struct netvsc_stats_tx *tx_stats; + const struct netvsc_stats_rx *rx_stats; struct netvsc_vf_pcpu_stats sum; struct netvsc_ethtool_pcpu_stats *pcpu_sum; unsigned int start; u64 packets, bytes; u64 xdp_drop; + u64 xdp_redirect; + u64 xdp_tx; + u64 xdp_xmit; int i, j, cpu; if (!nvdev) @@ -1562,26 +1524,32 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset); for (j = 0; j < nvdev->num_chn; j++) { - qstats = &nvdev->chan_table[j].tx_stats; + tx_stats = &nvdev->chan_table[j].tx_stats; do { - start = u64_stats_fetch_begin_irq(&qstats->syncp); - packets = qstats->packets; - bytes = qstats->bytes; - } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); + start = u64_stats_fetch_begin_irq(&tx_stats->syncp); + packets = tx_stats->packets; + bytes = tx_stats->bytes; + xdp_xmit = tx_stats->xdp_xmit; + } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); data[i++] = packets; data[i++] = bytes; + data[i++] = xdp_xmit; - qstats = &nvdev->chan_table[j].rx_stats; + rx_stats = &nvdev->chan_table[j].rx_stats; do { - start = u64_stats_fetch_begin_irq(&qstats->syncp); - packets = qstats->packets; - bytes = qstats->bytes; - xdp_drop = qstats->xdp_drop; - } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); + start = u64_stats_fetch_begin_irq(&rx_stats->syncp); + packets = rx_stats->packets; + bytes = rx_stats->bytes; + xdp_drop = rx_stats->xdp_drop; + xdp_redirect = rx_stats->xdp_redirect; + xdp_tx = rx_stats->xdp_tx; + } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); data[i++] = packets; data[i++] = bytes; data[i++] = xdp_drop; + data[i++] = xdp_redirect; + data[i++] = xdp_tx; } pcpu_sum = kvmalloc_array(num_possible_cpus(), @@ -1622,9 +1590,12 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) for (i = 0; i < nvdev->num_chn; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_xdp_xmit", i); ethtool_sprintf(&p, "rx_queue_%u_packets", i); ethtool_sprintf(&p, "rx_queue_%u_bytes", i); ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i); + ethtool_sprintf(&p, "rx_queue_%u_xdp_redirect", i); + ethtool_sprintf(&p, "rx_queue_%u_xdp_tx", i); } for_each_present_cpu(cpu) { @@ -2057,6 +2028,7 @@ static const struct net_device_ops device_ops = { .ndo_select_queue = netvsc_select_queue, .ndo_get_stats64 = netvsc_get_stats64, .ndo_bpf = netvsc_bpf, + .ndo_xdp_xmit = netvsc_ndoxdp_xmit, }; /* diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig index 0f7c6dc2ed15..95da876c5613 100644 --- a/drivers/net/ieee802154/Kconfig +++ b/drivers/net/ieee802154/Kconfig @@ -33,13 +33,6 @@ config IEEE802154_AT86RF230 This driver can also be built as a module. To do so, say M here. the module will be called 'at86rf230'. -config IEEE802154_AT86RF230_DEBUGFS - depends on IEEE802154_AT86RF230 - bool "AT86RF230 debugfs interface" - depends on DEBUG_FS - help - This option compiles debugfs code for the at86rf230 driver. 
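All of the netvsc stats rework above reads counters through the u64_stats_sync retry loop: hot-path writers only bump a sequence count, and readers re-snapshot until that count is stable. The reader side, reduced to its core and reusing the netvsc_stats_tx type from the hunk:

/* Sketch: tear-free 64-bit counter snapshot, even on 32-bit SMP. */
static void example_read_tx_stats(const struct netvsc_stats_tx *stats,
				  u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		*packets = stats->packets;
		*bytes = stats->bytes;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
}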
- config IEEE802154_MRF24J40 tristate "Microchip MRF24J40 transceiver driver" depends on IEEE802154_DRIVERS && MAC802154 diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 549d04b5f3d4..15f283b26721 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -23,7 +23,6 @@ #include <linux/skbuff.h> #include <linux/of_gpio.h> #include <linux/ieee802154.h> -#include <linux/debugfs.h> #include <net/mac802154.h> #include <net/cfg802154.h> @@ -72,19 +71,11 @@ struct at86rf230_state_change { void (*complete)(void *context); u8 from_state; u8 to_state; + int trac; bool free; }; -struct at86rf230_trac { - u64 success; - u64 success_data_pending; - u64 success_wait_for_ack; - u64 channel_access_failure; - u64 no_ack; - u64 invalid; -}; - struct at86rf230_local { struct spi_device *spi; @@ -104,8 +95,6 @@ struct at86rf230_local { u8 tx_retry; struct sk_buff *tx_skb; struct at86rf230_state_change tx; - - struct at86rf230_trac trac; }; #define AT86RF2XX_NUMREGS 0x3F @@ -346,8 +335,7 @@ at86rf230_async_error_recover_complete(void *context) if (lp->was_tx) { lp->was_tx = 0; - dev_kfree_skb_any(lp->tx_skb); - ieee802154_wake_queue(lp->hw); + ieee802154_xmit_hw_error(lp->hw, lp->tx_skb); } } @@ -653,7 +641,11 @@ at86rf230_tx_complete(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - ieee802154_xmit_complete(lp->hw, lp->tx_skb, false); + if (ctx->trac == IEEE802154_SUCCESS) + ieee802154_xmit_complete(lp->hw, lp->tx_skb, false); + else + ieee802154_xmit_error(lp->hw, lp->tx_skb, ctx->trac); + kfree(ctx); } @@ -672,30 +664,21 @@ at86rf230_tx_trac_check(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; + u8 trac = TRAC_MASK(ctx->buf[1]); - if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS)) { - u8 trac = TRAC_MASK(ctx->buf[1]); - - switch (trac) { - case TRAC_SUCCESS: - lp->trac.success++; - break; - case TRAC_SUCCESS_DATA_PENDING: - lp->trac.success_data_pending++; - break; - case TRAC_CHANNEL_ACCESS_FAILURE: - lp->trac.channel_access_failure++; - break; - case TRAC_NO_ACK: - lp->trac.no_ack++; - break; - case TRAC_INVALID: - lp->trac.invalid++; - break; - default: - WARN_ONCE(1, "received tx trac status %d\n", trac); - break; - } + switch (trac) { + case TRAC_SUCCESS: + case TRAC_SUCCESS_DATA_PENDING: + ctx->trac = IEEE802154_SUCCESS; + break; + case TRAC_CHANNEL_ACCESS_FAILURE: + ctx->trac = IEEE802154_CHANNEL_ACCESS_FAILURE; + break; + case TRAC_NO_ACK: + ctx->trac = IEEE802154_NO_ACK; + break; + default: + ctx->trac = IEEE802154_SYSTEM_ERROR; } at86rf230_async_state_change(lp, ctx, STATE_TX_ON, at86rf230_tx_on); @@ -737,25 +720,6 @@ at86rf230_rx_trac_check(void *context) u8 *buf = ctx->buf; int rc; - if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS)) { - u8 trac = TRAC_MASK(buf[1]); - - switch (trac) { - case TRAC_SUCCESS: - lp->trac.success++; - break; - case TRAC_SUCCESS_WAIT_FOR_ACK: - lp->trac.success_wait_for_ack++; - break; - case TRAC_INVALID: - lp->trac.invalid++; - break; - default: - WARN_ONCE(1, "received rx trac status %d\n", trac); - break; - } - } - buf[0] = CMD_FB; ctx->trx.len = AT86RF2XX_MAX_BUF; ctx->msg.complete = at86rf230_rx_read_frame_complete; @@ -951,10 +915,6 @@ at86rf230_start(struct ieee802154_hw *hw) { struct at86rf230_local *lp = hw->priv; - /* reset trac stats on start */ - if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS)) - memset(&lp->trac, 0, sizeof(struct at86rf230_trac)); - 
at86rf230_awake(lp); enable_irq(lp->spi->irq); @@ -1064,36 +1024,6 @@ at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel) if (rc < 0) return rc; - /* This sets the symbol_duration according frequency on the 212. - * TODO move this handling while set channel and page in cfg802154. - * We can do that, this timings are according 802.15.4 standard. - * If we do that in cfg802154, this is a more generic calculation. - * - * This should also protected from ifs_timer. Means cancel timer and - * init with a new value. For now, this is okay. - */ - if (channel == 0) { - if (page == 0) { - /* SUB:0 and BPSK:0 -> BPSK-20 */ - lp->hw->phy->symbol_duration = 50; - } else { - /* SUB:1 and BPSK:0 -> BPSK-40 */ - lp->hw->phy->symbol_duration = 25; - } - } else { - if (page == 0) - /* SUB:0 and BPSK:1 -> OQPSK-100/200/400 */ - lp->hw->phy->symbol_duration = 40; - else - /* SUB:1 and BPSK:1 -> OQPSK-250/500/1000 */ - lp->hw->phy->symbol_duration = 16; - } - - lp->hw->phy->lifs_period = IEEE802154_LIFS_PERIOD * - lp->hw->phy->symbol_duration; - lp->hw->phy->sifs_period = IEEE802154_SIFS_PERIOD * - lp->hw->phy->symbol_duration; - return at86rf230_write_subreg(lp, SR_CHANNEL, channel); } @@ -1569,7 +1499,6 @@ at86rf230_detect_device(struct at86rf230_local *lp) lp->data = &at86rf231_data; lp->hw->phy->supported.channels[0] = 0x7FFF800; lp->hw->phy->current_channel = 11; - lp->hw->phy->symbol_duration = 16; lp->hw->phy->supported.tx_powers = at86rf231_powers; lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers); lp->hw->phy->supported.cca_ed_levels = at86rf231_ed_levels; @@ -1582,7 +1511,6 @@ at86rf230_detect_device(struct at86rf230_local *lp) lp->hw->phy->supported.channels[0] = 0x00007FF; lp->hw->phy->supported.channels[2] = 0x00007FF; lp->hw->phy->current_channel = 5; - lp->hw->phy->symbol_duration = 25; lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH; lp->hw->phy->supported.tx_powers = at86rf212_powers; lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers); @@ -1594,7 +1522,6 @@ at86rf230_detect_device(struct at86rf230_local *lp) lp->data = &at86rf233_data; lp->hw->phy->supported.channels[0] = 0x7FFF800; lp->hw->phy->current_channel = 13; - lp->hw->phy->symbol_duration = 16; lp->hw->phy->supported.tx_powers = at86rf233_powers; lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers); lp->hw->phy->supported.cca_ed_levels = at86rf233_ed_levels; @@ -1615,47 +1542,6 @@ not_supp: return rc; } -#ifdef CONFIG_IEEE802154_AT86RF230_DEBUGFS -static struct dentry *at86rf230_debugfs_root; - -static int at86rf230_stats_show(struct seq_file *file, void *offset) -{ - struct at86rf230_local *lp = file->private; - - seq_printf(file, "SUCCESS:\t\t%8llu\n", lp->trac.success); - seq_printf(file, "SUCCESS_DATA_PENDING:\t%8llu\n", - lp->trac.success_data_pending); - seq_printf(file, "SUCCESS_WAIT_FOR_ACK:\t%8llu\n", - lp->trac.success_wait_for_ack); - seq_printf(file, "CHANNEL_ACCESS_FAILURE:\t%8llu\n", - lp->trac.channel_access_failure); - seq_printf(file, "NO_ACK:\t\t\t%8llu\n", lp->trac.no_ack); - seq_printf(file, "INVALID:\t\t%8llu\n", lp->trac.invalid); - return 0; -} -DEFINE_SHOW_ATTRIBUTE(at86rf230_stats); - -static void at86rf230_debugfs_init(struct at86rf230_local *lp) -{ - char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "at86rf230-"; - - strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN); - - at86rf230_debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL); - - debugfs_create_file("trac_stats", 0444, at86rf230_debugfs_root, lp, - 
&at86rf230_stats_fops); -} - -static void at86rf230_debugfs_remove(void) -{ - debugfs_remove_recursive(at86rf230_debugfs_root); -} -#else -static void at86rf230_debugfs_init(struct at86rf230_local *lp) { } -static void at86rf230_debugfs_remove(void) { } -#endif - static int at86rf230_probe(struct spi_device *spi) { struct ieee802154_hw *hw; @@ -1752,16 +1638,12 @@ static int at86rf230_probe(struct spi_device *spi) /* going into sleep by default */ at86rf230_sleep(lp); - at86rf230_debugfs_init(lp); - rc = ieee802154_register_hw(lp->hw); if (rc) - goto free_debugfs; + goto free_dev; return rc; -free_debugfs: - at86rf230_debugfs_remove(); free_dev: ieee802154_free_hw(lp->hw); @@ -1776,7 +1658,6 @@ static void at86rf230_remove(struct spi_device *spi) at86rf230_write_subreg(lp, SR_IRQ_MASK, 0); ieee802154_unregister_hw(lp->hw); ieee802154_free_hw(lp->hw); - at86rf230_debugfs_remove(); dev_dbg(&spi->dev, "unregistered at86rf230\n"); } diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 07bafbf94680..2c338783893d 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -206,9 +206,7 @@ static void atusb_tx_done(struct atusb *atusb, u8 seq) * unlikely case now that seq == expect is then true, but can * happen and fail with a tx_skb = NULL; */ - ieee802154_wake_queue(atusb->hw); - if (atusb->tx_skb) - dev_kfree_skb_irq(atusb->tx_skb); + ieee802154_xmit_hw_error(atusb->hw, atusb->tx_skb); } } @@ -614,36 +612,6 @@ static int hulusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel) if (rc < 0) return rc; - /* This sets the symbol_duration according frequency on the 212. - * TODO move this handling while set channel and page in cfg802154. - * We can do that, this timings are according 802.15.4 standard. - * If we do that in cfg802154, this is a more generic calculation. - * - * This should also protected from ifs_timer. Means cancel timer and - * init with a new value. For now, this is okay. 
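The at86rf230 TX-path hunks above replace the open-coded dev_kfree_skb_any() plus ieee802154_wake_queue() with the mac802154 completion helpers, which free the skb, restart the queue, and hand the TRAC result to the stack. The reporting convention, in a minimal sketch with placeholder parameters:

#include <net/mac802154.h>

/* Sketch: hand the transmit outcome to mac802154; the core does the
 * skb and queue bookkeeping. */
static void example_tx_done(struct ieee802154_hw *hw,
			    struct sk_buff *skb, int status)
{
	if (status == IEEE802154_SUCCESS)
		ieee802154_xmit_complete(hw, skb, false);
	else	/* e.g. IEEE802154_NO_ACK, IEEE802154_CHANNEL_ACCESS_FAILURE */
		ieee802154_xmit_error(hw, skb, status);
}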
- */ - if (channel == 0) { - if (page == 0) { - /* SUB:0 and BPSK:0 -> BPSK-20 */ - lp->hw->phy->symbol_duration = 50; - } else { - /* SUB:1 and BPSK:0 -> BPSK-40 */ - lp->hw->phy->symbol_duration = 25; - } - } else { - if (page == 0) - /* SUB:0 and BPSK:1 -> OQPSK-100/200/400 */ - lp->hw->phy->symbol_duration = 40; - else - /* SUB:1 and BPSK:1 -> OQPSK-250/500/1000 */ - lp->hw->phy->symbol_duration = 16; - } - - lp->hw->phy->lifs_period = IEEE802154_LIFS_PERIOD * - lp->hw->phy->symbol_duration; - lp->hw->phy->sifs_period = IEEE802154_SIFS_PERIOD * - lp->hw->phy->symbol_duration; - return atusb_write_subreg(lp, SR_CHANNEL, channel); } @@ -869,7 +837,6 @@ static int atusb_get_and_conf_chip(struct atusb *atusb) chip = "AT86RF230"; atusb->hw->phy->supported.channels[0] = 0x7FFF800; atusb->hw->phy->current_channel = 11; /* reset default */ - atusb->hw->phy->symbol_duration = 16; atusb->hw->phy->supported.tx_powers = atusb_powers; atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers); hw->phy->supported.cca_ed_levels = atusb_ed_levels; @@ -879,7 +846,6 @@ static int atusb_get_and_conf_chip(struct atusb *atusb) chip = "AT86RF231"; atusb->hw->phy->supported.channels[0] = 0x7FFF800; atusb->hw->phy->current_channel = 11; /* reset default */ - atusb->hw->phy->symbol_duration = 16; atusb->hw->phy->supported.tx_powers = atusb_powers; atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers); hw->phy->supported.cca_ed_levels = atusb_ed_levels; @@ -891,7 +857,6 @@ static int atusb_get_and_conf_chip(struct atusb *atusb) atusb->hw->phy->supported.channels[0] = 0x00007FF; atusb->hw->phy->supported.channels[2] = 0x00007FF; atusb->hw->phy->current_channel = 5; - atusb->hw->phy->symbol_duration = 25; atusb->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH; atusb->hw->phy->supported.tx_powers = at86rf212_powers; atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers); diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 187cbc634ce8..42c0b451088d 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -89,48 +89,6 @@ #define CA8210_TEST_INT_FILE_NAME "ca8210_test" #define CA8210_TEST_INT_FIFO_SIZE 256 -/* MAC status enumerations */ -#define MAC_SUCCESS (0x00) -#define MAC_ERROR (0x01) -#define MAC_CANCELLED (0x02) -#define MAC_READY_FOR_POLL (0x03) -#define MAC_COUNTER_ERROR (0xDB) -#define MAC_IMPROPER_KEY_TYPE (0xDC) -#define MAC_IMPROPER_SECURITY_LEVEL (0xDD) -#define MAC_UNSUPPORTED_LEGACY (0xDE) -#define MAC_UNSUPPORTED_SECURITY (0xDF) -#define MAC_BEACON_LOST (0xE0) -#define MAC_CHANNEL_ACCESS_FAILURE (0xE1) -#define MAC_DENIED (0xE2) -#define MAC_DISABLE_TRX_FAILURE (0xE3) -#define MAC_SECURITY_ERROR (0xE4) -#define MAC_FRAME_TOO_LONG (0xE5) -#define MAC_INVALID_GTS (0xE6) -#define MAC_INVALID_HANDLE (0xE7) -#define MAC_INVALID_PARAMETER (0xE8) -#define MAC_NO_ACK (0xE9) -#define MAC_NO_BEACON (0xEA) -#define MAC_NO_DATA (0xEB) -#define MAC_NO_SHORT_ADDRESS (0xEC) -#define MAC_OUT_OF_CAP (0xED) -#define MAC_PAN_ID_CONFLICT (0xEE) -#define MAC_REALIGNMENT (0xEF) -#define MAC_TRANSACTION_EXPIRED (0xF0) -#define MAC_TRANSACTION_OVERFLOW (0xF1) -#define MAC_TX_ACTIVE (0xF2) -#define MAC_UNAVAILABLE_KEY (0xF3) -#define MAC_UNSUPPORTED_ATTRIBUTE (0xF4) -#define MAC_INVALID_ADDRESS (0xF5) -#define MAC_ON_TIME_TOO_LONG (0xF6) -#define MAC_PAST_TIME (0xF7) -#define MAC_TRACKING_OFF (0xF8) -#define MAC_INVALID_INDEX (0xF9) -#define MAC_LIMIT_REACHED (0xFA) -#define MAC_READ_ONLY (0xFB) -#define 
MAC_SCAN_IN_PROGRESS (0xFC) -#define MAC_SUPERFRAME_OVERLAP (0xFD) -#define MAC_SYSTEM_ERROR (0xFF) - /* HWME attribute IDs */ #define HWME_EDTHRESHOLD (0x04) #define HWME_EDVALUE (0x06) @@ -551,58 +509,58 @@ static int link_to_linux_err(int link_status) return link_status; } switch (link_status) { - case MAC_SUCCESS: - case MAC_REALIGNMENT: + case IEEE802154_SUCCESS: + case IEEE802154_REALIGNMENT: return 0; - case MAC_IMPROPER_KEY_TYPE: + case IEEE802154_IMPROPER_KEY_TYPE: return -EKEYREJECTED; - case MAC_IMPROPER_SECURITY_LEVEL: - case MAC_UNSUPPORTED_LEGACY: - case MAC_DENIED: + case IEEE802154_IMPROPER_SECURITY_LEVEL: + case IEEE802154_UNSUPPORTED_LEGACY: + case IEEE802154_DENIED: return -EACCES; - case MAC_BEACON_LOST: - case MAC_NO_ACK: - case MAC_NO_BEACON: + case IEEE802154_BEACON_LOST: + case IEEE802154_NO_ACK: + case IEEE802154_NO_BEACON: return -ENETUNREACH; - case MAC_CHANNEL_ACCESS_FAILURE: - case MAC_TX_ACTIVE: - case MAC_SCAN_IN_PROGRESS: + case IEEE802154_CHANNEL_ACCESS_FAILURE: + case IEEE802154_TX_ACTIVE: + case IEEE802154_SCAN_IN_PROGRESS: return -EBUSY; - case MAC_DISABLE_TRX_FAILURE: - case MAC_OUT_OF_CAP: + case IEEE802154_DISABLE_TRX_FAILURE: + case IEEE802154_OUT_OF_CAP: return -EAGAIN; - case MAC_FRAME_TOO_LONG: + case IEEE802154_FRAME_TOO_LONG: return -EMSGSIZE; - case MAC_INVALID_GTS: - case MAC_PAST_TIME: + case IEEE802154_INVALID_GTS: + case IEEE802154_PAST_TIME: return -EBADSLT; - case MAC_INVALID_HANDLE: + case IEEE802154_INVALID_HANDLE: return -EBADMSG; - case MAC_INVALID_PARAMETER: - case MAC_UNSUPPORTED_ATTRIBUTE: - case MAC_ON_TIME_TOO_LONG: - case MAC_INVALID_INDEX: + case IEEE802154_INVALID_PARAMETER: + case IEEE802154_UNSUPPORTED_ATTRIBUTE: + case IEEE802154_ON_TIME_TOO_LONG: + case IEEE802154_INVALID_INDEX: return -EINVAL; - case MAC_NO_DATA: + case IEEE802154_NO_DATA: return -ENODATA; - case MAC_NO_SHORT_ADDRESS: + case IEEE802154_NO_SHORT_ADDRESS: return -EFAULT; - case MAC_PAN_ID_CONFLICT: + case IEEE802154_PAN_ID_CONFLICT: return -EADDRINUSE; - case MAC_TRANSACTION_EXPIRED: + case IEEE802154_TRANSACTION_EXPIRED: return -ETIME; - case MAC_TRANSACTION_OVERFLOW: + case IEEE802154_TRANSACTION_OVERFLOW: return -ENOBUFS; - case MAC_UNAVAILABLE_KEY: + case IEEE802154_UNAVAILABLE_KEY: return -ENOKEY; - case MAC_INVALID_ADDRESS: + case IEEE802154_INVALID_ADDRESS: return -ENXIO; - case MAC_TRACKING_OFF: - case MAC_SUPERFRAME_OVERLAP: + case IEEE802154_TRACKING_OFF: + case IEEE802154_SUPERFRAME_OVERLAP: return -EREMOTEIO; - case MAC_LIMIT_REACHED: + case IEEE802154_LIMIT_REACHED: return -EDQUOT; - case MAC_READ_ONLY: + case IEEE802154_READ_ONLY: return -EROFS; default: return -EPROTO; @@ -754,7 +712,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl) ca8210_net_rx(priv->hw, buf, len); if (buf[0] == SPI_MCPS_DATA_CONFIRM) { - if (buf[3] == MAC_TRANSACTION_OVERFLOW) { + if (buf[3] == IEEE802154_TRANSACTION_OVERFLOW) { dev_info( &priv->spi->dev, "Waiting for transaction overflow to stabilise...\n"); @@ -1128,7 +1086,7 @@ static u8 tdme_setsfr_request_sync( ); if (ret) { dev_crit(&spi->dev, "cascoda_api_downstream returned %d", ret); - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; } if (response.command_id != SPI_TDME_SETSFR_CONFIRM) { @@ -1137,7 +1095,7 @@ static u8 tdme_setsfr_request_sync( "sync response to SPI_TDME_SETSFR_REQUEST was not SPI_TDME_SETSFR_CONFIRM, it was %d\n", response.command_id ); - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; } return response.pdata.tdme_set_sfr_cnf.status; @@ -1151,7 +1109,7 @@ static 
u8 tdme_setsfr_request_sync( */ static u8 tdme_chipinit(void *device_ref) { - u8 status = MAC_SUCCESS; + u8 status = IEEE802154_SUCCESS; u8 sfr_address; struct spi_device *spi = device_ref; struct preamble_cfg_sfr pre_cfg_value = { @@ -1220,7 +1178,7 @@ static u8 tdme_chipinit(void *device_ref) goto finish; finish: - if (status != MAC_SUCCESS) { + if (status != IEEE802154_SUCCESS) { dev_err( &spi->dev, "failed to set sfr at %#03x, status = %#03x\n", @@ -1287,7 +1245,7 @@ static u8 tdme_checkpibattribute( const void *pib_attribute_value ) { - u8 status = MAC_SUCCESS; + u8 status = IEEE802154_SUCCESS; u8 value; value = *((u8 *)pib_attribute_value); @@ -1296,52 +1254,52 @@ static u8 tdme_checkpibattribute( /* PHY */ case PHY_TRANSMIT_POWER: if (value > 0x3F) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case PHY_CCA_MODE: if (value > 0x03) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; /* MAC */ case MAC_BATT_LIFE_EXT_PERIODS: if (value < 6 || value > 41) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_BEACON_PAYLOAD: if (pib_attribute_length > MAX_BEACON_PAYLOAD_LENGTH) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_BEACON_PAYLOAD_LENGTH: if (value > MAX_BEACON_PAYLOAD_LENGTH) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_BEACON_ORDER: if (value > 15) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_MAX_BE: if (value < 3 || value > 8) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_MAX_CSMA_BACKOFFS: if (value > 5) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_MAX_FRAME_RETRIES: if (value > 7) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_MIN_BE: if (value > 8) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_RESPONSE_WAIT_TIME: if (value < 2 || value > 64) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_SUPERFRAME_ORDER: if (value > 15) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; /* boolean */ case MAC_ASSOCIATED_PAN_COORD: @@ -1353,16 +1311,16 @@ static u8 tdme_checkpibattribute( case MAC_RX_ON_WHEN_IDLE: case MAC_SECURITY_ENABLED: if (value > 1) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; /* MAC SEC */ case MAC_AUTO_REQUEST_SECURITY_LEVEL: if (value > 7) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_AUTO_REQUEST_KEY_ID_MODE: if (value > 3) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; default: break; @@ -1522,9 +1480,9 @@ static u8 mcps_data_request( if (ca8210_spi_transfer(device_ref, &command.command_id, command.length + 2)) - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; - return MAC_SUCCESS; + return IEEE802154_SUCCESS; } /** @@ -1553,11 +1511,11 @@ static u8 mlme_reset_request_sync( &response.command_id, device_ref)) { dev_err(&spi->dev, "cascoda_api_downstream failed\n"); - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; } if (response.command_id != SPI_MLME_RESET_CONFIRM) - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; status = response.pdata.status; @@ -1600,7 +1558,7 @@ static u8 mlme_set_request_sync( */ if (tdme_checkpibattribute( 
pib_attribute, pib_attribute_length, pib_attribute_value)) { - return MAC_INVALID_PARAMETER; + return IEEE802154_INVALID_PARAMETER; } if (pib_attribute == PHY_CURRENT_CHANNEL) { @@ -1636,11 +1594,11 @@ static u8 mlme_set_request_sync( command.length + 2, &response.command_id, device_ref)) { - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; } if (response.command_id != SPI_MLME_SET_CONFIRM) - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; return response.pdata.status; } @@ -1678,11 +1636,11 @@ static u8 hwme_set_request_sync( command.length + 2, &response.command_id, device_ref)) { - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; } if (response.command_id != SPI_HWME_SET_CONFIRM) - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; return response.pdata.hwme_set_cnf.status; } @@ -1714,13 +1672,13 @@ static u8 hwme_get_request_sync( command.length + 2, &response.command_id, device_ref)) { - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; } if (response.command_id != SPI_HWME_GET_CONFIRM) - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; - if (response.pdata.hwme_get_cnf.status == MAC_SUCCESS) { + if (response.pdata.hwme_get_cnf.status == IEEE802154_SUCCESS) { *hw_attribute_length = response.pdata.hwme_get_cnf.hw_attribute_length; memcpy( @@ -1770,9 +1728,8 @@ static int ca8210_async_xmit_complete( "Link transmission unsuccessful, status = %d\n", status ); - if (status != MAC_TRANSACTION_OVERFLOW) { - dev_kfree_skb_any(priv->tx_skb); - ieee802154_wake_queue(priv->hw); + if (status != IEEE802154_TRANSACTION_OVERFLOW) { + ieee802154_xmit_error(priv->hw, priv->tx_skb, status); return 0; } } @@ -2436,7 +2393,7 @@ static int ca8210_test_check_upstream(u8 *buf, void *device_ref) if (ret) { response[0] = SPI_MLME_SET_CONFIRM; response[1] = 3; - response[2] = MAC_INVALID_PARAMETER; + response[2] = IEEE802154_INVALID_PARAMETER; response[3] = buf[2]; response[4] = buf[3]; if (cascoda_api_upstream) diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index c927a5ae0d05..2fe0e4a0a0c4 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -975,10 +975,6 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp) dev_dbg(printdev(lp), "%s\n", __func__); - phy->symbol_duration = 16; - phy->lifs_period = 40 * phy->symbol_duration; - phy->sifs_period = 12 * phy->symbol_duration; - hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS; @@ -1006,7 +1002,6 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp) phy->current_page = 0; /* MCR20A default reset value */ phy->current_channel = 20; - phy->symbol_duration = 16; phy->supported.tx_powers = mcr20a_powers; phy->supported.tx_powers_size = ARRAY_SIZE(mcr20a_powers); phy->cca_ed_level = phy->supported.cca_ed_levels[75]; diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 888e94278a84..e133eb2bebcf 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -130,9 +130,10 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, */ if (data->endpoint.config.aggregation) { limit += SZ_1K * aggr_byte_limit_max(ipa->version); - if (buffer_size > limit) { + if (buffer_size - NET_SKB_PAD > limit) { dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n", - data->endpoint_id, buffer_size, limit); + data->endpoint_id, + buffer_size - NET_SKB_PAD, limit); return false; } @@ -739,6 +740,7 @@ static void 
ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) if (endpoint->data->aggregation) { if (!endpoint->toward_ipa) { const struct ipa_endpoint_rx_data *rx_data; + u32 buffer_size; bool close_eof; u32 limit; @@ -746,7 +748,8 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK); val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK); - limit = ipa_aggr_size_kb(rx_data->buffer_size); + buffer_size = rx_data->buffer_size; + limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD); val |= aggr_byte_limit_encoded(version, limit); limit = IPA_AGGR_TIME_LIMIT; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 069e8824c264..b00bc8173abe 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -460,8 +460,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) return RX_HANDLER_CONSUMED; *pskb = skb; eth = eth_hdr(skb); - if (macvlan_forward_source(skb, port, eth->h_source)) + if (macvlan_forward_source(skb, port, eth->h_source)) { + kfree_skb(skb); return RX_HANDLER_CONSUMED; + } src = macvlan_hash_lookup(port, eth->h_source); if (src && src->mode != MACVLAN_MODE_VEPA && src->mode != MACVLAN_MODE_BRIDGE) { @@ -480,8 +482,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) return RX_HANDLER_PASS; } - if (macvlan_forward_source(skb, port, eth->h_source)) + if (macvlan_forward_source(skb, port, eth->h_source)) { + kfree_skb(skb); return RX_HANDLER_CONSUMED; + } if (macvlan_passthru(port)) vlan = list_first_or_null_rcu(&port->vlans, struct macvlan_dev, list); diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c index baf7afac7857..53846c6b56ca 100644 --- a/drivers/net/mctp/mctp-i2c.c +++ b/drivers/net/mctp/mctp-i2c.c @@ -553,7 +553,7 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev, hdr->source_slave = ((llsrc << 1) & 0xff) | 0x01; mhdr->ver = 0x01; - return 0; + return sizeof(struct mctp_i2c_hdr); } static int mctp_i2c_tx_thread(void *data) diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c index 1becb1a731f6..1c1584fca632 100644 --- a/drivers/net/mdio/fwnode_mdio.c +++ b/drivers/net/mdio/fwnode_mdio.c @@ -43,6 +43,11 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio, int rc; rc = fwnode_irq_get(child, 0); + /* Don't wait forever if the IRQ provider doesn't become available, + * just fall back to poll mode + */ + if (rc == -EPROBE_DEFER) + rc = driver_deferred_probe_check_state(&phy->mdio.dev); if (rc == -EPROBE_DEFER) return rc; diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index e2273588c75b..944d005d2bd1 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -3,6 +3,7 @@ #include <linux/bitfield.h> #include <linux/delay.h> +#include <linux/reset.h> #include <linux/iopoll.h> #include <linux/mdio.h> #include <linux/module.h> @@ -21,6 +22,10 @@ #define ASPEED_MDIO_CTRL_OP GENMASK(27, 26) #define MDIO_C22_OP_WRITE 0b01 #define MDIO_C22_OP_READ 0b10 +#define MDIO_C45_OP_ADDR 0b00 +#define MDIO_C45_OP_WRITE 0b01 +#define MDIO_C45_OP_PREAD 0b10 +#define MDIO_C45_OP_READ 0b11 #define ASPEED_MDIO_CTRL_PHYAD GENMASK(25, 21) #define ASPEED_MDIO_CTRL_REGAD GENMASK(20, 16) #define ASPEED_MDIO_CTRL_MIIWDATA GENMASK(15, 0) @@ -37,36 +42,38 @@ struct aspeed_mdio { void __iomem *base; + struct reset_control *reset; }; -static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) +static int aspeed_mdio_op(struct mii_bus *bus, 
u8 st, u8 op, u8 phyad, u8 regad, + u16 data) { struct aspeed_mdio *ctx = bus->priv; u32 ctrl; - u32 data; - int rc; - dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr, - regnum); - - /* Just clause 22 for the moment */ - if (regnum & MII_ADDR_C45) - return -EOPNOTSUPP; + dev_dbg(&bus->dev, "%s: st: %u op: %u, phyad: %u, regad: %u, data: %u\n", + __func__, st, op, phyad, regad, data); ctrl = ASPEED_MDIO_CTRL_FIRE - | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22) - | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_READ) - | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr) - | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum); + | FIELD_PREP(ASPEED_MDIO_CTRL_ST, st) + | FIELD_PREP(ASPEED_MDIO_CTRL_OP, op) + | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, phyad) + | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regad) + | FIELD_PREP(ASPEED_MDIO_DATA_MIIRDATA, data); iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL); - rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, + return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, !(ctrl & ASPEED_MDIO_CTRL_FIRE), ASPEED_MDIO_INTERVAL_US, ASPEED_MDIO_TIMEOUT_US); - if (rc < 0) - return rc; +} + +static int aspeed_mdio_get_data(struct mii_bus *bus) +{ + struct aspeed_mdio *ctx = bus->priv; + u32 data; + int rc; rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data, data & ASPEED_MDIO_DATA_IDLE, @@ -78,31 +85,80 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) return FIELD_GET(ASPEED_MDIO_DATA_MIIRDATA, data); } -static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) +static int aspeed_mdio_read_c22(struct mii_bus *bus, int addr, int regnum) { - struct aspeed_mdio *ctx = bus->priv; - u32 ctrl; + int rc; - dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n", - __func__, addr, regnum, val); + rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_READ, + addr, regnum, 0); + if (rc < 0) + return rc; + + return aspeed_mdio_get_data(bus); +} + +static int aspeed_mdio_write_c22(struct mii_bus *bus, int addr, int regnum, + u16 val) +{ + return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_WRITE, + addr, regnum, val); +} + +static int aspeed_mdio_read_c45(struct mii_bus *bus, int addr, int regnum) +{ + u8 c45_dev = (regnum >> 16) & 0x1F; + u16 c45_addr = regnum & 0xFFFF; + int rc; + + rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR, + addr, c45_dev, c45_addr); + if (rc < 0) + return rc; + + rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_READ, + addr, c45_dev, 0); + if (rc < 0) + return rc; + + return aspeed_mdio_get_data(bus); +} + +static int aspeed_mdio_write_c45(struct mii_bus *bus, int addr, int regnum, + u16 val) +{ + u8 c45_dev = (regnum >> 16) & 0x1F; + u16 c45_addr = regnum & 0xFFFF; + int rc; + + rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR, + addr, c45_dev, c45_addr); + if (rc < 0) + return rc; + + return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_WRITE, + addr, c45_dev, val); +} + +static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) +{ + dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr, + regnum); - /* Just clause 22 for the moment */ if (regnum & MII_ADDR_C45) - return -EOPNOTSUPP; + return aspeed_mdio_read_c45(bus, addr, regnum); - ctrl = ASPEED_MDIO_CTRL_FIRE - | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22) - | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_WRITE) - | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr) - | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum) - | 
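The aspeed rework here funnels every access through aspeed_mdio_op() plus aspeed_mdio_get_data(), which turns Clause 45 into a two-cycle sequence: an ADDR cycle that latches the 16-bit register number, then a READ or WRITE cycle against the same MMD. How the MII_ADDR_C45-style regnum splits, as a small sketch:

/* Sketch: unpack a Clause-45 regnum the way the new helpers do.
 * Bits 20:16 carry the MMD device address, bits 15:0 the register. */
static void example_c45_unpack(u32 regnum, u8 *devad, u16 *reg)
{
	*devad = (regnum >> 16) & 0x1f;
	*reg = regnum & 0xffff;
	/* bus sequence: ADDR cycle carrying *reg, then READ/WRITE on *devad */
}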
FIELD_PREP(ASPEED_MDIO_CTRL_MIIWDATA, val); + return aspeed_mdio_read_c22(bus, addr, regnum); +} - iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL); +static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) +{ + dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n", + __func__, addr, regnum, val); - return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, - !(ctrl & ASPEED_MDIO_CTRL_FIRE), - ASPEED_MDIO_INTERVAL_US, - ASPEED_MDIO_TIMEOUT_US); + if (regnum & MII_ADDR_C45) + return aspeed_mdio_write_c45(bus, addr, regnum, val); + + return aspeed_mdio_write_c22(bus, addr, regnum, val); } static int aspeed_mdio_probe(struct platform_device *pdev) @@ -120,15 +176,23 @@ static int aspeed_mdio_probe(struct platform_device *pdev) if (IS_ERR(ctx->base)) return PTR_ERR(ctx->base); + ctx->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL); + if (IS_ERR(ctx->reset)) + return PTR_ERR(ctx->reset); + + reset_control_deassert(ctx->reset); + bus->name = DRV_NAME; snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id); bus->parent = &pdev->dev; bus->read = aspeed_mdio_read; bus->write = aspeed_mdio_write; + bus->probe_capabilities = MDIOBUS_C22_C45; rc = of_mdiobus_register(bus, pdev->dev.of_node); if (rc) { dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); + reset_control_assert(ctx->reset); return rc; } @@ -139,7 +203,11 @@ static int aspeed_mdio_probe(struct platform_device *pdev) static int aspeed_mdio_remove(struct platform_device *pdev) { - mdiobus_unregister(platform_get_drvdata(pdev)); + struct mii_bus *bus = (struct mii_bus *)platform_get_drvdata(pdev); + struct aspeed_mdio *ctx = bus->priv; + + reset_control_assert(ctx->reset); + mdiobus_unregister(bus); return 0; } diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index c483ba67c21f..08541007b18a 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -7,6 +7,7 @@ */ #include <linux/bitops.h> +#include <linux/clk.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> @@ -30,6 +31,8 @@ #define MSCC_MIIM_CMD_VLD BIT(31) #define MSCC_MIIM_REG_DATA 0xC #define MSCC_MIIM_DATA_ERROR (BIT(16) | BIT(17)) +#define MSCC_MIIM_REG_CFG 0x10 +#define MSCC_MIIM_CFG_PRESCALE_MASK GENMASK(7, 0) #define MSCC_PHY_REG_PHY_CFG 0x0 #define PHY_CFG_PHY_ENA (BIT(0) | BIT(1) | BIT(2) | BIT(3)) @@ -50,6 +53,8 @@ struct mscc_miim_dev { int mii_status_offset; struct regmap *phy_regs; const struct mscc_miim_info *info; + struct clk *clk; + u32 bus_freq; }; /* When high resolution timers aren't built-in: we can't use usleep_range() as @@ -102,6 +107,9 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum) u32 val; int ret; + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + ret = mscc_miim_wait_pending(bus); if (ret) goto out; @@ -145,6 +153,9 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id, struct mscc_miim_dev *miim = bus->priv; int ret; + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + ret = mscc_miim_wait_pending(bus); if (ret < 0) goto out; @@ -235,9 +246,33 @@ int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name, } EXPORT_SYMBOL(mscc_miim_setup); +static int mscc_miim_clk_set(struct mii_bus *bus) +{ + struct mscc_miim_dev *miim = bus->priv; + unsigned long rate; + u32 div; + + /* Keep the current settings */ + if (!miim->bus_freq) + return 0; + + rate = clk_get_rate(miim->clk); + + div = DIV_ROUND_UP(rate, 2 * miim->bus_freq) - 1; + if (div == 0 || div & 
~MSCC_MIIM_CFG_PRESCALE_MASK) { + dev_err(&bus->dev, "Incorrect MDIO clock frequency\n"); + return -EINVAL; + } + + return regmap_update_bits(miim->regs, MSCC_MIIM_REG_CFG, + MSCC_MIIM_CFG_PRESCALE_MASK, div); +} + static int mscc_miim_probe(struct platform_device *pdev) { struct regmap *mii_regmap, *phy_regmap = NULL; + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; void __iomem *regs, *phy_regs; struct mscc_miim_dev *miim; struct resource *res; @@ -246,63 +281,87 @@ static int mscc_miim_probe(struct platform_device *pdev) regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(regs)) { - dev_err(&pdev->dev, "Unable to map MIIM registers\n"); + dev_err(dev, "Unable to map MIIM registers\n"); return PTR_ERR(regs); } - mii_regmap = devm_regmap_init_mmio(&pdev->dev, regs, - &mscc_miim_regmap_config); + mii_regmap = devm_regmap_init_mmio(dev, regs, &mscc_miim_regmap_config); if (IS_ERR(mii_regmap)) { - dev_err(&pdev->dev, "Unable to create MIIM regmap\n"); + dev_err(dev, "Unable to create MIIM regmap\n"); return PTR_ERR(mii_regmap); } /* This resource is optional */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) { - phy_regs = devm_ioremap_resource(&pdev->dev, res); + phy_regs = devm_ioremap_resource(dev, res); if (IS_ERR(phy_regs)) { - dev_err(&pdev->dev, "Unable to map internal phy registers\n"); + dev_err(dev, "Unable to map internal phy registers\n"); return PTR_ERR(phy_regs); } - phy_regmap = devm_regmap_init_mmio(&pdev->dev, phy_regs, + phy_regmap = devm_regmap_init_mmio(dev, phy_regs, &mscc_miim_phy_regmap_config); if (IS_ERR(phy_regmap)) { - dev_err(&pdev->dev, "Unable to create phy register regmap\n"); + dev_err(dev, "Unable to create phy register regmap\n"); return PTR_ERR(phy_regmap); } } - ret = mscc_miim_setup(&pdev->dev, &bus, "mscc_miim", mii_regmap, 0); + ret = mscc_miim_setup(dev, &bus, "mscc_miim", mii_regmap, 0); if (ret < 0) { - dev_err(&pdev->dev, "Unable to setup the MDIO bus\n"); + dev_err(dev, "Unable to setup the MDIO bus\n"); return ret; } miim = bus->priv; miim->phy_regs = phy_regmap; - miim->info = device_get_match_data(&pdev->dev); + miim->info = device_get_match_data(dev); if (!miim->info) return -EINVAL; - ret = of_mdiobus_register(bus, pdev->dev.of_node); - if (ret < 0) { - dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret); + miim->clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(miim->clk)) + return PTR_ERR(miim->clk); + + of_property_read_u32(np, "clock-frequency", &miim->bus_freq); + + if (miim->bus_freq && !miim->clk) { + dev_err(dev, "cannot use clock-frequency without a clock\n"); + return -EINVAL; + } + + ret = clk_prepare_enable(miim->clk); + if (ret) return ret; + + ret = mscc_miim_clk_set(bus); + if (ret) + goto out_disable_clk; + + ret = of_mdiobus_register(bus, np); + if (ret < 0) { + dev_err(dev, "Cannot register MDIO bus (%d)\n", ret); + goto out_disable_clk; } platform_set_drvdata(pdev, bus); return 0; + +out_disable_clk: + clk_disable_unprepare(miim->clk); + return ret; } static int mscc_miim_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); + struct mscc_miim_dev *miim = bus->priv; + clk_disable_unprepare(miim->clk); mdiobus_unregister(bus); return 0; diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 378ee779061c..c8f398f5bc5b 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -22,6 +22,7 @@ #include <linux/spinlock_types.h> #include <linux/types.h> #include 
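mscc_miim_clk_set() above sizes the prescaler so MDC lands near the requested clock-frequency: the controller divides by 2 * (div + 1), hence div = DIV_ROUND_UP(rate, 2 * freq) - 1, rejected when it is zero or spills past the 8-bit field. A worked sketch with an assumed 100 MHz system clock (the actual rate is board-specific, not in the hunk):

#include <linux/kernel.h>

/* Sketch: 100 MHz clock, 2.5 MHz target MDC:
 * div = DIV_ROUND_UP(100000000, 5000000) - 1 = 19, and
 * 100 MHz / (2 * (19 + 1)) = 2.5 MHz exactly. */
static int example_prescale(unsigned long rate, u32 bus_freq)
{
	u32 div = DIV_ROUND_UP(rate, 2 * bus_freq) - 1;

	if (div == 0 || div & ~0xffUL)	/* must fit GENMASK(7, 0) */
		return -EINVAL;
	return div;
}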
<net/fib_notifier.h> +#include <net/inet_dscp.h> #include <net/ip_fib.h> #include <net/ip6_fib.h> #include <net/fib_rules.h> @@ -78,7 +79,7 @@ struct nsim_fib_rt { struct nsim_fib4_rt { struct nsim_fib_rt common; struct fib_info *fi; - u8 tos; + dscp_t dscp; u8 type; }; @@ -283,7 +284,7 @@ nsim_fib4_rt_create(struct nsim_fib_data *data, fib4_rt->fi = fen_info->fi; fib_info_hold(fib4_rt->fi); - fib4_rt->tos = fen_info->tos; + fib4_rt->dscp = fen_info->dscp; fib4_rt->type = fen_info->type; return fib4_rt; @@ -322,7 +323,7 @@ nsim_fib4_rt_offload_failed_flag_set(struct net *net, fri.tb_id = fen_info->tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = fen_info->dst_len; - fri.tos = fen_info->tos; + fri.dscp = fen_info->dscp; fri.type = fen_info->type; fri.offload = false; fri.trap = false; @@ -342,7 +343,7 @@ static void nsim_fib4_rt_hw_flags_set(struct net *net, fri.tb_id = fib4_rt->common.key.tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = dst_len; - fri.tos = fib4_rt->tos; + fri.dscp = fib4_rt->dscp; fri.type = fib4_rt->type; fri.offload = false; fri.trap = trap; diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index 61418d4dc0cd..4cfd05c15aee 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -175,20 +175,18 @@ static bool __xpcs_linkmode_supported(const struct xpcs_compat *compat, int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg) { - u32 reg_addr = mdiobus_c45_addr(dev, reg); struct mii_bus *bus = xpcs->mdiodev->bus; int addr = xpcs->mdiodev->addr; - return mdiobus_read(bus, addr, reg_addr); + return mdiobus_c45_read(bus, addr, dev, reg); } int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val) { - u32 reg_addr = mdiobus_c45_addr(dev, reg); struct mii_bus *bus = xpcs->mdiodev->bus; int addr = xpcs->mdiodev->addr; - return mdiobus_write(bus, addr, reg_addr, val); + return mdiobus_c45_write(bus, addr, dev, reg, val); } static int xpcs_read_vendor(struct dw_xpcs *xpcs, int dev, u32 reg) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index ea7571a2b39b..bbbf6c07ea53 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -83,6 +83,13 @@ config ADIN_PHY - ADIN1300 - Robust,Industrial, Low Latency 10/100/1000 Gigabit Ethernet PHY +config ADIN1100_PHY + tristate "Analog Devices Industrial Ethernet T1L PHYs" + help + Adds support for the Analog Devices Industrial T1L Ethernet PHYs. + Currently supports the: + - ADIN1100 - Robust,Industrial, Low Power 10BASE-T1L Ethernet PHY + config AQUANTIA_PHY tristate "Aquantia PHYs" help diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index b2728d00fc9a..b82651b57043 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -31,6 +31,7 @@ sfp-obj-$(CONFIG_SFP) += sfp-bus.o obj-y += $(sfp-obj-y) $(sfp-obj-m) obj-$(CONFIG_ADIN_PHY) += adin.o +obj-$(CONFIG_ADIN1100_PHY) += adin1100.o obj-$(CONFIG_AMD_PHY) += amd.o aquantia-objs += aquantia_main.o ifdef CONFIG_HWMON diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c new file mode 100644 index 000000000000..b6d139501199 --- /dev/null +++ b/drivers/net/phy/adin1100.c @@ -0,0 +1,292 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Driver for Analog Devices Industrial Ethernet T1L PHYs + * + * Copyright 2020 Analog Devices Inc. 
+ */ +#include <linux/kernel.h> +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mii.h> +#include <linux/phy.h> +#include <linux/property.h> + +#define PHY_ID_ADIN1100 0x0283bc81 + +#define ADIN_FORCED_MODE 0x8000 +#define ADIN_FORCED_MODE_EN BIT(0) + +#define ADIN_CRSM_SFT_RST 0x8810 +#define ADIN_CRSM_SFT_RST_EN BIT(0) + +#define ADIN_CRSM_SFT_PD_CNTRL 0x8812 +#define ADIN_CRSM_SFT_PD_CNTRL_EN BIT(0) + +#define ADIN_AN_PHY_INST_STATUS 0x8030 +#define ADIN_IS_CFG_SLV BIT(2) +#define ADIN_IS_CFG_MST BIT(3) + +#define ADIN_CRSM_STAT 0x8818 +#define ADIN_CRSM_SFT_PD_RDY BIT(1) +#define ADIN_CRSM_SYS_RDY BIT(0) + +#define ADIN_MSE_VAL 0x830B + +#define ADIN_SQI_MAX 7 + +struct adin_mse_sqi_range { + u16 start; + u16 end; +}; + +static const struct adin_mse_sqi_range adin_mse_sqi_map[] = { + { 0x0A74, 0xFFFF }, + { 0x084E, 0x0A74 }, + { 0x0698, 0x084E }, + { 0x053D, 0x0698 }, + { 0x0429, 0x053D }, + { 0x034E, 0x0429 }, + { 0x02A0, 0x034E }, + { 0x0000, 0x02A0 }, +}; + +/** + * struct adin_priv - ADIN PHY driver private data + * @tx_level_2v4_able: set if the PHY supports 2.4V TX levels (10BASE-T1L) + * @tx_level_2v4: set if the PHY requests 2.4V TX levels (10BASE-T1L) + * @tx_level_prop_present: set if the TX level is specified in DT + */ +struct adin_priv { + unsigned int tx_level_2v4_able:1; + unsigned int tx_level_2v4:1; + unsigned int tx_level_prop_present:1; +}; + +static int adin_read_status(struct phy_device *phydev) +{ + int ret; + + ret = genphy_c45_read_status(phydev); + if (ret) + return ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_AN, ADIN_AN_PHY_INST_STATUS); + if (ret < 0) + return ret; + + if (ret & ADIN_IS_CFG_SLV) + phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE; + + if (ret & ADIN_IS_CFG_MST) + phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER; + + return 0; +} + +static int adin_config_aneg(struct phy_device *phydev) +{ + struct adin_priv *priv = phydev->priv; + int ret; + + if (phydev->autoneg == AUTONEG_DISABLE) { + ret = genphy_c45_pma_setup_forced(phydev); + if (ret < 0) + return ret; + + if (priv->tx_level_prop_present && priv->tx_level_2v4) + ret = phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_B10L_PMA_CTRL, + MDIO_PMA_10T1L_CTRL_2V4_EN); + else + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_B10L_PMA_CTRL, + MDIO_PMA_10T1L_CTRL_2V4_EN); + if (ret < 0) + return ret; + + /* Force PHY to use above configurations */ + return phy_set_bits_mmd(phydev, MDIO_MMD_AN, ADIN_FORCED_MODE, ADIN_FORCED_MODE_EN); + } + + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_AN, ADIN_FORCED_MODE, ADIN_FORCED_MODE_EN); + if (ret < 0) + return ret; + + /* Request increased transmit level from LP. */ + if (priv->tx_level_prop_present && priv->tx_level_2v4) { + ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_H, + MDIO_AN_T1_ADV_H_10L_TX_HI | + MDIO_AN_T1_ADV_H_10L_TX_HI_REQ); + if (ret < 0) + return ret; + } + + /* Disable 2.4 Vpp transmit level. */ + if ((priv->tx_level_prop_present && !priv->tx_level_2v4) || !priv->tx_level_2v4_able) { + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_H, + MDIO_AN_T1_ADV_H_10L_TX_HI | + MDIO_AN_T1_ADV_H_10L_TX_HI_REQ); + if (ret < 0) + return ret; + } + + return genphy_c45_config_aneg(phydev); +} + +static int adin_set_powerdown_mode(struct phy_device *phydev, bool en) +{ + int ret; + int val; + + val = en ? 
ADIN_CRSM_SFT_PD_CNTRL_EN : 0; + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, + ADIN_CRSM_SFT_PD_CNTRL, val); + if (ret < 0) + return ret; + + return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, ADIN_CRSM_STAT, ret, + (ret & ADIN_CRSM_SFT_PD_RDY) == val, + 1000, 30000, true); +} + +static int adin_suspend(struct phy_device *phydev) +{ + return adin_set_powerdown_mode(phydev, true); +} + +static int adin_resume(struct phy_device *phydev) +{ + return adin_set_powerdown_mode(phydev, false); +} + +static int adin_set_loopback(struct phy_device *phydev, bool enable) +{ + if (enable) + return phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL, + BMCR_LOOPBACK); + + /* PCS loopback (according to 10BASE-T1L spec) */ + return phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL, + BMCR_LOOPBACK); +} + +static int adin_soft_reset(struct phy_device *phydev) +{ + int ret; + + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, ADIN_CRSM_SFT_RST, ADIN_CRSM_SFT_RST_EN); + if (ret < 0) + return ret; + + return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, ADIN_CRSM_STAT, ret, + (ret & ADIN_CRSM_SYS_RDY), + 10000, 30000, true); +} + +static int adin_get_features(struct phy_device *phydev) +{ + struct adin_priv *priv = phydev->priv; + struct device *dev = &phydev->mdio.dev; + int ret; + u8 val; + + ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10T1L_STAT); + if (ret < 0) + return ret; + + /* This depends on the voltage level from the power source */ + priv->tx_level_2v4_able = !!(ret & MDIO_PMA_10T1L_STAT_2V4_ABLE); + + phydev_dbg(phydev, "PHY supports 2.4V TX level: %s\n", + priv->tx_level_2v4_able ? "yes" : "no"); + + priv->tx_level_prop_present = device_property_present(dev, "phy-10base-t1l-2.4vpp"); + if (priv->tx_level_prop_present) { + ret = device_property_read_u8(dev, "phy-10base-t1l-2.4vpp", &val); + if (ret < 0) + return ret; + + priv->tx_level_2v4 = val; + if (!priv->tx_level_2v4 && priv->tx_level_2v4_able) + phydev_info(phydev, + "PHY supports 2.4V TX level, but disabled via config\n"); + } + + linkmode_set_bit_array(phy_basic_ports_array, ARRAY_SIZE(phy_basic_ports_array), + phydev->supported); + + return genphy_c45_pma_read_abilities(phydev); +} + +static int adin_get_sqi(struct phy_device *phydev) +{ + u16 mse_val; + int sqi; + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_STAT1); + if (ret < 0) + return ret; + else if (!(ret & MDIO_STAT1_LSTATUS)) + return 0; + + ret = phy_read_mmd(phydev, MDIO_STAT1, ADIN_MSE_VAL); + if (ret < 0) + return ret; + + mse_val = 0xFFFF & ret; + for (sqi = 0; sqi < ARRAY_SIZE(adin_mse_sqi_map); sqi++) { + if (mse_val >= adin_mse_sqi_map[sqi].start && mse_val <= adin_mse_sqi_map[sqi].end) + return sqi; + } + + return -EINVAL; +} + +static int adin_get_sqi_max(struct phy_device *phydev) +{ + return ADIN_SQI_MAX; +} + +static int adin_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + struct adin_priv *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + return 0; +} + +static struct phy_driver adin_driver[] = { + { + PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100), + .name = "ADIN1100", + .get_features = adin_get_features, + .soft_reset = adin_soft_reset, + .probe = adin_probe, + .config_aneg = adin_config_aneg, + .read_status = adin_read_status, + .set_loopback = adin_set_loopback, + .suspend = adin_suspend, + .resume = adin_resume, + .get_sqi = adin_get_sqi, + .get_sqi_max = adin_get_sqi_max, + }, +}; + +module_phy_driver(adin_driver); 
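A minimal standalone sketch of the lookup adin_get_sqi() performs, assuming only the adin_mse_sqi_map layout above (index 0 is the highest mean-square-error bucket and thus the worst signal quality, index 7 the best); this is an illustration, not part of the patch:

static int mse_to_sqi(u16 mse_val)
{
	int sqi;

	/* buckets are ordered worst (SQI 0) to best (SQI 7) */
	for (sqi = 0; sqi < ARRAY_SIZE(adin_mse_sqi_map); sqi++)
		if (mse_val >= adin_mse_sqi_map[sqi].start &&
		    mse_val <= adin_mse_sqi_map[sqi].end)
			return sqi;

	return -EINVAL;
}

For instance, mse_val = 0x0500 lands in the { 0x0429, 0x053D } bucket and yields SQI 4 of ADIN_SQI_MAX (7).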
+ +static struct mdio_device_id __maybe_unused adin_tbl[] = { + { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100) }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, adin_tbl); +MODULE_DESCRIPTION("Analog Devices Industrial Ethernet T1L PHY driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c index 313563482690..cc2858107668 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c @@ -10,12 +10,12 @@ #define PHY_ID_BCM8706 0x0143bdc1 #define PHY_ID_BCM8727 0x0143bff0 -#define BCM87XX_PMD_RX_SIGNAL_DETECT (MII_ADDR_C45 | 0x1000a) -#define BCM87XX_10GBASER_PCS_STATUS (MII_ADDR_C45 | 0x30020) -#define BCM87XX_XGXS_LANE_STATUS (MII_ADDR_C45 | 0x40018) +#define BCM87XX_PMD_RX_SIGNAL_DETECT 0x000a +#define BCM87XX_10GBASER_PCS_STATUS 0x0020 +#define BCM87XX_XGXS_LANE_STATUS 0x0018 -#define BCM87XX_LASI_CONTROL (MII_ADDR_C45 | 0x39002) -#define BCM87XX_LASI_STATUS (MII_ADDR_C45 | 0x39005) +#define BCM87XX_LASI_CONTROL 0x9002 +#define BCM87XX_LASI_STATUS 0x9005 #if IS_ENABLED(CONFIG_OF_MDIO) /* Set and/or override some configuration registers based on the @@ -54,11 +54,10 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev) u16 reg = be32_to_cpup(paddr++); u16 mask = be32_to_cpup(paddr++); u16 val_bits = be32_to_cpup(paddr++); - u32 regnum = mdiobus_c45_addr(devid, reg); int val = 0; if (mask) { - val = phy_read(phydev, regnum); + val = phy_read_mmd(phydev, devid, reg); if (val < 0) { ret = val; goto err; @@ -67,7 +66,7 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev) } val |= val_bits; - ret = phy_write(phydev, regnum, val); + ret = phy_write_mmd(phydev, devid, reg, val); if (ret < 0) goto err; } @@ -104,21 +103,24 @@ static int bcm87xx_read_status(struct phy_device *phydev) int pcs_status; int xgxs_lane_status; - rx_signal_detect = phy_read(phydev, BCM87XX_PMD_RX_SIGNAL_DETECT); + rx_signal_detect = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, + BCM87XX_PMD_RX_SIGNAL_DETECT); if (rx_signal_detect < 0) return rx_signal_detect; if ((rx_signal_detect & 1) == 0) goto no_link; - pcs_status = phy_read(phydev, BCM87XX_10GBASER_PCS_STATUS); + pcs_status = phy_read_mmd(phydev, MDIO_MMD_PCS, + BCM87XX_10GBASER_PCS_STATUS); if (pcs_status < 0) return pcs_status; if ((pcs_status & 1) == 0) goto no_link; - xgxs_lane_status = phy_read(phydev, BCM87XX_XGXS_LANE_STATUS); + xgxs_lane_status = phy_read_mmd(phydev, MDIO_MMD_PHYXS, + BCM87XX_XGXS_LANE_STATUS); if (xgxs_lane_status < 0) return xgxs_lane_status; @@ -139,25 +141,27 @@ static int bcm87xx_config_intr(struct phy_device *phydev) { int reg, err; - reg = phy_read(phydev, BCM87XX_LASI_CONTROL); + reg = phy_read_mmd(phydev, MDIO_MMD_PCS, BCM87XX_LASI_CONTROL); if (reg < 0) return reg; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { - err = phy_read(phydev, BCM87XX_LASI_STATUS); + err = phy_read_mmd(phydev, MDIO_MMD_PCS, BCM87XX_LASI_STATUS); if (err) return err; reg |= 1; - err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg); + err = phy_write_mmd(phydev, MDIO_MMD_PCS, + BCM87XX_LASI_CONTROL, reg); } else { reg &= ~1; - err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg); + err = phy_write_mmd(phydev, MDIO_MMD_PCS, + BCM87XX_LASI_CONTROL, reg); if (err) return err; - err = phy_read(phydev, BCM87XX_LASI_STATUS); + err = phy_read_mmd(phydev, MDIO_MMD_PCS, BCM87XX_LASI_STATUS); } return err; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 2702faf7b0f6..47e83c1e9051 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -961,7 +961,21 @@ static int 
m88e1111_config_init(struct phy_device *phydev) if (err < 0) return err; - return genphy_soft_reset(phydev); + err = genphy_soft_reset(phydev); + if (err < 0) + return err; + + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + /* If the HWCFG_MODE was changed from another mode (such as + * 1000BaseX) to SGMII, the state of the support bits may have + * also changed now that the PHY has been reset. + * Update the PHY abilities accordingly. + */ + err = genphy_read_abilities(phydev); + linkmode_or(phydev->advertising, phydev->advertising, + phydev->supported); + } + return err; } static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data) diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index b6fea119fe13..2b7d0720720b 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -880,7 +880,7 @@ static int mv3310_read_status_copper(struct phy_device *phydev) cssr1 = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_CSSR1); if (cssr1 < 0) - return val; + return cssr1; /* If the link settings are not resolved, mark the link down */ if (!(cssr1 & MV_PCS_CSSR1_RESOLVED)) { diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 19b11e896460..685a0ab5453c 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -32,6 +32,7 @@ #include <linux/ptp_clock.h> #include <linux/ptp_classify.h> #include <linux/net_tstamp.h> +#include <linux/gpio/consumer.h> /* Operation Mode Strap Override */ #define MII_KSZPHY_OMSO 0x16 @@ -70,6 +71,27 @@ #define KSZ8081_LMD_SHORT_INDICATOR BIT(12) #define KSZ8081_LMD_DELTA_TIME_MASK GENMASK(8, 0) +#define KSZ9x31_LMD 0x12 +#define KSZ9x31_LMD_VCT_EN BIT(15) +#define KSZ9x31_LMD_VCT_DIS_TX BIT(14) +#define KSZ9x31_LMD_VCT_PAIR(n) (((n) & 0x3) << 12) +#define KSZ9x31_LMD_VCT_SEL_RESULT 0 +#define KSZ9x31_LMD_VCT_SEL_THRES_HI BIT(10) +#define KSZ9x31_LMD_VCT_SEL_THRES_LO BIT(11) +#define KSZ9x31_LMD_VCT_SEL_MASK GENMASK(11, 10) +#define KSZ9x31_LMD_VCT_ST_NORMAL 0 +#define KSZ9x31_LMD_VCT_ST_OPEN 1 +#define KSZ9x31_LMD_VCT_ST_SHORT 2 +#define KSZ9x31_LMD_VCT_ST_FAIL 3 +#define KSZ9x31_LMD_VCT_ST_MASK GENMASK(9, 8) +#define KSZ9x31_LMD_VCT_DATA_REFLECTED_INVALID BIT(7) +#define KSZ9x31_LMD_VCT_DATA_SIG_WAIT_TOO_LONG BIT(6) +#define KSZ9x31_LMD_VCT_DATA_MASK100 BIT(5) +#define KSZ9x31_LMD_VCT_DATA_NLP_FLP BIT(4) +#define KSZ9x31_LMD_VCT_DATA_LO_PULSE_MASK GENMASK(3, 2) +#define KSZ9x31_LMD_VCT_DATA_HI_PULSE_MASK GENMASK(1, 0) +#define KSZ9x31_LMD_VCT_DATA_MASK GENMASK(7, 0) + /* Lan8814 general Interrupt control/status reg in GPHY specific block. 
*/ #define LAN8814_INTC 0x18 #define LAN8814_INTS 0x1B @@ -99,15 +121,6 @@ #define PTP_TIMESTAMP_EN_PDREQ_ BIT(2) #define PTP_TIMESTAMP_EN_PDRES_ BIT(3) -#define PTP_RX_LATENCY_1000 0x0224 -#define PTP_TX_LATENCY_1000 0x0225 - -#define PTP_RX_LATENCY_100 0x0222 -#define PTP_TX_LATENCY_100 0x0223 - -#define PTP_RX_LATENCY_10 0x0220 -#define PTP_TX_LATENCY_10 0x0221 - #define PTP_TX_PARSE_L2_ADDR_EN 0x0284 #define PTP_RX_PARSE_L2_ADDR_EN 0x0244 @@ -268,15 +281,6 @@ struct lan8814_ptp_rx_ts { u16 seq_id; }; -struct kszphy_latencies { - u16 rx_10; - u16 tx_10; - u16 rx_100; - u16 tx_100; - u16 rx_1000; - u16 tx_1000; -}; - struct kszphy_ptp_priv { struct mii_timestamper mii_ts; struct phy_device *phydev; @@ -296,22 +300,14 @@ struct kszphy_ptp_priv { struct kszphy_priv { struct kszphy_ptp_priv ptp_priv; - struct kszphy_latencies latencies; const struct kszphy_type *type; int led_mode; + u16 vct_ctrl1000; bool rmii_ref_clk_sel; bool rmii_ref_clk_sel_val; u64 stats[ARRAY_SIZE(kszphy_hw_stats)]; }; -static struct kszphy_latencies lan8814_latencies = { - .rx_10 = 0x22AA, - .tx_10 = 0x2E4A, - .rx_100 = 0x092A, - .tx_100 = 0x02C1, - .rx_1000 = 0x01AD, - .tx_1000 = 0x00C9, -}; static const struct kszphy_type ksz8021_type = { .led_mode_reg = MII_KSZPHY_CTRL_2, .has_broadcast_disable = true, @@ -1353,6 +1349,199 @@ static int ksz9031_read_status(struct phy_device *phydev) return 0; } +static int ksz9x31_cable_test_start(struct phy_device *phydev) +{ + struct kszphy_priv *priv = phydev->priv; + int ret; + + /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic + * Prior to running the cable diagnostics, Auto-negotiation should + * be disabled, full duplex set and the link speed set to 1000Mbps + * via the Basic Control Register. + */ + ret = phy_modify(phydev, MII_BMCR, + BMCR_SPEED1000 | BMCR_FULLDPLX | + BMCR_ANENABLE | BMCR_SPEED100, + BMCR_SPEED1000 | BMCR_FULLDPLX); + if (ret) + return ret; + + /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic + * The Master-Slave configuration should be set to Slave by writing + * a value of 0x1000 to the Auto-Negotiation Master Slave Control + * Register. + */ + ret = phy_read(phydev, MII_CTRL1000); + if (ret < 0) + return ret; + + /* Cache these bits, they need to be restored once LinkMD finishes. 
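+	 * For example, if MII_CTRL1000 reads 0x1800 (manual configuration as
+	 * master), 0x1800 is cached and the masked write below produces 0x1000,
+	 * i.e. manual configuration as slave, which is what LinkMD requires.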
*/ + priv->vct_ctrl1000 = ret & (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); + ret &= ~(CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); + ret |= CTL1000_ENABLE_MASTER; + + return phy_write(phydev, MII_CTRL1000, ret); +} + +static int ksz9x31_cable_test_result_trans(u16 status) +{ + switch (FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status)) { + case KSZ9x31_LMD_VCT_ST_NORMAL: + return ETHTOOL_A_CABLE_RESULT_CODE_OK; + case KSZ9x31_LMD_VCT_ST_OPEN: + return ETHTOOL_A_CABLE_RESULT_CODE_OPEN; + case KSZ9x31_LMD_VCT_ST_SHORT: + return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT; + case KSZ9x31_LMD_VCT_ST_FAIL: + fallthrough; + default: + return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC; + } +} + +static bool ksz9x31_cable_test_failed(u16 status) +{ + int stat = FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status); + + return stat == KSZ9x31_LMD_VCT_ST_FAIL; +} + +static bool ksz9x31_cable_test_fault_length_valid(u16 status) +{ + switch (FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status)) { + case KSZ9x31_LMD_VCT_ST_OPEN: + fallthrough; + case KSZ9x31_LMD_VCT_ST_SHORT: + return true; + } + return false; +} + +static int ksz9x31_cable_test_fault_length(struct phy_device *phydev, u16 stat) +{ + int dt = FIELD_GET(KSZ9x31_LMD_VCT_DATA_MASK, stat); + + /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic + * + * distance to fault = (VCT_DATA - 22) * 4 / cable propagation velocity + */ + if ((phydev->phy_id & MICREL_PHY_ID_MASK) == PHY_ID_KSZ9131) + dt = clamp(dt - 22, 0, 255); + + return (dt * 400) / 10; +} + +static int ksz9x31_cable_test_wait_for_completion(struct phy_device *phydev) +{ + int val, ret; + + ret = phy_read_poll_timeout(phydev, KSZ9x31_LMD, val, + !(val & KSZ9x31_LMD_VCT_EN), + 30000, 100000, true); + + return ret < 0 ? ret : 0; +} + +static int ksz9x31_cable_test_get_pair(int pair) +{ + static const int ethtool_pair[] = { + ETHTOOL_A_CABLE_PAIR_A, + ETHTOOL_A_CABLE_PAIR_B, + ETHTOOL_A_CABLE_PAIR_C, + ETHTOOL_A_CABLE_PAIR_D, + }; + + return ethtool_pair[pair]; +} + +static int ksz9x31_cable_test_one_pair(struct phy_device *phydev, int pair) +{ + int ret, val; + + /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic + * To test each individual cable pair, set the cable pair in the Cable + * Diagnostics Test Pair (VCT_PAIR[1:0]) field of the LinkMD Cable + * Diagnostic Register, along with setting the Cable Diagnostics Test + * Enable (VCT_EN) bit. The Cable Diagnostics Test Enable (VCT_EN) bit + * will self clear when the test is concluded. 
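+	 * For pair C, for instance, the word written below is
+	 * KSZ9x31_LMD_VCT_EN | KSZ9x31_LMD_VCT_PAIR(2),
+	 * i.e. 0x8000 | 0x2000 = 0xa000.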
+ */ + ret = phy_write(phydev, KSZ9x31_LMD, + KSZ9x31_LMD_VCT_EN | KSZ9x31_LMD_VCT_PAIR(pair)); + if (ret) + return ret; + + ret = ksz9x31_cable_test_wait_for_completion(phydev); + if (ret) + return ret; + + val = phy_read(phydev, KSZ9x31_LMD); + if (val < 0) + return val; + + if (ksz9x31_cable_test_failed(val)) + return -EAGAIN; + + ret = ethnl_cable_test_result(phydev, + ksz9x31_cable_test_get_pair(pair), + ksz9x31_cable_test_result_trans(val)); + if (ret) + return ret; + + if (!ksz9x31_cable_test_fault_length_valid(val)) + return 0; + + return ethnl_cable_test_fault_length(phydev, + ksz9x31_cable_test_get_pair(pair), + ksz9x31_cable_test_fault_length(phydev, val)); +} + +static int ksz9x31_cable_test_get_status(struct phy_device *phydev, + bool *finished) +{ + struct kszphy_priv *priv = phydev->priv; + unsigned long pair_mask = 0xf; + int retries = 20; + int pair, ret, rv; + + *finished = false; + + /* Try harder if link partner is active */ + while (pair_mask && retries--) { + for_each_set_bit(pair, &pair_mask, 4) { + ret = ksz9x31_cable_test_one_pair(phydev, pair); + if (ret == -EAGAIN) + continue; + if (ret < 0) + return ret; + clear_bit(pair, &pair_mask); + } + /* If link partner is in autonegotiation mode it will send 2ms + * of FLPs with at least 6ms of silence. + * Add 2ms sleep to have better chances to hit this silence. + */ + if (pair_mask) + usleep_range(2000, 3000); + } + + /* Report remaining unfinished pair result as unknown. */ + for_each_set_bit(pair, &pair_mask, 4) { + ret = ethnl_cable_test_result(phydev, + ksz9x31_cable_test_get_pair(pair), + ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC); + } + + *finished = true; + + /* Restore cached bits from before LinkMD got started. */ + rv = phy_modify(phydev, MII_CTRL1000, + CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER, + priv->vct_ctrl1000); + if (rv) + return rv; + + return ret; +} + static int ksz8873mll_config_aneg(struct phy_device *phydev) { return 0; @@ -2541,6 +2730,10 @@ static void lan8814_ptp_init(struct phy_device *phydev) struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv; u32 temp; + if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) || + !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) + return; + lanphy_write_page_reg(phydev, 5, TSU_HARD_RESET, TSU_HARD_RESET_); temp = lanphy_read_page_reg(phydev, 5, PTP_TX_MOD); @@ -2579,6 +2772,10 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev) { struct lan8814_shared_priv *shared = phydev->shared->priv; + if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) || + !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) + return 0; + /* Initialise shared lock for clock*/ mutex_init(&shared->shared_lock); @@ -2618,55 +2815,6 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev) return 0; } -static int lan8814_read_status(struct phy_device *phydev) -{ - struct kszphy_priv *priv = phydev->priv; - struct kszphy_latencies *latencies = &priv->latencies; - int err; - int regval; - - err = genphy_read_status(phydev); - if (err) - return err; - - switch (phydev->speed) { - case SPEED_1000: - lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_1000, - latencies->rx_1000); - lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_1000, - latencies->tx_1000); - break; - case SPEED_100: - lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_100, - latencies->rx_100); - lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_100, - latencies->tx_100); - break; - case SPEED_10: - lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_10, - latencies->rx_10); - lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_10, - latencies->tx_10); - break; - 
default: - break; - } - - /* Make sure the PHY is not broken. Read idle error count, - * and reset the PHY if it is maxed out. - */ - regval = phy_read(phydev, MII_STAT1000); - if ((regval & 0xFF) == 0xFF) { - phy_init_hw(phydev); - phydev->link = 0; - if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) - phydev->drv->config_intr(phydev); - return genphy_config_aneg(phydev); - } - - return 0; -} - static int lan8814_config_init(struct phy_device *phydev) { int val; @@ -2690,30 +2838,23 @@ static int lan8814_config_init(struct phy_device *phydev) return 0; } -static void lan8814_parse_latency(struct phy_device *phydev) +static int lan8814_release_coma_mode(struct phy_device *phydev) { - const struct device_node *np = phydev->mdio.dev.of_node; - struct kszphy_priv *priv = phydev->priv; - struct kszphy_latencies *latency = &priv->latencies; - u32 val; - - if (!of_property_read_u32(np, "lan8814,latency_rx_10", &val)) - latency->rx_10 = val; - if (!of_property_read_u32(np, "lan8814,latency_tx_10", &val)) - latency->tx_10 = val; - if (!of_property_read_u32(np, "lan8814,latency_rx_100", &val)) - latency->rx_100 = val; - if (!of_property_read_u32(np, "lan8814,latency_tx_100", &val)) - latency->tx_100 = val; - if (!of_property_read_u32(np, "lan8814,latency_rx_1000", &val)) - latency->rx_1000 = val; - if (!of_property_read_u32(np, "lan8814,latency_tx_1000", &val)) - latency->tx_1000 = val; + struct gpio_desc *gpiod; + + gpiod = devm_gpiod_get_optional(&phydev->mdio.dev, "coma-mode", + GPIOD_OUT_HIGH_OPEN_DRAIN); + if (IS_ERR(gpiod)) + return PTR_ERR(gpiod); + + gpiod_set_consumer_name(gpiod, "LAN8814 coma mode"); + gpiod_set_value_cansleep(gpiod, 0); + + return 0; } static int lan8814_probe(struct phy_device *phydev) { - const struct device_node *np = phydev->mdio.dev.of_node; struct kszphy_priv *priv; u16 addr; int err; @@ -2724,15 +2865,8 @@ static int lan8814_probe(struct phy_device *phydev) priv->led_mode = -1; - priv->latencies = lan8814_latencies; - phydev->priv = priv; - if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) || - !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING) || - of_property_read_bool(np, "lan8814,ignore-ts")) - return 0; - /* Strap-in value for PHY address, below register read gives starting * phy address value */ @@ -2741,12 +2875,15 @@ static int lan8814_probe(struct phy_device *phydev) addr, sizeof(struct lan8814_shared_priv)); if (phy_package_init_once(phydev)) { + err = lan8814_release_coma_mode(phydev); + if (err) + return err; + err = lan8814_ptp_probe_once(phydev); if (err) return err; } - lan8814_parse_latency(phydev); lan8814_ptp_init(phydev); return 0; @@ -2908,6 +3045,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ9031, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ9031 Gigabit PHY", + .flags = PHY_POLL_CABLE_TEST, .driver_data = &ksz9021_type, .probe = kszphy_probe, .get_features = ksz9031_get_features, @@ -2921,6 +3059,8 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = kszphy_suspend, .resume = kszphy_resume, + .cable_test_start = ksz9x31_cable_test_start, + .cable_test_get_status = ksz9x31_cable_test_get_status, }, { .phy_id = PHY_ID_LAN8814, .phy_id_mask = MICREL_PHY_ID_MASK, @@ -2928,7 +3068,7 @@ static struct phy_driver ksphy_driver[] = { .config_init = lan8814_config_init, .probe = lan8814_probe, .soft_reset = genphy_soft_reset, - .read_status = lan8814_read_status, + .read_status = ksz9031_read_status, .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = 
kszphy_get_stats, @@ -2955,6 +3095,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Microchip KSZ9131 Gigabit PHY", /* PHY_GBIT_FEATURES */ + .flags = PHY_POLL_CABLE_TEST, .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9131_config_init, @@ -2965,6 +3106,8 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = kszphy_suspend, .resume = kszphy_resume, + .cable_test_start = ksz9x31_cable_test_start, + .cable_test_get_status = ksz9x31_cable_test_get_status, }, { .phy_id = PHY_ID_KSZ8873MLL, .phy_id_mask = MICREL_PHY_ID_MASK, diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index 389df3f4293c..d4c93d59bc53 100644 --- a/drivers/net/phy/microchip_t1.c +++ b/drivers/net/phy/microchip_t1.c @@ -68,7 +68,12 @@ #define T1_POST_LCK_MUFACT_CFG_REG 0x1C #define T1_TX_RX_FIFO_CFG_REG 0x02 #define T1_TX_LPF_FIR_CFG_REG 0x55 +#define T1_COEF_CLK_PWR_DN_CFG 0x04 +#define T1_COEF_RW_CTL_CFG 0x0D #define T1_SQI_CONFIG_REG 0x2E +#define T1_SQI_CONFIG2_REG 0x4A +#define T1_DCQ_SQI_REG 0xC3 +#define T1_DCQ_SQI_MSK GENMASK(3, 1) #define T1_MDIO_CONTROL2_REG 0x10 #define T1_INTERRUPT_SOURCE_REG 0x18 #define T1_INTERRUPT2_SOURCE_REG 0x08 @@ -82,6 +87,9 @@ #define T1_MODE_STAT_REG 0x11 #define T1_LINK_UP_MSK BIT(0) +/* SQI defines */ +#define LAN87XX_MAX_SQI 0x07 + #define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>" #define DRIVER_DESC "Microchip LAN87XX/LAN937x T1 PHY driver" @@ -346,9 +354,20 @@ static int lan87xx_phy_init(struct phy_device *phydev) T1_TX_LPF_FIR_CFG_REG, 0x1011, 0 }, { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, T1_TX_LPF_FIR_CFG_REG, 0x1000, 0 }, + /* Setup SQI measurement */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_COEF_CLK_PWR_DN_CFG, 0x16d6, 0 }, /* SQI enable */ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, T1_SQI_CONFIG_REG, 0x9572, 0 }, + /* SQI select mode 5 */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_SQI_CONFIG2_REG, 0x0001, 0 }, + /* Throws the first SQI reading */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_COEF_RW_CTL_CFG, 0x0301, 0 }, + { PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_DSP, + T1_DCQ_SQI_REG, 0, 0 }, /* Flag LPS and WUR as idle errors */ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI, T1_MDIO_CONTROL2_REG, 0x0014, 0 }, @@ -706,7 +725,6 @@ static int lan87xx_read_status(struct phy_device *phydev) static int lan87xx_config_aneg(struct phy_device *phydev) { u16 ctl = 0; - int rc; switch (phydev->master_slave_set) { case MASTER_SLAVE_CFG_MASTER_FORCE: @@ -722,11 +740,32 @@ static int lan87xx_config_aneg(struct phy_device *phydev) return -EOPNOTSUPP; } - rc = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl); - if (rc == 1) - rc = genphy_soft_reset(phydev); + return phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl); +} + +static int lan87xx_get_sqi(struct phy_device *phydev) +{ + u8 sqi_value = 0; + int rc; - return rc; + rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE, + PHYACC_ATTR_BANK_DSP, T1_COEF_RW_CTL_CFG, 0x0301); + if (rc < 0) + return rc; + + rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, + PHYACC_ATTR_BANK_DSP, T1_DCQ_SQI_REG, 0x0); + if (rc < 0) + return rc; + + sqi_value = FIELD_GET(T1_DCQ_SQI_MSK, rc); + + return sqi_value; +} + +static int lan87xx_get_sqi_max(struct phy_device *phydev) +{ + return LAN87XX_MAX_SQI; } static struct phy_driver microchip_t1_phy_driver[] = { @@ -742,18 +781,25 @@ static struct phy_driver microchip_t1_phy_driver[] = { .resume = 
genphy_resume, .config_aneg = lan87xx_config_aneg, .read_status = lan87xx_read_status, + .get_sqi = lan87xx_get_sqi, + .get_sqi_max = lan87xx_get_sqi_max, .cable_test_start = lan87xx_cable_test_start, .cable_test_get_status = lan87xx_cable_test_get_status, }, { PHY_ID_MATCH_MODEL(PHY_ID_LAN937X), .name = "Microchip LAN937x T1", + .flags = PHY_POLL_CABLE_TEST, .features = PHY_BASIC_T1_FEATURES, .config_init = lan87xx_config_init, + .config_intr = lan87xx_phy_config_intr, + .handle_interrupt = lan87xx_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, .config_aneg = lan87xx_config_aneg, .read_status = lan87xx_read_status, + .get_sqi = lan87xx_get_sqi, + .get_sqi_max = lan87xx_get_sqi_max, .cable_test_start = lan87xx_cable_test_start, .cable_test_get_status = lan87xx_cable_test_get_status, } diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index db709d30bf84..eefdd67d5556 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -9,6 +9,25 @@ #include <linux/phy.h> /** + * genphy_c45_baset1_able - checks if the PMA has BASE-T1 extended abilities + * @phydev: target phy_device struct + */ +static bool genphy_c45_baset1_able(struct phy_device *phydev) +{ + int val; + + if (phydev->pma_extable == -ENODATA) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE); + if (val < 0) + return false; + + phydev->pma_extable = val; + } + + return !!(phydev->pma_extable & MDIO_PMA_EXTABLE_BT1); +} + +/** * genphy_c45_pma_can_sleep - checks if the PMA have sleep support * @phydev: target phy_device struct */ @@ -80,7 +99,10 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev) switch (phydev->speed) { case SPEED_10: - ctrl2 |= MDIO_PMA_CTRL2_10BT; + if (genphy_c45_baset1_able(phydev)) + ctrl2 |= MDIO_PMA_CTRL2_BASET1; + else + ctrl2 |= MDIO_PMA_CTRL2_10BT; break; case SPEED_100: ctrl1 |= MDIO_PMA_CTRL1_SPEED100; @@ -118,10 +140,95 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev) if (ret < 0) return ret; + if (genphy_c45_baset1_able(phydev)) { + int ctl = 0; + + switch (phydev->master_slave_set) { + case MASTER_SLAVE_CFG_MASTER_PREFERRED: + case MASTER_SLAVE_CFG_MASTER_FORCE: + ctl = MDIO_PMA_PMD_BT1_CTRL_CFG_MST; + break; + case MASTER_SLAVE_CFG_SLAVE_FORCE: + case MASTER_SLAVE_CFG_SLAVE_PREFERRED: + case MASTER_SLAVE_CFG_UNKNOWN: + case MASTER_SLAVE_CFG_UNSUPPORTED: + break; + default: + phydev_warn(phydev, "Unsupported Master/Slave mode\n"); + return -EOPNOTSUPP; + } + + ret = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL, + MDIO_PMA_PMD_BT1_CTRL_CFG_MST, ctl); + if (ret < 0) + return ret; + } + return genphy_c45_an_disable_aneg(phydev); } EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced); +/* Sets master/slave preference and supported technologies. + * The preference is set in the BIT(4) of BASE-T1 AN + * advertisement register 7.515 and whether the status + * is forced or not, it is set in the BIT(12) of BASE-T1 + * AN advertisement register 7.514. + * Sets 10BASE-T1L Ability BIT(14) in BASE-T1 autonegotiation + * advertisement register [31:16] if supported. 
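+ * For example, MASTER_SLAVE_CFG_MASTER_FORCE sets both
+ * MDIO_AN_T1_ADV_L_FORCE_MS in 7.514 and MDIO_AN_T1_ADV_M_MST in 7.515,
+ * while MASTER_SLAVE_CFG_SLAVE_PREFERRED leaves both bits clear.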
+ */ +static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev) +{ + int changed = 0; + u16 adv_l = 0; + u16 adv_m = 0; + int ret; + + switch (phydev->master_slave_set) { + case MASTER_SLAVE_CFG_MASTER_FORCE: + case MASTER_SLAVE_CFG_SLAVE_FORCE: + adv_l |= MDIO_AN_T1_ADV_L_FORCE_MS; + break; + case MASTER_SLAVE_CFG_MASTER_PREFERRED: + case MASTER_SLAVE_CFG_SLAVE_PREFERRED: + break; + default: + break; + } + + switch (phydev->master_slave_set) { + case MASTER_SLAVE_CFG_MASTER_FORCE: + case MASTER_SLAVE_CFG_MASTER_PREFERRED: + adv_m |= MDIO_AN_T1_ADV_M_MST; + break; + case MASTER_SLAVE_CFG_SLAVE_FORCE: + case MASTER_SLAVE_CFG_SLAVE_PREFERRED: + break; + default: + break; + } + + adv_l |= linkmode_adv_to_mii_t1_adv_l_t(phydev->advertising); + + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_L, + (MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP + | MDIO_AN_T1_ADV_L_PAUSE_ASYM), adv_l); + if (ret < 0) + return ret; + if (ret > 0) + changed = 1; + + adv_m |= linkmode_adv_to_mii_t1_adv_m_t(phydev->advertising); + + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_M, + MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L, adv_m); + if (ret < 0) + return ret; + if (ret > 0) + changed = 1; + + return changed; +} + /** * genphy_c45_an_config_aneg - configure advertisement registers * @phydev: target phy_device struct @@ -141,6 +248,9 @@ int genphy_c45_an_config_aneg(struct phy_device *phydev) changed = genphy_config_eee_advert(phydev); + if (genphy_c45_baset1_able(phydev)) + return genphy_c45_baset1_an_config_aneg(phydev); + adv = linkmode_adv_to_mii_adv_t(phydev->advertising); ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, @@ -178,8 +288,12 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_config_aneg); */ int genphy_c45_an_disable_aneg(struct phy_device *phydev) { + u16 reg = MDIO_CTRL1; - return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, + if (genphy_c45_baset1_able(phydev)) + reg = MDIO_AN_T1_CTRL; + + return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg, MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART); } EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg); @@ -194,7 +308,12 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg); */ int genphy_c45_restart_aneg(struct phy_device *phydev) { - return phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, + u16 reg = MDIO_CTRL1; + + if (genphy_c45_baset1_able(phydev)) + reg = MDIO_AN_T1_CTRL; + + return phy_set_bits_mmd(phydev, MDIO_MMD_AN, reg, MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART); } EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg); @@ -210,11 +329,15 @@ EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg); */ int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart) { + u16 reg = MDIO_CTRL1; int ret; + if (genphy_c45_baset1_able(phydev)) + reg = MDIO_AN_T1_CTRL; + if (!restart) { /* Configure and restart aneg if it wasn't set before */ - ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); + ret = phy_read_mmd(phydev, MDIO_MMD_AN, reg); if (ret < 0) return ret; @@ -242,7 +365,13 @@ EXPORT_SYMBOL_GPL(genphy_c45_check_and_restart_aneg); */ int genphy_c45_aneg_done(struct phy_device *phydev) { - int val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + int reg = MDIO_STAT1; + int val; + + if (genphy_c45_baset1_able(phydev)) + reg = MDIO_AN_T1_STAT; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, reg); return val < 0 ? val : val & MDIO_AN_STAT1_COMPLETE ? 
1 : 0; } @@ -307,6 +436,49 @@ int genphy_c45_read_link(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(genphy_c45_read_link); +/* Read the Clause 45 defined BASE-T1 AN (7.513) status register to check + * if autoneg is complete. If so, read the BASE-T1 Autonegotiation + * Advertisement registers, filling in the link partner advertisement, + * pause and asym_pause members in phydev. + */ +static int genphy_c45_baset1_read_lpa(struct phy_device *phydev) +{ + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_STAT); + if (val < 0) + return val; + + if (!(val & MDIO_AN_STAT1_COMPLETE)) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising); + mii_t1_adv_l_mod_linkmode_t(phydev->lp_advertising, 0); + mii_t1_adv_m_mod_linkmode_t(phydev->lp_advertising, 0); + + phydev->pause = 0; + phydev->asym_pause = 0; + + return 0; + } + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising, 1); + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_LP_L); + if (val < 0) + return val; + + mii_t1_adv_l_mod_linkmode_t(phydev->lp_advertising, val); + phydev->pause = val & MDIO_AN_T1_ADV_L_PAUSE_CAP ? 1 : 0; + phydev->asym_pause = val & MDIO_AN_T1_ADV_L_PAUSE_ASYM ? 1 : 0; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_LP_M); + if (val < 0) + return val; + + mii_t1_adv_m_mod_linkmode_t(phydev->lp_advertising, val); + + return 0; +} + /** * genphy_c45_read_lpa - read the link partner advertisement and pause * @phydev: target phy_device struct @@ -321,6 +493,9 @@ int genphy_c45_read_lpa(struct phy_device *phydev) { int val; + if (genphy_c45_baset1_able(phydev)) + return genphy_c45_baset1_read_lpa(phydev); + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); if (val < 0) return val; @@ -399,6 +574,17 @@ int genphy_c45_read_pma(struct phy_device *phydev) phydev->duplex = DUPLEX_FULL; + if (genphy_c45_baset1_able(phydev)) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL); + if (val < 0) + return val; + + if (val & MDIO_PMA_PMD_BT1_CTRL_CFG_MST) + phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER; + else + phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE; + } + return 0; } EXPORT_SYMBOL_GPL(genphy_c45_read_pma); @@ -530,12 +716,67 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev) phydev->supported, val & MDIO_PMA_NG_EXTABLE_5GBT); } + + if (val & MDIO_PMA_EXTABLE_BT1) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, + phydev->supported, + val & MDIO_PMA_PMD_BT1_B10L_ABLE); + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_STAT); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->supported, + val & MDIO_AN_STAT1_ABLE); + } } return 0; } EXPORT_SYMBOL_GPL(genphy_c45_pma_read_abilities); +/* Read master/slave preference from registers. + * The preference is read from BIT(4) of BASE-T1 AN + * advertisement register 7.515, and whether the preference + * is forced or not is read from BASE-T1 AN advertisement + * register 7.514.
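+ * The decoding mirrors the encoding done in genphy_c45_baset1_an_config_aneg()
+ * above: FORCE_MS together with MST reads back as MASTER_SLAVE_CFG_MASTER_FORCE,
+ * FORCE_MS without MST as MASTER_SLAVE_CFG_SLAVE_FORCE, and likewise for the
+ * two preferred variants when FORCE_MS is clear.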
+ */ +static int genphy_c45_baset1_read_status(struct phy_device *phydev) +{ + int ret; + int cfg; + + phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN; + phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN; + + ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_L); + if (ret < 0) + return ret; + + cfg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_M); + if (cfg < 0) + return cfg; + + if (ret & MDIO_AN_T1_ADV_L_FORCE_MS) { + if (cfg & MDIO_AN_T1_ADV_M_MST) + phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE; + else + phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE; + } else { + if (cfg & MDIO_AN_T1_ADV_M_MST) + phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_PREFERRED; + else + phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_PREFERRED; + } + + return 0; +} + /** * genphy_c45_read_status - read PHY status * @phydev: target phy_device struct @@ -560,6 +801,12 @@ int genphy_c45_read_status(struct phy_device *phydev) if (ret) return ret; + if (genphy_c45_baset1_able(phydev)) { + ret = genphy_c45_baset1_read_status(phydev); + if (ret < 0) + return ret; + } + phy_resolve_aneg_linkmode(phydev); } else { ret = genphy_c45_read_pma(phydev); diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 2001f3329133..1f2531a1a876 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -13,7 +13,7 @@ */ const char *phy_speed_to_str(int speed) { - BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 92, + BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 93, "Enum ethtool_link_mode_bit_indices and phylib are out of sync. " "If a speed or mode has been added please update phy_speed_to_str " "and the PHY settings array.\n"); @@ -176,6 +176,7 @@ static const struct phy_setting settings[] = { /* 10M */ PHY_SETTING( 10, FULL, 10baseT_Full ), PHY_SETTING( 10, HALF, 10baseT_Half ), + PHY_SETTING( 10, FULL, 10baseT1L_Full ), }; #undef PHY_SETTING diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index beb2b66da132..9034c6a8e18f 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -295,20 +295,20 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) if (mdio_phy_id_is_c45(mii_data->phy_id)) { prtad = mdio_phy_id_prtad(mii_data->phy_id); devad = mdio_phy_id_devad(mii_data->phy_id); - devad = mdiobus_c45_addr(devad, mii_data->reg_num); + mii_data->val_out = mdiobus_c45_read( + phydev->mdio.bus, prtad, devad, + mii_data->reg_num); } else { - prtad = mii_data->phy_id; - devad = mii_data->reg_num; + mii_data->val_out = mdiobus_read( + phydev->mdio.bus, mii_data->phy_id, + mii_data->reg_num); } - mii_data->val_out = mdiobus_read(phydev->mdio.bus, prtad, - devad); return 0; case SIOCSMIIREG: if (mdio_phy_id_is_c45(mii_data->phy_id)) { prtad = mdio_phy_id_prtad(mii_data->phy_id); devad = mdio_phy_id_devad(mii_data->phy_id); - devad = mdiobus_c45_addr(devad, mii_data->reg_num); } else { prtad = mii_data->phy_id; devad = mii_data->reg_num; @@ -351,7 +351,11 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) } } - mdiobus_write(phydev->mdio.bus, prtad, devad, val); + if (mdio_phy_id_is_c45(mii_data->phy_id)) + mdiobus_c45_write(phydev->mdio.bus, prtad, devad, + mii_data->reg_num, val); + else + mdiobus_write(phydev->mdio.bus, prtad, devad, val); if (prtad == phydev->mdio.addr && devad == MII_BMCR && diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8406ac739def..431a8719c635 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -90,8 
+90,9 @@ const int phy_10_100_features_array[4] = { }; EXPORT_SYMBOL_GPL(phy_10_100_features_array); -const int phy_basic_t1_features_array[2] = { +const int phy_basic_t1_features_array[3] = { ETHTOOL_LINK_MODE_TP_BIT, + ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, ETHTOOL_LINK_MODE_100baseT1_Full_BIT, }; EXPORT_SYMBOL_GPL(phy_basic_t1_features_array); @@ -599,6 +600,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, dev->autoneg = AUTONEG_ENABLE; + dev->pma_extable = -ENODATA; dev->is_c45 = is_c45; dev->phy_id = phy_id; if (c45_ids) @@ -1449,6 +1451,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, phydev->state = PHY_READY; + phydev->interrupts = PHY_INTERRUPT_DISABLED; + /* Port is set to PORT_TP by default and the actual PHY driver will set * it to different value depending on the PHY configuration. If we have * the generic PHY driver we can't figure it out, thus set the old @@ -1471,10 +1475,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, if (err) goto error; - err = phy_disable_interrupts(phydev); - if (err) - return err; - phy_resume(phydev); phy_led_triggers_register(phydev); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 06943889d747..066684b80919 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -168,8 +168,10 @@ static void phylink_caps_to_linkmodes(unsigned long *linkmodes, if (caps & MAC_10HD) __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes); - if (caps & MAC_10FD) + if (caps & MAC_10FD) { __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, linkmodes); + } if (caps & MAC_100HD) { __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, linkmodes); @@ -2301,8 +2303,11 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id, if (mdio_phy_id_is_c45(phy_id)) { prtad = mdio_phy_id_prtad(phy_id); devad = mdio_phy_id_devad(phy_id); - devad = mdiobus_c45_addr(devad, reg); - } else if (phydev->is_c45) { + return mdiobus_c45_read(pl->phydev->mdio.bus, prtad, devad, + reg); + } + + if (phydev->is_c45) { switch (reg) { case MII_BMCR: case MII_BMSR: @@ -2324,12 +2329,11 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id, return -EINVAL; } prtad = phy_id; - devad = mdiobus_c45_addr(devad, reg); - } else { - prtad = phy_id; - devad = reg; + return mdiobus_c45_read(pl->phydev->mdio.bus, prtad, devad, + reg); } - return mdiobus_read(pl->phydev->mdio.bus, prtad, devad); + + return mdiobus_read(pl->phydev->mdio.bus, phy_id, reg); } static int phylink_phy_write(struct phylink *pl, unsigned int phy_id, @@ -2341,8 +2345,11 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id, if (mdio_phy_id_is_c45(phy_id)) { prtad = mdio_phy_id_prtad(phy_id); devad = mdio_phy_id_devad(phy_id); - devad = mdiobus_c45_addr(devad, reg); - } else if (phydev->is_c45) { + return mdiobus_c45_write(pl->phydev->mdio.bus, prtad, devad, + reg, val); + } + + if (phydev->is_c45) { switch (reg) { case MII_BMCR: case MII_BMSR: @@ -2363,14 +2370,11 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id, default: return -EINVAL; } - prtad = phy_id; - devad = mdiobus_c45_addr(devad, reg); - } else { - prtad = phy_id; - devad = reg; + return mdiobus_c45_write(pl->phydev->mdio.bus, phy_id, devad, + reg, val); } - return mdiobus_write(phydev->mdio.bus, prtad, devad, val); + return mdiobus_write(phydev->mdio.bus, phy_id, reg, val); } static int phylink_mii_read(struct phylink *pl, 
unsigned int phy_id, @@ -2778,34 +2782,6 @@ static const struct sfp_upstream_ops sfp_phylink_ops = { /* Helpers for MAC drivers */ -/** - * phylink_helper_basex_speed() - 1000BaseX/2500BaseX helper - * @state: a pointer to a &struct phylink_link_state - * - * Inspect the interface mode, advertising mask or forced speed and - * decide whether to run at 2.5Gbit or 1Gbit appropriately, switching - * the interface mode to suit. @state->interface is appropriately - * updated, and the advertising mask has the "other" baseX_Full flag - * cleared. - */ -void phylink_helper_basex_speed(struct phylink_link_state *state) -{ - if (phy_interface_mode_is_8023z(state->interface)) { - bool want_2500 = state->an_enabled ? - phylink_test(state->advertising, 2500baseX_Full) : - state->speed == SPEED_2500; - - if (want_2500) { - phylink_clear(state->advertising, 1000baseX_Full); - state->interface = PHY_INTERFACE_MODE_2500BASEX; - } else { - phylink_clear(state->advertising, 2500baseX_Full); - state->interface = PHY_INTERFACE_MODE_1000BASEX; - } - } -} -EXPORT_SYMBOL_GPL(phylink_helper_basex_speed); - static void phylink_decode_c37_word(struct phylink_link_state *state, uint16_t config_reg, int speed) { diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 3619520340b7..1b41cd9732d7 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -1011,8 +1011,7 @@ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m, goto end; } - skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, - flags & MSG_DONTWAIT, &error); + skb = skb_recv_datagram(sk, flags, &error); if (error < 0) goto end; diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 88396ff99f03..6865d32270e5 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -469,7 +469,7 @@ static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue) spin_lock(&sl->lock); if (netif_queue_stopped(dev)) { - if (!netif_running(dev)) + if (!netif_running(dev) || !sl->tty) goto out; /* May be we must check transmitter timeout here ? diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 276a0e42ca8e..dbe4c0a4be2c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1124,7 +1124,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) /* NETIF_F_LLTX requires to do our own update of trans_start */ queue = netdev_get_tx_queue(dev, txq); - queue->trans_start = jiffies; + txq_trans_cond_update(queue); /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index ea06d10e1c21..ca409d450a29 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -1102,10 +1102,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb) if (start_of_descs != desc_offset) goto err; - /* self check desc_offset from header*/ - if (desc_offset >= skb_len) + /* self check desc_offset from header and make sure that the + * bounds of the metadata array are inside the SKB + */ + if (pkt_count * 2 + desc_offset >= skb_len) goto err; + /* Packets must not overlap the metadata array */ + skb_trim(skb, desc_offset); + if (pkt_count == 0) goto err; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 9b4dfa3001d6..2de09ad5bac0 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -479,7 +479,7 @@ static int usbnet_cdc_zte_bind(struct usbnet *dev, struct usb_interface *intf) * device MAC address has been updated). 
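 * Such frames carry a locally administered destination address (the
 * skb->data[0] & 0x02 test below), which the fixup replaces with the
 * address of the net device.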
Always set MAC address to that of the * device. */ -static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { if (skb->len < ETH_HLEN || !(skb->data[0] & 0x02)) return 1; @@ -489,6 +489,7 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb) return 1; } +EXPORT_SYMBOL_GPL(usbnet_cdc_zte_rx_fixup); /* Ensure correct link state * diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 15f91d691bba..cdca00c0dc1f 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1492,19 +1492,19 @@ static void cdc_ncm_txpath_bh(struct tasklet_struct *t) struct cdc_ncm_ctx *ctx = from_tasklet(ctx, t, bh); struct usbnet *dev = ctx->dev; - spin_lock_bh(&ctx->mtx); + spin_lock(&ctx->mtx); if (ctx->tx_timer_pending != 0) { ctx->tx_timer_pending--; cdc_ncm_tx_timeout_start(ctx); - spin_unlock_bh(&ctx->mtx); + spin_unlock(&ctx->mtx); } else if (dev->net != NULL) { ctx->tx_reason_timeout++; /* count reason for transmitting */ - spin_unlock_bh(&ctx->mtx); + spin_unlock(&ctx->mtx); netif_tx_lock_bh(dev->net); usbnet_start_xmit(NULL, dev->net); netif_tx_unlock_bh(dev->net); } else { - spin_unlock_bh(&ctx->mtx); + spin_unlock(&ctx->mtx); } } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 415f16662f88..94e571fb61da 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -92,8 +92,6 @@ WAKE_MCAST | WAKE_BCAST | \ WAKE_ARP | WAKE_MAGIC) -#define LAN78XX_NAPI_WEIGHT 64 - #define TX_URB_NUM 10 #define TX_SS_URB_NUM TX_URB_NUM #define TX_HS_URB_NUM TX_URB_NUM @@ -4376,7 +4374,7 @@ static int lan78xx_probe(struct usb_interface *intf, netif_set_gso_max_size(netdev, LAN78XX_TSO_SIZE(dev)); - netif_napi_add(netdev, &dev->napi, lan78xx_poll, LAN78XX_NAPI_WEIGHT); + netif_napi_add(netdev, &dev->napi, lan78xx_poll, NAPI_POLL_WEIGHT); INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork); init_usb_anchor(&dev->deferred); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 3353e761016d..79f8bd849b1a 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -190,7 +190,6 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) skbn = netdev_alloc_skb(net, pkt_len + LL_MAX_HEADER); if (!skbn) return 0; - skbn->dev = net; switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) { case 0x40: @@ -1351,6 +1350,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */ {QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ + {QMI_QUIRK_SET_DTR(0x1199, 0xc081, 8)}, /* Sierra Wireless EM7590 */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ @@ -1358,6 +1358,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c 
index 247f58cb0f84..4e70dec30e5a 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -418,10 +418,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) goto halt_fail_and_release; } - if (bp[0] & 0x02) - eth_hw_addr_random(net); - else - eth_hw_addr_set(net, bp); + eth_hw_addr_set(net, bp); /* set a nonzero filter to enable data transfers */ memset(u.set, 0, sizeof *u.set); @@ -463,6 +460,16 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf) return generic_rndis_bind(dev, intf, FLAG_RNDIS_PHYM_NOT_WIRELESS); } +static int zte_rndis_bind(struct usbnet *dev, struct usb_interface *intf) +{ + int status = rndis_bind(dev, intf); + + if (!status && (dev->net->dev_addr[0] & 0x02)) + eth_hw_addr_random(dev->net); + + return status; +} + void rndis_unbind(struct usbnet *dev, struct usb_interface *intf) { struct rndis_halt *halt; @@ -485,10 +492,14 @@ EXPORT_SYMBOL_GPL(rndis_unbind); */ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { + bool dst_mac_fixup; + /* This check is no longer done by usbnet */ if (skb->len < dev->net->hard_header_len) return 0; + dst_mac_fixup = !!(dev->driver_info->data & RNDIS_DRIVER_DATA_DST_MAC_FIXUP); + /* peripheral may have batched packets to us... */ while (likely(skb->len)) { struct rndis_data_hdr *hdr = (void *)skb->data; @@ -523,10 +534,17 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) break; skb_pull(skb, msg_len - sizeof *hdr); skb_trim(skb2, data_len); + + if (unlikely(dst_mac_fixup)) + usbnet_cdc_zte_rx_fixup(dev, skb2); + usbnet_skb_return(dev, skb2); } /* caller will usbnet_skb_return the remaining packet */ + if (unlikely(dst_mac_fixup)) + usbnet_cdc_zte_rx_fixup(dev, skb); + return 1; } EXPORT_SYMBOL_GPL(rndis_rx_fixup); @@ -600,6 +618,17 @@ static const struct driver_info rndis_poll_status_info = { .tx_fixup = rndis_tx_fixup, }; +static const struct driver_info zte_rndis_info = { + .description = "ZTE RNDIS device", + .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT, + .data = RNDIS_DRIVER_DATA_DST_MAC_FIXUP, + .bind = zte_rndis_bind, + .unbind = rndis_unbind, + .status = rndis_status, + .rx_fixup = rndis_rx_fixup, + .tx_fixup = rndis_tx_fixup, +}; + /*-------------------------------------------------------------------------*/ static const struct usb_device_id products [] = { @@ -614,6 +643,16 @@ static const struct usb_device_id products [] = { USB_CLASS_COMM, 2 /* ACM */, 0x0ff), .driver_info = (unsigned long)&rndis_info, }, { + /* ZTE WWAN modules */ + USB_VENDOR_AND_INTERFACE_INFO(0x19d2, + USB_CLASS_WIRELESS_CONTROLLER, 1, 3), + .driver_info = (unsigned long)&zte_rndis_info, +}, { + /* ZTE WWAN modules, ACM flavour */ + USB_VENDOR_AND_INTERFACE_INFO(0x19d2, + USB_CLASS_COMM, 2 /* ACM */, 0x0ff), + .driver_info = (unsigned long)&zte_rndis_info, +}, { /* RNDIS is MSFT's un-official variant of CDC ACM */ USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), .driver_info = (unsigned long) &rndis_info, diff --git a/drivers/net/usb/sr9800.h b/drivers/net/usb/sr9800.h index 18f670251275..952e6f7c0321 100644 --- a/drivers/net/usb/sr9800.h +++ b/drivers/net/usb/sr9800.h @@ -163,7 +163,7 @@ #define SR9800_MAX_BULKIN_24K 6 #define SR9800_MAX_BULKIN_32K 7 -struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = { +static const struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = { /* 2k */ {2048, 0x8000, 0x8001}, /* 4k */ diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 
1b5714926d81..3592014e50cc 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -320,7 +320,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) rcu_read_lock(); rcv = rcu_dereference(priv->peer); - if (unlikely(!rcv)) { + if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) { kfree_skb(skb); goto drop; } @@ -1375,7 +1375,7 @@ static int veth_alloc_queues(struct net_device *dev) struct veth_priv *priv = netdev_priv(dev); int i; - priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL); + priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT); if (!priv->rq) return -ENOMEM; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 87838cbe38cf..cbba9d2e8f32 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1005,6 +1005,24 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, * xdp.data_meta were adjusted */ len = xdp.data_end - xdp.data + vi->hdr_len + metasize; + + /* recalculate headroom if xdp.data or xdp_data_meta + * were adjusted, note that offset should always point + * to the start of the reserved bytes for virtio_net + * header which are followed by xdp.data, that means + * that offset is equal to the headroom (when buf is + * starting at the beginning of the page, otherwise + * there is a base offset inside the page) but it's used + * with a different starting point (buf start) than + * xdp.data (buf start + vnet hdr size). If xdp.data or + * data_meta were adjusted by the xdp prog then the + * headroom size has changed and so has the offset, we + * can use data_hard_start, which points at buf start + + * vnet hdr size, to calculate the new headroom and use + * it later to compute buf start in page_to_skb() + */ + headroom = xdp.data - xdp.data_hard_start - metasize; + /* We can only create skb based on xdp_page. */ if (unlikely(xdp_page != page)) { rcu_read_unlock(); @@ -1012,7 +1030,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, head_skb = page_to_skb(vi, rq, xdp_page, offset, len, PAGE_SIZE, false, metasize, - VIRTIO_XDP_HEADROOM); + headroom); return head_skb; } break; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 85e362461d71..cfc30ce4c6e1 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1265,6 +1265,7 @@ static int vrf_prepare_mac_header(struct sk_buff *skb, eth = (struct ethhdr *)skb->data; skb_reset_mac_header(skb); + skb_reset_mac_len(skb); /* we set the ethernet destination and the source addresses to the * address of the VRF device. 
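Two defensive changes above: veth_xmit() now refuses frames whose Ethernet header cannot be pulled into the linear area, and veth's per-queue allocation switches to GFP_KERNEL_ACCOUNT so the memory is charged to the creating task's memory cgroup. The virtio_net hunk recomputes the headroom after an XDP program may have moved xdp.data or xdp.data_meta, instead of assuming the original VIRTIO_XDP_HEADROOM still holds. The veth guard, roughly (hypothetical wrapper name):

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* pskb_may_pull() succeeds only if at least ETH_HLEN bytes exist and can
 * be made contiguous in the linear area; runt frames are dropped. */
static bool demo_frame_has_eth_header(struct sk_buff *skb)
{
	return pskb_may_pull(skb, ETH_HLEN);
}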
@@ -1294,9 +1295,9 @@ static int vrf_prepare_mac_header(struct sk_buff *skb, */ static int vrf_add_mac_header_if_unset(struct sk_buff *skb, struct net_device *vrf_dev, - u16 proto) + u16 proto, struct net_device *orig_dev) { - if (skb_mac_header_was_set(skb)) + if (skb_mac_header_was_set(skb) && dev_has_header(orig_dev)) return 0; return vrf_prepare_mac_header(skb, vrf_dev, proto); @@ -1402,6 +1403,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, /* if packet is NDISC then keep the ingress interface */ if (!is_ndisc) { + struct net_device *orig_dev = skb->dev; + vrf_rx_stats(vrf_dev, skb->len); skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; @@ -1410,7 +1413,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, int err; err = vrf_add_mac_header_if_unset(skb, vrf_dev, - ETH_P_IPV6); + ETH_P_IPV6, + orig_dev); if (likely(!err)) { skb_push(skb, skb->mac_len); dev_queue_xmit_nit(skb, vrf_dev); @@ -1440,6 +1444,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, struct sk_buff *skb) { + struct net_device *orig_dev = skb->dev; + skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; IPCB(skb)->flags |= IPSKB_L3SLAVE; @@ -1460,7 +1466,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, if (!list_empty(&vrf_dev->ptype_all)) { int err; - err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP); + err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP, + orig_dev); if (likely(!err)) { skb_push(skb, skb->mac_len); dev_queue_xmit_nit(skb, vrf_dev); diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index de97ff98d36e..8a5e3a6d32d7 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -651,11 +651,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, rd = kmalloc(sizeof(*rd), GFP_ATOMIC); if (rd == NULL) - return -ENOBUFS; + return -ENOMEM; if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) { kfree(rd); - return -ENOBUFS; + return -ENOMEM; } rd->remote_ip = *ip; diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 140780ac1745..dcb069dde66b 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -23,78 +23,6 @@ menuconfig WAN if WAN -# There is no way to detect a comtrol sv11 - force it modular for now. -config HOSTESS_SV11 - tristate "Comtrol Hostess SV-11 support" - depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS - help - Driver for Comtrol Hostess SV-11 network card which - operates on low speed synchronous serial links at up to - 256Kbps, supporting PPP and Cisco HDLC. - - The driver will be compiled as a module: the - module will be called hostess_sv11. - -# The COSA/SRP driver has not been tested as non-modular yet. -config COSA - tristate "COSA/SRP sync serial boards support" - depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS - help - Driver for COSA and SRP synchronous serial boards. - - These boards allow to connect synchronous serial devices (for example - base-band modems, or any other device with the X.21, V.24, V.35 or - V.36 interface) to your Linux box. The cards can work as the - character device, synchronous PPP network device, or the Cisco HDLC - network device. - - You will need user-space utilities COSA or SRP boards for downloading - the firmware to the cards and to set them up. Look at the - <http://www.fi.muni.cz/~kas/cosa/> for more information. 
You can also - read the comment at the top of the <file:drivers/net/wan/cosa.c> for - details about the cards and the driver itself. - - The driver will be compiled as a module: the - module will be called cosa. - -# -# Lan Media's board. Currently 1000, 1200, 5200, 5245 -# -config LANMEDIA - tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards" - depends on PCI && VIRT_TO_BUS && HDLC - help - Driver for the following Lan Media family of serial boards: - - - LMC 1000 board allows you to connect synchronous serial devices - (for example base-band modems, or any other device with the X.21, - V.24, V.35 or V.36 interface) to your Linux box. - - - LMC 1200 with on board DSU board allows you to connect your Linux - box directly to a T1 or E1 circuit. - - - LMC 5200 board provides a HSSI interface capable of running up to - 52 Mbits per second. - - - LMC 5245 board connects directly to a T3 circuit saving the - additional external hardware. - - To change setting such as clock source you will need lmcctl. - It is available at <ftp://ftp.lanmedia.com/> (broken link). - - To compile this driver as a module, choose M here: the - module will be called lmc. - -# There is no way to detect a Sealevel board. Force it modular -config SEALEVEL_4021 - tristate "Sealevel Systems 4021 support" - depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS - help - This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. - - The driver will be compiled as a module: the - module will be called sealevel. - # Generic HDLC config HDLC tristate "Generic HDLC layer" diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index 480bcd1f6c1c..5bec8fae47f8 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -14,13 +14,8 @@ obj-$(CONFIG_HDLC_FR) += hdlc_fr.o obj-$(CONFIG_HDLC_PPP) += hdlc_ppp.o obj-$(CONFIG_HDLC_X25) += hdlc_x25.o -obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o -obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o -obj-$(CONFIG_COSA) += cosa.o obj-$(CONFIG_FARSYNC) += farsync.o -obj-$(CONFIG_LANMEDIA) += lmc/ - obj-$(CONFIG_LAPBETHER) += lapbether.o obj-$(CONFIG_N2) += n2.o obj-$(CONFIG_C101) += c101.o diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c deleted file mode 100644 index 23d2954d9747..000000000000 --- a/drivers/net/wan/cosa.c +++ /dev/null @@ -1,2052 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* $Id: cosa.c,v 1.31 2000/03/08 17:47:16 kas Exp $ */ - -/* Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz> - * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> - */ - -/* The driver for the SRP and COSA synchronous serial cards. - * - * HARDWARE INFO - * - * Both cards are developed at the Institute of Computer Science, - * Masaryk University (https://www.ics.muni.cz/). The hardware is - * developed by Jiri Novotny <novotny@ics.muni.cz>. More information - * and the photo of both cards is available at - * http://www.pavoucek.cz/cosa.html. The card documentation, firmwares - * and other goods can be downloaded from ftp://ftp.ics.muni.cz/pub/cosa/. - * For Linux-specific utilities, see below in the "Software info" section. - * If you want to order the card, contact Jiri Novotny. - * - * The SRP (serial port?, the Czech word "srp" means "sickle") card - * is a 2-port intelligent (with its own 8-bit CPU) synchronous serial card - * with V.24 interfaces up to 80kb/s each. 
- * - * The COSA (communication serial adapter?, the Czech word "kosa" means - * "scythe") is a next-generation sync/async board with two interfaces - * - currently any of V.24, X.21, V.35 and V.36 can be selected. - * It has a 16-bit SAB80166 CPU and can do up to 10 Mb/s per channel. - * The 8-channels version is in development. - * - * Both types have downloadable firmware and communicate via ISA DMA. - * COSA can be also a bus-mastering device. - * - * SOFTWARE INFO - * - * The homepage of the Linux driver is at https://www.fi.muni.cz/~kas/cosa/. - * The CVS tree of Linux driver can be viewed there, as well as the - * firmware binaries and user-space utilities for downloading the firmware - * into the card and setting up the card. - * - * The Linux driver (unlike the present *BSD drivers :-) can work even - * for the COSA and SRP in one computer and allows each channel to work - * in one of the two modes (character or network device). - * - * AUTHOR - * - * The Linux driver was written by Jan "Yenya" Kasprzak <kas@fi.muni.cz>. - * - * You can mail me bugfixes and even success reports. I am especially - * interested in the SMP and/or muliti-channel success/failure reports - * (I wonder if I did the locking properly :-). - * - * THE AUTHOR USED THE FOLLOWING SOURCES WHEN PROGRAMMING THE DRIVER - * - * The COSA/SRP NetBSD driver by Zdenek Salvet and Ivos Cernohlavek - * The skeleton.c by Donald Becker - * The SDL Riscom/N2 driver by Mike Natale - * The Comtrol Hostess SV11 driver by Alan Cox - * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/sched/signal.h> -#include <linux/slab.h> -#include <linux/poll.h> -#include <linux/fs.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/hdlc.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/netdevice.h> -#include <linux/spinlock.h> -#include <linux/mutex.h> -#include <linux/device.h> -#include <asm/io.h> -#include <asm/dma.h> -#include <asm/byteorder.h> - -#undef COSA_SLOW_IO /* for testing purposes only */ - -#include "cosa.h" - -/* Maximum length of the identification string. */ -#define COSA_MAX_ID_STRING 128 - -/* Maximum length of the channel name */ -#define COSA_MAX_NAME (sizeof("cosaXXXcXXX") + 1) - -/* Per-channel data structure */ - -struct channel_data { - int usage; /* Usage count; >0 for chrdev, -1 for netdev */ - int num; /* Number of the channel */ - struct cosa_data *cosa; /* Pointer to the per-card structure */ - int txsize; /* Size of transmitted data */ - char *txbuf; /* Transmit buffer */ - char name[COSA_MAX_NAME]; /* channel name */ - - /* The HW layer interface */ - /* routine called from the RX interrupt */ - char *(*setup_rx)(struct channel_data *channel, int size); - /* routine called when the RX is done (from the EOT interrupt) */ - int (*rx_done)(struct channel_data *channel); - /* routine called when the TX is done (from the EOT interrupt) */ - int (*tx_done)(struct channel_data *channel, int size); - - /* Character device parts */ - struct mutex rlock; - struct semaphore wsem; - char *rxdata; - int rxsize; - wait_queue_head_t txwaitq, rxwaitq; - int tx_status, rx_status; - - /* generic HDLC device parts */ - struct net_device *netdev; - struct sk_buff *rx_skb, *tx_skb; -}; - -/* cosa->firmware_status bits */ -#define COSA_FW_RESET BIT(0) /* Is the ROM monitor active? 
*/ -#define COSA_FW_DOWNLOAD BIT(1) /* Is the microcode downloaded? */ -#define COSA_FW_START BIT(2) /* Is the microcode running? */ - -struct cosa_data { - int num; /* Card number */ - char name[COSA_MAX_NAME]; /* Card name - e.g "cosa0" */ - unsigned int datareg, statusreg; /* I/O ports */ - unsigned short irq, dma; /* IRQ and DMA number */ - unsigned short startaddr; /* Firmware start address */ - unsigned short busmaster; /* Use busmastering? */ - int nchannels; /* # of channels on this card */ - int driver_status; /* For communicating with firmware */ - int firmware_status; /* Downloaded, reseted, etc. */ - unsigned long rxbitmap, txbitmap;/* Bitmap of channels who are willing to send/receive data */ - unsigned long rxtx; /* RX or TX in progress? */ - int enabled; - int usage; /* usage count */ - int txchan, txsize, rxsize; - struct channel_data *rxchan; - char *bouncebuf; - char *txbuf, *rxbuf; - struct channel_data *chan; - spinlock_t lock; /* For exclusive operations on this structure */ - char id_string[COSA_MAX_ID_STRING]; /* ROM monitor ID string */ - char *type; /* card type */ -}; - -/* Define this if you want all the possible ports to be autoprobed. - * It is here but it probably is not a good idea to use this. - */ -/* #define COSA_ISA_AUTOPROBE 1*/ - -/* Character device major number. 117 was allocated for us. - * The value of 0 means to allocate a first free one. - */ -static DEFINE_MUTEX(cosa_chardev_mutex); -static int cosa_major = 117; - -/* Encoding of the minor numbers: - * The lowest CARD_MINOR_BITS bits means the channel on the single card, - * the highest bits means the card number. - */ -#define CARD_MINOR_BITS 4 /* How many bits in minor number are reserved - * for the single card - */ -/* The following depends on CARD_MINOR_BITS. Unfortunately, the "MODULE_STRING" - * macro doesn't like anything other than the raw number as an argument :-( - */ -#define MAX_CARDS 16 -/* #define MAX_CARDS (1 << (8-CARD_MINOR_BITS)) */ - -#define DRIVER_RX_READY 0x0001 -#define DRIVER_TX_READY 0x0002 -#define DRIVER_TXMAP_SHIFT 2 -#define DRIVER_TXMAP_MASK 0x0c /* FIXME: 0xfc for 8-channel version */ - -/* for cosa->rxtx - indicates whether either transmit or receive is - * in progress. These values are mean number of the bit. - */ -#define TXBIT 0 -#define RXBIT 1 -#define IRQBIT 2 - -#define COSA_MTU 2000 /* FIXME: I don't know this exactly */ - -#undef DEBUG_DATA //1 /* Dump the data read or written to the channel */ -#undef DEBUG_IRQS //1 /* Print the message when the IRQ is received */ -#undef DEBUG_IO //1 /* Dump the I/O traffic */ - -#define TX_TIMEOUT (5 * HZ) - -/* Maybe the following should be allocated dynamically */ -static struct cosa_data cosa_cards[MAX_CARDS]; -static int nr_cards; - -#ifdef COSA_ISA_AUTOPROBE -static int io[MAX_CARDS + 1] = {0x220, 0x228, 0x210, 0x218, 0,}; -/* NOTE: DMA is not autoprobed!!! 
*/ -static int dma[MAX_CARDS + 1] = {1, 7, 1, 7, 1, 7, 1, 7, 0,}; -#else -static int io[MAX_CARDS + 1]; -static int dma[MAX_CARDS + 1]; -#endif -/* IRQ can be safely autoprobed */ -static int irq[MAX_CARDS + 1] = {-1, -1, -1, -1, -1, -1, 0,}; - -/* for class stuff*/ -static struct class *cosa_class; - -#ifdef MODULE -module_param_hw_array(io, int, ioport, NULL, 0); -MODULE_PARM_DESC(io, "The I/O bases of the COSA or SRP cards"); -module_param_hw_array(irq, int, irq, NULL, 0); -MODULE_PARM_DESC(irq, "The IRQ lines of the COSA or SRP cards"); -module_param_hw_array(dma, int, dma, NULL, 0); -MODULE_PARM_DESC(dma, "The DMA channels of the COSA or SRP cards"); - -MODULE_AUTHOR("Jan \"Yenya\" Kasprzak, <kas@fi.muni.cz>"); -MODULE_DESCRIPTION("Modular driver for the COSA or SRP synchronous card"); -MODULE_LICENSE("GPL"); -#endif - -/* I use this mainly for testing purposes */ -#ifdef COSA_SLOW_IO -#define cosa_outb outb_p -#define cosa_outw outw_p -#define cosa_inb inb_p -#define cosa_inw inw_p -#else -#define cosa_outb outb -#define cosa_outw outw -#define cosa_inb inb -#define cosa_inw inw -#endif - -#define is_8bit(cosa) (!((cosa)->datareg & 0x08)) - -#define cosa_getstatus(cosa) (cosa_inb((cosa)->statusreg)) -#define cosa_putstatus(cosa, stat) (cosa_outb(stat, (cosa)->statusreg)) -#define cosa_getdata16(cosa) (cosa_inw((cosa)->datareg)) -#define cosa_getdata8(cosa) (cosa_inb((cosa)->datareg)) -#define cosa_putdata16(cosa, dt) (cosa_outw(dt, (cosa)->datareg)) -#define cosa_putdata8(cosa, dt) (cosa_outb(dt, (cosa)->datareg)) - -/* Initialization stuff */ -static int cosa_probe(int ioaddr, int irq, int dma); - -/* HW interface */ -static void cosa_enable_rx(struct channel_data *chan); -static void cosa_disable_rx(struct channel_data *chan); -static int cosa_start_tx(struct channel_data *channel, char *buf, int size); -static void cosa_kick(struct cosa_data *cosa); -static int cosa_dma_able(struct channel_data *chan, char *buf, int data); - -/* Network device stuff */ -static int cosa_net_attach(struct net_device *dev, unsigned short encoding, - unsigned short parity); -static int cosa_net_open(struct net_device *d); -static int cosa_net_close(struct net_device *d); -static void cosa_net_timeout(struct net_device *d, unsigned int txqueue); -static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d); -static char *cosa_net_setup_rx(struct channel_data *channel, int size); -static int cosa_net_rx_done(struct channel_data *channel); -static int cosa_net_tx_done(struct channel_data *channel, int size); - -/* Character device */ -static char *chrdev_setup_rx(struct channel_data *channel, int size); -static int chrdev_rx_done(struct channel_data *channel); -static int chrdev_tx_done(struct channel_data *channel, int size); -static ssize_t cosa_read(struct file *file, - char __user *buf, size_t count, loff_t *ppos); -static ssize_t cosa_write(struct file *file, - const char __user *buf, size_t count, loff_t *ppos); -static unsigned int cosa_poll(struct file *file, poll_table *poll); -static int cosa_open(struct inode *inode, struct file *file); -static int cosa_release(struct inode *inode, struct file *file); -static long cosa_chardev_ioctl(struct file *file, unsigned int cmd, - unsigned long arg); -#ifdef COSA_FASYNC_WORKING -static int cosa_fasync(struct inode *inode, struct file *file, int on); -#endif - -static const struct file_operations cosa_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, - .read = cosa_read, - .write = cosa_write, - .poll = cosa_poll, - .unlocked_ioctl = 
cosa_chardev_ioctl, - .open = cosa_open, - .release = cosa_release, -#ifdef COSA_FASYNC_WORKING - .fasync = cosa_fasync, -#endif -}; - -/* Ioctls */ -static int cosa_start(struct cosa_data *cosa, int address); -static int cosa_reset(struct cosa_data *cosa); -static int cosa_download(struct cosa_data *cosa, void __user *a); -static int cosa_readmem(struct cosa_data *cosa, void __user *a); - -/* COSA/SRP ROM monitor */ -static int download(struct cosa_data *cosa, const char __user *data, int addr, int len); -static int startmicrocode(struct cosa_data *cosa, int address); -static int readmem(struct cosa_data *cosa, char __user *data, int addr, int len); -static int cosa_reset_and_read_id(struct cosa_data *cosa, char *id); - -/* Auxiliary functions */ -static int get_wait_data(struct cosa_data *cosa); -static int put_wait_data(struct cosa_data *cosa, int data); -static int puthexnumber(struct cosa_data *cosa, int number); -static void put_driver_status(struct cosa_data *cosa); -static void put_driver_status_nolock(struct cosa_data *cosa); - -/* Interrupt handling */ -static irqreturn_t cosa_interrupt(int irq, void *cosa); - -/* I/O ops debugging */ -#ifdef DEBUG_IO -static void debug_data_in(struct cosa_data *cosa, int data); -static void debug_data_out(struct cosa_data *cosa, int data); -static void debug_data_cmd(struct cosa_data *cosa, int data); -static void debug_status_in(struct cosa_data *cosa, int status); -static void debug_status_out(struct cosa_data *cosa, int status); -#endif - -static inline struct channel_data *dev_to_chan(struct net_device *dev) -{ - return (struct channel_data *)dev_to_hdlc(dev)->priv; -} - -/* ---------- Initialization stuff ---------- */ - -static int __init cosa_init(void) -{ - int i, err = 0; - - if (cosa_major > 0) { - if (register_chrdev(cosa_major, "cosa", &cosa_fops)) { - pr_warn("unable to get major %d\n", cosa_major); - err = -EIO; - goto out; - } - } else { - cosa_major = register_chrdev(0, "cosa", &cosa_fops); - if (!cosa_major) { - pr_warn("unable to register chardev\n"); - err = -EIO; - goto out; - } - } - for (i = 0; i < MAX_CARDS; i++) - cosa_cards[i].num = -1; - for (i = 0; io[i] != 0 && i < MAX_CARDS; i++) - cosa_probe(io[i], irq[i], dma[i]); - if (!nr_cards) { - pr_warn("no devices found\n"); - unregister_chrdev(cosa_major, "cosa"); - err = -ENODEV; - goto out; - } - cosa_class = class_create(THIS_MODULE, "cosa"); - if (IS_ERR(cosa_class)) { - err = PTR_ERR(cosa_class); - goto out_chrdev; - } - for (i = 0; i < nr_cards; i++) - device_create(cosa_class, NULL, MKDEV(cosa_major, i), NULL, - "cosa%d", i); - err = 0; - goto out; - -out_chrdev: - unregister_chrdev(cosa_major, "cosa"); -out: - return err; -} -module_init(cosa_init); - -static void __exit cosa_exit(void) -{ - struct cosa_data *cosa; - int i; - - for (i = 0; i < nr_cards; i++) - device_destroy(cosa_class, MKDEV(cosa_major, i)); - class_destroy(cosa_class); - - for (cosa = cosa_cards; nr_cards--; cosa++) { - /* Clean up the per-channel data */ - for (i = 0; i < cosa->nchannels; i++) { - /* Chardev driver has no alloc'd per-channel data */ - unregister_hdlc_device(cosa->chan[i].netdev); - free_netdev(cosa->chan[i].netdev); - } - /* Clean up the per-card data */ - kfree(cosa->chan); - kfree(cosa->bouncebuf); - free_irq(cosa->irq, cosa); - free_dma(cosa->dma); - release_region(cosa->datareg, is_8bit(cosa) ? 
2 : 4); - } - unregister_chrdev(cosa_major, "cosa"); -} -module_exit(cosa_exit); - -static const struct net_device_ops cosa_ops = { - .ndo_open = cosa_net_open, - .ndo_stop = cosa_net_close, - .ndo_start_xmit = hdlc_start_xmit, - .ndo_siocwandev = hdlc_ioctl, - .ndo_tx_timeout = cosa_net_timeout, -}; - -static int cosa_probe(int base, int irq, int dma) -{ - struct cosa_data *cosa = cosa_cards + nr_cards; - int i, err = 0; - - memset(cosa, 0, sizeof(struct cosa_data)); - - /* Checking validity of parameters: */ - /* IRQ should be 2-7 or 10-15; negative IRQ means autoprobe */ - if ((irq >= 0 && irq < 2) || irq > 15 || (irq < 10 && irq > 7)) { - pr_info("invalid IRQ %d\n", irq); - return -1; - } - /* I/O address should be between 0x100 and 0x3ff and should be - * multiple of 8. - */ - if (base < 0x100 || base > 0x3ff || base & 0x7) { - pr_info("invalid I/O address 0x%x\n", base); - return -1; - } - /* DMA should be 0,1 or 3-7 */ - if (dma < 0 || dma == 4 || dma > 7) { - pr_info("invalid DMA %d\n", dma); - return -1; - } - /* and finally, on 16-bit COSA DMA should be 4-7 and - * I/O base should not be multiple of 0x10 - */ - if (((base & 0x8) && dma < 4) || (!(base & 0x8) && dma > 3)) { - pr_info("8/16 bit base and DMA mismatch (base=0x%x, dma=%d)\n", - base, dma); - return -1; - } - - cosa->dma = dma; - cosa->datareg = base; - cosa->statusreg = is_8bit(cosa) ? base + 1 : base + 2; - spin_lock_init(&cosa->lock); - - if (!request_region(base, is_8bit(cosa) ? 2 : 4, "cosa")) - return -1; - - if (cosa_reset_and_read_id(cosa, cosa->id_string) < 0) { - printk(KERN_DEBUG "probe at 0x%x failed.\n", base); - err = -1; - goto err_out; - } - - /* Test the validity of identification string */ - if (!strncmp(cosa->id_string, "SRP", 3)) { - cosa->type = "srp"; - } else if (!strncmp(cosa->id_string, "COSA", 4)) { - cosa->type = is_8bit(cosa) ? "cosa8" : "cosa16"; - } else { -/* Print a warning only if we are not autoprobing */ -#ifndef COSA_ISA_AUTOPROBE - pr_info("valid signature not found at 0x%x\n", base); -#endif - err = -1; - goto err_out; - } - /* Update the name of the region now we know the type of card */ - release_region(base, is_8bit(cosa) ? 2 : 4); - if (!request_region(base, is_8bit(cosa) ? 2 : 4, cosa->type)) { - printk(KERN_DEBUG "changing name at 0x%x failed.\n", base); - return -1; - } - - /* Now do IRQ autoprobe */ - if (irq < 0) { - unsigned long irqs; -/* pr_info("IRQ autoprobe\n"); */ - irqs = probe_irq_on(); - /* Enable interrupt on tx buffer empty (it sure is) - * really sure ? - * FIXME: When this code is not used as module, we should - * probably call udelay() instead of the interruptible sleep. - */ - set_current_state(TASK_INTERRUPTIBLE); - cosa_putstatus(cosa, SR_TX_INT_ENA); - schedule_timeout(msecs_to_jiffies(300)); - irq = probe_irq_off(irqs); - /* Disable all IRQs from the card */ - cosa_putstatus(cosa, 0); - /* Empty the received data register */ - cosa_getdata8(cosa); - - if (irq < 0) { - pr_info("multiple interrupts obtained (%d, board at 0x%x)\n", - irq, cosa->datareg); - err = -1; - goto err_out; - } - if (irq == 0) { - pr_info("no interrupt obtained (board at 0x%x)\n", - cosa->datareg); - /* return -1; */ - } - } - - cosa->irq = irq; - cosa->num = nr_cards; - cosa->usage = 0; - cosa->nchannels = 2; /* FIXME: how to determine this? 
*/ - - if (request_irq(cosa->irq, cosa_interrupt, 0, cosa->type, cosa)) { - err = -1; - goto err_out; - } - if (request_dma(cosa->dma, cosa->type)) { - err = -1; - goto err_out1; - } - - cosa->bouncebuf = kmalloc(COSA_MTU, GFP_KERNEL | GFP_DMA); - if (!cosa->bouncebuf) { - err = -ENOMEM; - goto err_out2; - } - sprintf(cosa->name, "cosa%d", cosa->num); - - /* Initialize the per-channel data */ - cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL); - if (!cosa->chan) { - err = -ENOMEM; - goto err_out3; - } - - for (i = 0; i < cosa->nchannels; i++) { - struct channel_data *chan = &cosa->chan[i]; - - chan->cosa = cosa; - chan->num = i; - sprintf(chan->name, "cosa%dc%d", chan->cosa->num, i); - - /* Initialize the chardev data structures */ - mutex_init(&chan->rlock); - sema_init(&chan->wsem, 1); - - /* Register the network interface */ - chan->netdev = alloc_hdlcdev(chan); - if (!chan->netdev) { - pr_warn("%s: alloc_hdlcdev failed\n", chan->name); - err = -ENOMEM; - goto err_hdlcdev; - } - dev_to_hdlc(chan->netdev)->attach = cosa_net_attach; - dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx; - chan->netdev->netdev_ops = &cosa_ops; - chan->netdev->watchdog_timeo = TX_TIMEOUT; - chan->netdev->base_addr = chan->cosa->datareg; - chan->netdev->irq = chan->cosa->irq; - chan->netdev->dma = chan->cosa->dma; - err = register_hdlc_device(chan->netdev); - if (err) { - netdev_warn(chan->netdev, - "register_hdlc_device() failed\n"); - free_netdev(chan->netdev); - goto err_hdlcdev; - } - } - - pr_info("cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n", - cosa->num, cosa->id_string, cosa->type, - cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels); - - return nr_cards++; - -err_hdlcdev: - while (i-- > 0) { - unregister_hdlc_device(cosa->chan[i].netdev); - free_netdev(cosa->chan[i].netdev); - } - kfree(cosa->chan); -err_out3: - kfree(cosa->bouncebuf); -err_out2: - free_dma(cosa->dma); -err_out1: - free_irq(cosa->irq, cosa); -err_out: - release_region(cosa->datareg, is_8bit(cosa) ? 
2 : 4); - pr_notice("cosa%d: allocating resources failed\n", cosa->num); - return err; -} - -/*---------- network device ---------- */ - -static int cosa_net_attach(struct net_device *dev, unsigned short encoding, - unsigned short parity) -{ - if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) - return 0; - return -EINVAL; -} - -static int cosa_net_open(struct net_device *dev) -{ - struct channel_data *chan = dev_to_chan(dev); - int err; - unsigned long flags; - - if (!(chan->cosa->firmware_status & COSA_FW_START)) { - pr_notice("%s: start the firmware first (status %d)\n", - chan->cosa->name, chan->cosa->firmware_status); - return -EPERM; - } - spin_lock_irqsave(&chan->cosa->lock, flags); - if (chan->usage != 0) { - pr_warn("%s: cosa_net_open called with usage count %d\n", - chan->name, chan->usage); - spin_unlock_irqrestore(&chan->cosa->lock, flags); - return -EBUSY; - } - chan->setup_rx = cosa_net_setup_rx; - chan->tx_done = cosa_net_tx_done; - chan->rx_done = cosa_net_rx_done; - chan->usage = -1; - chan->cosa->usage++; - spin_unlock_irqrestore(&chan->cosa->lock, flags); - - err = hdlc_open(dev); - if (err) { - spin_lock_irqsave(&chan->cosa->lock, flags); - chan->usage = 0; - chan->cosa->usage--; - spin_unlock_irqrestore(&chan->cosa->lock, flags); - return err; - } - - netif_start_queue(dev); - cosa_enable_rx(chan); - return 0; -} - -static netdev_tx_t cosa_net_tx(struct sk_buff *skb, - struct net_device *dev) -{ - struct channel_data *chan = dev_to_chan(dev); - - netif_stop_queue(dev); - - chan->tx_skb = skb; - cosa_start_tx(chan, skb->data, skb->len); - return NETDEV_TX_OK; -} - -static void cosa_net_timeout(struct net_device *dev, unsigned int txqueue) -{ - struct channel_data *chan = dev_to_chan(dev); - - if (test_bit(RXBIT, &chan->cosa->rxtx)) { - chan->netdev->stats.rx_errors++; - chan->netdev->stats.rx_missed_errors++; - } else { - chan->netdev->stats.tx_errors++; - chan->netdev->stats.tx_aborted_errors++; - } - cosa_kick(chan->cosa); - if (chan->tx_skb) { - dev_kfree_skb(chan->tx_skb); - chan->tx_skb = NULL; - } - netif_wake_queue(dev); -} - -static int cosa_net_close(struct net_device *dev) -{ - struct channel_data *chan = dev_to_chan(dev); - unsigned long flags; - - netif_stop_queue(dev); - hdlc_close(dev); - cosa_disable_rx(chan); - spin_lock_irqsave(&chan->cosa->lock, flags); - if (chan->rx_skb) { - kfree_skb(chan->rx_skb); - chan->rx_skb = NULL; - } - if (chan->tx_skb) { - kfree_skb(chan->tx_skb); - chan->tx_skb = NULL; - } - chan->usage = 0; - chan->cosa->usage--; - spin_unlock_irqrestore(&chan->cosa->lock, flags); - return 0; -} - -static char *cosa_net_setup_rx(struct channel_data *chan, int size) -{ - /* We can safely fall back to non-dma-able memory, because we have - * the cosa->bouncebuf pre-allocated. 
- */ - kfree_skb(chan->rx_skb); - chan->rx_skb = dev_alloc_skb(size); - if (!chan->rx_skb) { - pr_notice("%s: Memory squeeze, dropping packet\n", chan->name); - chan->netdev->stats.rx_dropped++; - return NULL; - } - netif_trans_update(chan->netdev); - return skb_put(chan->rx_skb, size); -} - -static int cosa_net_rx_done(struct channel_data *chan) -{ - if (!chan->rx_skb) { - pr_warn("%s: rx_done with empty skb!\n", chan->name); - chan->netdev->stats.rx_errors++; - chan->netdev->stats.rx_frame_errors++; - return 0; - } - chan->rx_skb->protocol = hdlc_type_trans(chan->rx_skb, chan->netdev); - chan->rx_skb->dev = chan->netdev; - skb_reset_mac_header(chan->rx_skb); - chan->netdev->stats.rx_packets++; - chan->netdev->stats.rx_bytes += chan->cosa->rxsize; - netif_rx(chan->rx_skb); - chan->rx_skb = NULL; - return 0; -} - -/* ARGSUSED */ -static int cosa_net_tx_done(struct channel_data *chan, int size) -{ - if (!chan->tx_skb) { - pr_warn("%s: tx_done with empty skb!\n", chan->name); - chan->netdev->stats.tx_errors++; - chan->netdev->stats.tx_aborted_errors++; - return 1; - } - dev_consume_skb_irq(chan->tx_skb); - chan->tx_skb = NULL; - chan->netdev->stats.tx_packets++; - chan->netdev->stats.tx_bytes += size; - netif_wake_queue(chan->netdev); - return 1; -} - -/*---------- Character device ---------- */ - -static ssize_t cosa_read(struct file *file, - char __user *buf, size_t count, loff_t *ppos) -{ - DECLARE_WAITQUEUE(wait, current); - unsigned long flags; - struct channel_data *chan = file->private_data; - struct cosa_data *cosa = chan->cosa; - char *kbuf; - - if (!(cosa->firmware_status & COSA_FW_START)) { - pr_notice("%s: start the firmware first (status %d)\n", - cosa->name, cosa->firmware_status); - return -EPERM; - } - if (mutex_lock_interruptible(&chan->rlock)) - return -ERESTARTSYS; - - chan->rxdata = kmalloc(COSA_MTU, GFP_DMA | GFP_KERNEL); - if (!chan->rxdata) { - mutex_unlock(&chan->rlock); - return -ENOMEM; - } - - chan->rx_status = 0; - cosa_enable_rx(chan); - spin_lock_irqsave(&cosa->lock, flags); - add_wait_queue(&chan->rxwaitq, &wait); - while (!chan->rx_status) { - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irqrestore(&cosa->lock, flags); - schedule(); - spin_lock_irqsave(&cosa->lock, flags); - if (signal_pending(current) && chan->rx_status == 0) { - chan->rx_status = 1; - remove_wait_queue(&chan->rxwaitq, &wait); - __set_current_state(TASK_RUNNING); - spin_unlock_irqrestore(&cosa->lock, flags); - mutex_unlock(&chan->rlock); - return -ERESTARTSYS; - } - } - remove_wait_queue(&chan->rxwaitq, &wait); - __set_current_state(TASK_RUNNING); - kbuf = chan->rxdata; - count = chan->rxsize; - spin_unlock_irqrestore(&cosa->lock, flags); - mutex_unlock(&chan->rlock); - - if (copy_to_user(buf, kbuf, count)) { - kfree(kbuf); - return -EFAULT; - } - kfree(kbuf); - return count; -} - -static char *chrdev_setup_rx(struct channel_data *chan, int size) -{ - /* Expect size <= COSA_MTU */ - chan->rxsize = size; - return chan->rxdata; -} - -static int chrdev_rx_done(struct channel_data *chan) -{ - if (chan->rx_status) { /* Reader has died */ - kfree(chan->rxdata); - up(&chan->wsem); - } - chan->rx_status = 1; - wake_up_interruptible(&chan->rxwaitq); - return 1; -} - -static ssize_t cosa_write(struct file *file, - const char __user *buf, size_t count, loff_t *ppos) -{ - DECLARE_WAITQUEUE(wait, current); - struct channel_data *chan = file->private_data; - struct cosa_data *cosa = chan->cosa; - unsigned long flags; - char *kbuf; - - if (!(cosa->firmware_status & COSA_FW_START)) { - pr_notice("%s: 
start the firmware first (status %d)\n", - cosa->name, cosa->firmware_status); - return -EPERM; - } - if (down_interruptible(&chan->wsem)) - return -ERESTARTSYS; - - if (count > COSA_MTU) - count = COSA_MTU; - - /* Allocate the buffer */ - kbuf = kmalloc(count, GFP_KERNEL | GFP_DMA); - if (!kbuf) { - up(&chan->wsem); - return -ENOMEM; - } - if (copy_from_user(kbuf, buf, count)) { - up(&chan->wsem); - kfree(kbuf); - return -EFAULT; - } - chan->tx_status = 0; - cosa_start_tx(chan, kbuf, count); - - spin_lock_irqsave(&cosa->lock, flags); - add_wait_queue(&chan->txwaitq, &wait); - while (!chan->tx_status) { - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irqrestore(&cosa->lock, flags); - schedule(); - spin_lock_irqsave(&cosa->lock, flags); - if (signal_pending(current) && chan->tx_status == 0) { - chan->tx_status = 1; - remove_wait_queue(&chan->txwaitq, &wait); - __set_current_state(TASK_RUNNING); - chan->tx_status = 1; - spin_unlock_irqrestore(&cosa->lock, flags); - up(&chan->wsem); - kfree(kbuf); - return -ERESTARTSYS; - } - } - remove_wait_queue(&chan->txwaitq, &wait); - __set_current_state(TASK_RUNNING); - up(&chan->wsem); - spin_unlock_irqrestore(&cosa->lock, flags); - kfree(kbuf); - return count; -} - -static int chrdev_tx_done(struct channel_data *chan, int size) -{ - if (chan->tx_status) { /* Writer was interrupted */ - kfree(chan->txbuf); - up(&chan->wsem); - } - chan->tx_status = 1; - wake_up_interruptible(&chan->txwaitq); - return 1; -} - -static __poll_t cosa_poll(struct file *file, poll_table *poll) -{ - pr_info("cosa_poll is here\n"); - return 0; -} - -static int cosa_open(struct inode *inode, struct file *file) -{ - struct cosa_data *cosa; - struct channel_data *chan; - unsigned long flags; - int n; - int ret = 0; - - mutex_lock(&cosa_chardev_mutex); - n = iminor(file_inode(file)) >> CARD_MINOR_BITS; - if (n >= nr_cards) { - ret = -ENODEV; - goto out; - } - cosa = cosa_cards + n; - - n = iminor(file_inode(file)) & ((1 << CARD_MINOR_BITS) - 1); - if (n >= cosa->nchannels) { - ret = -ENODEV; - goto out; - } - chan = cosa->chan + n; - - file->private_data = chan; - - spin_lock_irqsave(&cosa->lock, flags); - - if (chan->usage < 0) { /* in netdev mode */ - spin_unlock_irqrestore(&cosa->lock, flags); - ret = -EBUSY; - goto out; - } - cosa->usage++; - chan->usage++; - - chan->tx_done = chrdev_tx_done; - chan->setup_rx = chrdev_setup_rx; - chan->rx_done = chrdev_rx_done; - spin_unlock_irqrestore(&cosa->lock, flags); -out: - mutex_unlock(&cosa_chardev_mutex); - return ret; -} - -static int cosa_release(struct inode *inode, struct file *file) -{ - struct channel_data *channel = file->private_data; - struct cosa_data *cosa; - unsigned long flags; - - cosa = channel->cosa; - spin_lock_irqsave(&cosa->lock, flags); - cosa->usage--; - channel->usage--; - spin_unlock_irqrestore(&cosa->lock, flags); - return 0; -} - -#ifdef COSA_FASYNC_WORKING -static struct fasync_struct *fasync[256] = { NULL, }; - -/* To be done ... */ -static int cosa_fasync(struct inode *inode, struct file *file, int on) -{ - int port = iminor(inode); - - return fasync_helper(inode, file, on, &fasync[port]); -} -#endif - -/* ---------- Ioctls ---------- */ - -/* Ioctl subroutines can safely be made inline, because they are called - * only from cosa_ioctl(). - */ -static inline int cosa_reset(struct cosa_data *cosa) -{ - char idstring[COSA_MAX_ID_STRING]; - - if (cosa->usage > 1) - pr_info("cosa%d: WARNING: reset requested with cosa->usage > 1 (%d). 
Odd things may happen.\n", - cosa->num, cosa->usage); - cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_START); - if (cosa_reset_and_read_id(cosa, idstring) < 0) { - pr_notice("cosa%d: reset failed\n", cosa->num); - return -EIO; - } - pr_info("cosa%d: resetting device: %s\n", cosa->num, idstring); - cosa->firmware_status |= COSA_FW_RESET; - return 0; -} - -/* High-level function to download data into COSA memory. Calls download() */ -static inline int cosa_download(struct cosa_data *cosa, void __user *arg) -{ - struct cosa_download d; - int i; - - if (cosa->usage > 1) - pr_info("%s: WARNING: download of microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n", - cosa->name, cosa->usage); - if (!(cosa->firmware_status & COSA_FW_RESET)) { - pr_notice("%s: reset the card first (status %d)\n", - cosa->name, cosa->firmware_status); - return -EPERM; - } - - if (copy_from_user(&d, arg, sizeof(d))) - return -EFAULT; - - if (d.addr < 0 || d.addr > COSA_MAX_FIRMWARE_SIZE) - return -EINVAL; - if (d.len < 0 || d.len > COSA_MAX_FIRMWARE_SIZE) - return -EINVAL; - - /* If something fails, force the user to reset the card */ - cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_DOWNLOAD); - - i = download(cosa, d.code, d.len, d.addr); - if (i < 0) { - pr_notice("cosa%d: microcode download failed: %d\n", - cosa->num, i); - return -EIO; - } - pr_info("cosa%d: downloading microcode - 0x%04x bytes at 0x%04x\n", - cosa->num, d.len, d.addr); - cosa->firmware_status |= COSA_FW_RESET | COSA_FW_DOWNLOAD; - return 0; -} - -/* High-level function to read COSA memory. Calls readmem() */ -static inline int cosa_readmem(struct cosa_data *cosa, void __user *arg) -{ - struct cosa_download d; - int i; - - if (cosa->usage > 1) - pr_info("cosa%d: WARNING: readmem requested with cosa->usage > 1 (%d). Odd things may happen.\n", - cosa->num, cosa->usage); - if (!(cosa->firmware_status & COSA_FW_RESET)) { - pr_notice("%s: reset the card first (status %d)\n", - cosa->name, cosa->firmware_status); - return -EPERM; - } - - if (copy_from_user(&d, arg, sizeof(d))) - return -EFAULT; - - /* If something fails, force the user to reset the card */ - cosa->firmware_status &= ~COSA_FW_RESET; - - i = readmem(cosa, d.code, d.len, d.addr); - if (i < 0) { - pr_notice("cosa%d: reading memory failed: %d\n", cosa->num, i); - return -EIO; - } - pr_info("cosa%d: reading card memory - 0x%04x bytes at 0x%04x\n", - cosa->num, d.len, d.addr); - cosa->firmware_status |= COSA_FW_RESET; - return 0; -} - -/* High-level function to start microcode. Calls startmicrocode(). */ -static inline int cosa_start(struct cosa_data *cosa, int address) -{ - int i; - - if (cosa->usage > 1) - pr_info("cosa%d: WARNING: start microcode requested with cosa->usage > 1 (%d). 
Odd things may happen.\n", - cosa->num, cosa->usage); - - if ((cosa->firmware_status & (COSA_FW_RESET | COSA_FW_DOWNLOAD)) - != (COSA_FW_RESET | COSA_FW_DOWNLOAD)) { - pr_notice("%s: download the microcode and/or reset the card first (status %d)\n", - cosa->name, cosa->firmware_status); - return -EPERM; - } - cosa->firmware_status &= ~COSA_FW_RESET; - i = startmicrocode(cosa, address); - if (i < 0) { - pr_notice("cosa%d: start microcode at 0x%04x failed: %d\n", - cosa->num, address, i); - return -EIO; - } - pr_info("cosa%d: starting microcode at 0x%04x\n", cosa->num, address); - cosa->startaddr = address; - cosa->firmware_status |= COSA_FW_START; - return 0; -} - -/* Buffer of size at least COSA_MAX_ID_STRING is expected */ -static inline int cosa_getidstr(struct cosa_data *cosa, char __user *string) -{ - int l = strlen(cosa->id_string) + 1; - - if (copy_to_user(string, cosa->id_string, l)) - return -EFAULT; - return l; -} - -/* Buffer of size at least COSA_MAX_ID_STRING is expected */ -static inline int cosa_gettype(struct cosa_data *cosa, char __user *string) -{ - int l = strlen(cosa->type) + 1; - - if (copy_to_user(string, cosa->type, l)) - return -EFAULT; - return l; -} - -static int cosa_ioctl_common(struct cosa_data *cosa, - struct channel_data *channel, unsigned int cmd, - unsigned long arg) -{ - void __user *argp = (void __user *)arg; - - switch (cmd) { - case COSAIORSET: /* Reset the device */ - if (!capable(CAP_NET_ADMIN)) - return -EACCES; - return cosa_reset(cosa); - case COSAIOSTRT: /* Start the firmware */ - if (!capable(CAP_SYS_RAWIO)) - return -EACCES; - return cosa_start(cosa, arg); - case COSAIODOWNLD: /* Download the firmware */ - if (!capable(CAP_SYS_RAWIO)) - return -EACCES; - - return cosa_download(cosa, argp); - case COSAIORMEM: - if (!capable(CAP_SYS_RAWIO)) - return -EACCES; - return cosa_readmem(cosa, argp); - case COSAIORTYPE: - return cosa_gettype(cosa, argp); - case COSAIORIDSTR: - return cosa_getidstr(cosa, argp); - case COSAIONRCARDS: - return nr_cards; - case COSAIONRCHANS: - return cosa->nchannels; - case COSAIOBMSET: - if (!capable(CAP_SYS_RAWIO)) - return -EACCES; - if (is_8bit(cosa)) - return -EINVAL; - if (arg != COSA_BM_OFF && arg != COSA_BM_ON) - return -EINVAL; - cosa->busmaster = arg; - return 0; - case COSAIOBMGET: - return cosa->busmaster; - } - return -ENOIOCTLCMD; -} - -static long cosa_chardev_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct channel_data *channel = file->private_data; - struct cosa_data *cosa; - long ret; - - mutex_lock(&cosa_chardev_mutex); - cosa = channel->cosa; - ret = cosa_ioctl_common(cosa, channel, cmd, arg); - mutex_unlock(&cosa_chardev_mutex); - return ret; -} - -/*---------- HW layer interface ---------- */ - -/* The higher layer can bind itself to the HW layer by setting the callbacks - * in the channel_data structure and by using these routines. - */ -static void cosa_enable_rx(struct channel_data *chan) -{ - struct cosa_data *cosa = chan->cosa; - - if (!test_and_set_bit(chan->num, &cosa->rxbitmap)) - put_driver_status(cosa); -} - -static void cosa_disable_rx(struct channel_data *chan) -{ - struct cosa_data *cosa = chan->cosa; - - if (test_and_clear_bit(chan->num, &cosa->rxbitmap)) - put_driver_status(cosa); -} - -/* FIXME: This routine probably should check for cosa_start_tx() called when - * the previous transmit is still unfinished. In this case the non-zero - * return value should indicate to the caller that the queuing(sp?) up - * the transmit has failed. 
- */ -static int cosa_start_tx(struct channel_data *chan, char *buf, int len) -{ - struct cosa_data *cosa = chan->cosa; - unsigned long flags; -#ifdef DEBUG_DATA - int i; - - pr_info("cosa%dc%d: starting tx(0x%x)", - chan->cosa->num, chan->num, len); - for (i = 0; i < len; i++) - pr_cont(" %02x", buf[i]&0xff); - pr_cont("\n"); -#endif - spin_lock_irqsave(&cosa->lock, flags); - chan->txbuf = buf; - chan->txsize = len; - if (len > COSA_MTU) - chan->txsize = COSA_MTU; - spin_unlock_irqrestore(&cosa->lock, flags); - - /* Tell the firmware we are ready */ - set_bit(chan->num, &cosa->txbitmap); - put_driver_status(cosa); - - return 0; -} - -static void put_driver_status(struct cosa_data *cosa) -{ - unsigned long flags; - int status; - - spin_lock_irqsave(&cosa->lock, flags); - - status = (cosa->rxbitmap ? DRIVER_RX_READY : 0) - | (cosa->txbitmap ? DRIVER_TX_READY : 0) - | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT) - & DRIVER_TXMAP_MASK : 0); - if (!cosa->rxtx) { - if (cosa->rxbitmap | cosa->txbitmap) { - if (!cosa->enabled) { - cosa_putstatus(cosa, SR_RX_INT_ENA); -#ifdef DEBUG_IO - debug_status_out(cosa, SR_RX_INT_ENA); -#endif - cosa->enabled = 1; - } - } else if (cosa->enabled) { - cosa->enabled = 0; - cosa_putstatus(cosa, 0); -#ifdef DEBUG_IO - debug_status_out(cosa, 0); -#endif - } - cosa_putdata8(cosa, status); -#ifdef DEBUG_IO - debug_data_cmd(cosa, status); -#endif - } - spin_unlock_irqrestore(&cosa->lock, flags); -} - -static void put_driver_status_nolock(struct cosa_data *cosa) -{ - int status; - - status = (cosa->rxbitmap ? DRIVER_RX_READY : 0) - | (cosa->txbitmap ? DRIVER_TX_READY : 0) - | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT) - & DRIVER_TXMAP_MASK : 0); - - if (cosa->rxbitmap | cosa->txbitmap) { - cosa_putstatus(cosa, SR_RX_INT_ENA); -#ifdef DEBUG_IO - debug_status_out(cosa, SR_RX_INT_ENA); -#endif - cosa->enabled = 1; - } else { - cosa_putstatus(cosa, 0); -#ifdef DEBUG_IO - debug_status_out(cosa, 0); -#endif - cosa->enabled = 0; - } - cosa_putdata8(cosa, status); -#ifdef DEBUG_IO - debug_data_cmd(cosa, status); -#endif -} - -/* The "kickme" function: When the DMA times out, this is called to - * clean up the driver status. - * FIXME: Preliminary support, the interface is probably wrong. - */ -static void cosa_kick(struct cosa_data *cosa) -{ - unsigned long flags, flags1; - char *s = "(probably) IRQ"; - - if (test_bit(RXBIT, &cosa->rxtx)) - s = "RX DMA"; - if (test_bit(TXBIT, &cosa->rxtx)) - s = "TX DMA"; - - pr_info("%s: %s timeout - restarting\n", cosa->name, s); - spin_lock_irqsave(&cosa->lock, flags); - cosa->rxtx = 0; - - flags1 = claim_dma_lock(); - disable_dma(cosa->dma); - clear_dma_ff(cosa->dma); - release_dma_lock(flags1); - - /* FIXME: Anything else? */ - udelay(100); - cosa_putstatus(cosa, 0); - udelay(100); - (void)cosa_getdata8(cosa); - udelay(100); - cosa_putdata8(cosa, 0); - udelay(100); - put_driver_status_nolock(cosa); - spin_unlock_irqrestore(&cosa->lock, flags); -} - -/* Check if the whole buffer is DMA-able. It means it is below the 16M of - * physical memory and doesn't span the 64k boundary. For now it seems - * SKB's never do this, but we'll check this anyway. 
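A worked example for the boundary test in cosa_dma_able() below: for len < 64 KiB (always true here, since len is capped at COSA_MTU), the buffer crosses a 64 KiB physical boundary exactly when bit 16 of the start and end addresses differ, which is what (b ^ (b + len)) & 0x10000 detects:

/* b = 0x1fff0, len = 0x20:
 *	b + len       = 0x20010
 *	b ^ (b + len) = 0x3ffe0
 *	& 0x10000     = 0x10000  -> nonzero: the buffer straddles 0x20000
 * Such a buffer is rejected and copied through the pre-allocated
 * GFP_DMA bounce buffer instead. */
static bool demo_crosses_64k(unsigned long b, unsigned long len)
{
	return (b ^ (b + len)) & 0x10000;	/* valid only for len < 0x10000 */
}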
- */ -static int cosa_dma_able(struct channel_data *chan, char *buf, int len) -{ - static int count; - unsigned long b = (unsigned long)buf; - - if (b + len >= MAX_DMA_ADDRESS) - return 0; - if ((b ^ (b + len)) & 0x10000) { - if (count++ < 5) - pr_info("%s: packet spanning a 64k boundary\n", - chan->name); - return 0; - } - return 1; -} - -/* ---------- The SRP/COSA ROM monitor functions ---------- */ - -/* Downloading SRP microcode: say "w" to SRP monitor, it answers by "w=", - * drivers need to say 4-digit hex number meaning start address of the microcode - * separated by a single space. Monitor replies by saying " =". Now driver - * has to write 4-digit hex number meaning the last byte address ended - * by a single space. Monitor has to reply with a space. Now the download - * begins. After the download monitor replies with "\r\n." (CR LF dot). - */ -static int download(struct cosa_data *cosa, const char __user *microcode, int length, int address) -{ - int i; - - if (put_wait_data(cosa, 'w') == -1) - return -1; - if ((i=get_wait_data(cosa)) != 'w') { printk("dnld: 0x%04x\n",i); return -2;} - if (get_wait_data(cosa) != '=') - return -3; - - if (puthexnumber(cosa, address) < 0) - return -4; - if (put_wait_data(cosa, ' ') == -1) - return -10; - if (get_wait_data(cosa) != ' ') - return -11; - if (get_wait_data(cosa) != '=') - return -12; - - if (puthexnumber(cosa, address + length - 1) < 0) - return -13; - if (put_wait_data(cosa, ' ') == -1) - return -18; - if (get_wait_data(cosa) != ' ') - return -19; - - while (length--) { - char c; -#ifndef SRP_DOWNLOAD_AT_BOOT - if (get_user(c, microcode)) - return -23; /* ??? */ -#else - c = *microcode; -#endif - if (put_wait_data(cosa, c) == -1) - return -20; - microcode++; - } - - if (get_wait_data(cosa) != '\r') - return -21; - if (get_wait_data(cosa) != '\n') - return -22; - if (get_wait_data(cosa) != '.') - return -23; -#if 0 - printk(KERN_DEBUG "cosa%d: download completed.\n", cosa->num); -#endif - return 0; -} - -/* Starting microcode is done via the "g" command of the SRP monitor. - * The chat should be the following: "g" "g=" "<addr><CR>" - * "<CR><CR><LF><CR><LF>". - */ -static int startmicrocode(struct cosa_data *cosa, int address) -{ - if (put_wait_data(cosa, 'g') == -1) - return -1; - if (get_wait_data(cosa) != 'g') - return -2; - if (get_wait_data(cosa) != '=') - return -3; - - if (puthexnumber(cosa, address) < 0) - return -4; - if (put_wait_data(cosa, '\r') == -1) - return -5; - - if (get_wait_data(cosa) != '\r') - return -6; - if (get_wait_data(cosa) != '\r') - return -7; - if (get_wait_data(cosa) != '\n') - return -8; - if (get_wait_data(cosa) != '\r') - return -9; - if (get_wait_data(cosa) != '\n') - return -10; -#if 0 - printk(KERN_DEBUG "cosa%d: microcode started\n", cosa->num); -#endif - return 0; -} - -/* Reading memory is done via the "r" command of the SRP monitor. - * The chat is the following "r" "r=" "<addr> " " =" "<last_byte> " " " - * Then driver can read the data and the conversation is finished - * by SRP monitor sending "<CR><LF>." (dot at the end). - * - * This routine is not needed during the normal operation and serves - * for debugging purposes only. 
- */ -static int readmem(struct cosa_data *cosa, char __user *microcode, int length, int address) -{ - if (put_wait_data(cosa, 'r') == -1) - return -1; - if ((get_wait_data(cosa)) != 'r') - return -2; - if ((get_wait_data(cosa)) != '=') - return -3; - - if (puthexnumber(cosa, address) < 0) - return -4; - if (put_wait_data(cosa, ' ') == -1) - return -5; - if (get_wait_data(cosa) != ' ') - return -6; - if (get_wait_data(cosa) != '=') - return -7; - - if (puthexnumber(cosa, address + length - 1) < 0) - return -8; - if (put_wait_data(cosa, ' ') == -1) - return -9; - if (get_wait_data(cosa) != ' ') - return -10; - - while (length--) { - char c; - int i; - - i = get_wait_data(cosa); - if (i == -1) { - pr_info("0x%04x bytes remaining\n", length); - return -11; - } - c = i; -#if 1 - if (put_user(c, microcode)) - return -23; /* ??? */ -#else - *microcode = c; -#endif - microcode++; - } - - if (get_wait_data(cosa) != '\r') - return -21; - if (get_wait_data(cosa) != '\n') - return -22; - if (get_wait_data(cosa) != '.') - return -23; -#if 0 - printk(KERN_DEBUG "cosa%d: readmem completed.\n", cosa->num); -#endif - return 0; -} - -/* This function resets the device and reads the initial prompt - * of the device's ROM monitor. - */ -static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring) -{ - int i = 0, id = 0, prev = 0, curr = 0; - - /* Reset the card ... */ - cosa_putstatus(cosa, 0); - cosa_getdata8(cosa); - cosa_putstatus(cosa, SR_RST); - msleep(500); - /* Disable all IRQs from the card */ - cosa_putstatus(cosa, 0); - - /* Try to read the ID string. The card then prints out the - * identification string ended by the "\n\x2e". - * - * The following loop is indexed through i (instead of id) - * to avoid looping forever when for any reason - * the port returns '\r', '\n' or '\x2e' permanently. - */ - for (i = 0; i < COSA_MAX_ID_STRING - 1; i++, prev = curr) { - curr = get_wait_data(cosa); - if (curr == -1) - return -1; - - curr &= 0xff; - if (curr != '\r' && curr != '\n' && curr != 0x2e) - idstring[id++] = curr; - if (curr == 0x2e && prev == '\n') - break; - } - /* Perhaps we should fail when i==COSA_MAX_ID_STRING-1 ? */ - idstring[id] = '\0'; - return id; -} - -/* ---------- Auxiliary routines for COSA/SRP monitor ---------- */ - -/* This routine gets the data byte from the card waiting for the SR_RX_RDY - * bit to be set in a loop. It should be used in the exceptional cases - * only (for example when resetting the card or downloading the firmware. - */ -static int get_wait_data(struct cosa_data *cosa) -{ - int retries = 1000; - - while (--retries) { - /* read data and return them */ - if (cosa_getstatus(cosa) & SR_RX_RDY) { - short r; - - r = cosa_getdata8(cosa); -#if 0 - pr_info("get_wait_data returning after %d retries\n", - 999 - retries); -#endif - return r; - } - /* sleep if not ready to read */ - schedule_timeout_interruptible(1); - } - pr_info("timeout in get_wait_data (status 0x%x)\n", - cosa_getstatus(cosa)); - return -1; -} - -/* This routine puts the data byte to the card waiting for the SR_TX_RDY - * bit to be set in a loop. It should be used in the exceptional cases - * only (for example when resetting the card or downloading the firmware). 
- */ -static int put_wait_data(struct cosa_data *cosa, int data) -{ - int retries = 1000; - - while (--retries) { - /* read data and return them */ - if (cosa_getstatus(cosa) & SR_TX_RDY) { - cosa_putdata8(cosa, data); -#if 0 - pr_info("Putdata: %d retries\n", 999 - retries); -#endif - return 0; - } -#if 0 - /* sleep if not ready to read */ - schedule_timeout_interruptible(1); -#endif - } - pr_info("cosa%d: timeout in put_wait_data (status 0x%x)\n", - cosa->num, cosa_getstatus(cosa)); - return -1; -} - -/* The following routine puts the hexadecimal number into the SRP monitor - * and verifies the proper echo of the sent bytes. Returns 0 on success, - * negative number on failure (-1,-3,-5,-7) means that put_wait_data() failed, - * (-2,-4,-6,-8) means that reading echo failed. - */ -static int puthexnumber(struct cosa_data *cosa, int number) -{ - char temp[5]; - int i; - - /* Well, I should probably replace this by something faster. */ - sprintf(temp, "%04X", number); - for (i = 0; i < 4; i++) { - if (put_wait_data(cosa, temp[i]) == -1) { - pr_notice("cosa%d: puthexnumber failed to write byte %d\n", - cosa->num, i); - return -1 - 2 * i; - } - if (get_wait_data(cosa) != temp[i]) { - pr_notice("cosa%d: puthexhumber failed to read echo of byte %d\n", - cosa->num, i); - return -2 - 2 * i; - } - } - return 0; -} - -/* ---------- Interrupt routines ---------- */ - -/* There are three types of interrupt: - * At the beginning of transmit - this handled is in tx_interrupt(), - * at the beginning of receive - it is in rx_interrupt() and - * at the end of transmit/receive - it is the eot_interrupt() function. - * These functions are multiplexed by cosa_interrupt() according to the - * COSA status byte. I have moved the rx/tx/eot interrupt handling into - * separate functions to make it more readable. These functions are inline, - * so there should be no overhead of function call. - * - * In the COSA bus-master mode, we need to tell the card the address of a - * buffer. Unfortunately, COSA may be too slow for us, so we must busy-wait. - * It's time to use the bottom half :-( - */ - -/* Transmit interrupt routine - called when COSA is willing to obtain - * data from the OS. The most tricky part of the routine is selection - * of channel we (OS) want to send packet for. For SRP we should probably - * use the round-robin approach. The newer COSA firmwares have a simple - * flow-control - in the status word has bits 2 and 3 set to 1 means that the - * channel 0 or 1 doesn't want to receive data. - * - * It seems there is a bug in COSA firmware (need to trace it further): - * When the driver status says that the kernel has no more data for transmit - * (e.g. at the end of TX DMA) and then the kernel changes its mind - * (e.g. new packet is queued to hard_start_xmit()), the card issues - * the TX interrupt but does not mark the channel as ready-to-transmit. - * The fix seems to be to push the packet to COSA despite its request. - * We first try to obey the card's opinion, and then fall back to forced TX. - */ -static inline void tx_interrupt(struct cosa_data *cosa, int status) -{ - unsigned long flags, flags1; -#ifdef DEBUG_IRQS - pr_info("cosa%d: SR_DOWN_REQUEST status=0x%04x\n", cosa->num, status); -#endif - spin_lock_irqsave(&cosa->lock, flags); - set_bit(TXBIT, &cosa->rxtx); - if (!test_bit(IRQBIT, &cosa->rxtx)) { - /* flow control, see the comment above */ - int i = 0; - - if (!cosa->txbitmap) { - pr_warn("%s: No channel wants data in TX IRQ. 
Expect DMA timeout.\n", - cosa->name); - put_driver_status_nolock(cosa); - clear_bit(TXBIT, &cosa->rxtx); - spin_unlock_irqrestore(&cosa->lock, flags); - return; - } - while (1) { - cosa->txchan++; - i++; - if (cosa->txchan >= cosa->nchannels) - cosa->txchan = 0; - if (!(cosa->txbitmap & (1 << cosa->txchan))) - continue; - if (~status & - (1 << (cosa->txchan + DRIVER_TXMAP_SHIFT))) - break; - /* in second pass, accept first ready-to-TX channel */ - if (i > cosa->nchannels) { - /* Can be safely ignored */ -#ifdef DEBUG_IRQS - printk(KERN_DEBUG "%s: Forcing TX " - "to not-ready channel %d\n", - cosa->name, cosa->txchan); -#endif - break; - } - } - - cosa->txsize = cosa->chan[cosa->txchan].txsize; - if (cosa_dma_able(cosa->chan + cosa->txchan, - cosa->chan[cosa->txchan].txbuf, - cosa->txsize)) { - cosa->txbuf = cosa->chan[cosa->txchan].txbuf; - } else { - memcpy(cosa->bouncebuf, cosa->chan[cosa->txchan].txbuf, - cosa->txsize); - cosa->txbuf = cosa->bouncebuf; - } - } - - if (is_8bit(cosa)) { - if (!test_bit(IRQBIT, &cosa->rxtx)) { - cosa_putstatus(cosa, SR_TX_INT_ENA); - cosa_putdata8(cosa, ((cosa->txchan << 5) & 0xe0) | - ((cosa->txsize >> 8) & 0x1f)); -#ifdef DEBUG_IO - debug_status_out(cosa, SR_TX_INT_ENA); - debug_data_out(cosa, ((cosa->txchan << 5) & 0xe0) | - ((cosa->txsize >> 8) & 0x1f)); - debug_data_in(cosa, cosa_getdata8(cosa)); -#else - cosa_getdata8(cosa); -#endif - set_bit(IRQBIT, &cosa->rxtx); - spin_unlock_irqrestore(&cosa->lock, flags); - return; - } else { - clear_bit(IRQBIT, &cosa->rxtx); - cosa_putstatus(cosa, 0); - cosa_putdata8(cosa, cosa->txsize & 0xff); -#ifdef DEBUG_IO - debug_status_out(cosa, 0); - debug_data_out(cosa, cosa->txsize & 0xff); -#endif - } - } else { - cosa_putstatus(cosa, SR_TX_INT_ENA); - cosa_putdata16(cosa, ((cosa->txchan << 13) & 0xe000) - | (cosa->txsize & 0x1fff)); -#ifdef DEBUG_IO - debug_status_out(cosa, SR_TX_INT_ENA); - debug_data_out(cosa, ((cosa->txchan << 13) & 0xe000) | - (cosa->txsize & 0x1fff)); - debug_data_in(cosa, cosa_getdata8(cosa)); - debug_status_out(cosa, 0); -#else - cosa_getdata8(cosa); -#endif - cosa_putstatus(cosa, 0); - } - - if (cosa->busmaster) { - unsigned long addr = virt_to_bus(cosa->txbuf); - int count = 0; - - pr_info("busmaster IRQ\n"); - while (!(cosa_getstatus(cosa) & SR_TX_RDY)) { - count++; - udelay(10); - if (count > 1000) - break; - } - pr_info("status %x\n", cosa_getstatus(cosa)); - pr_info("ready after %d loops\n", count); - cosa_putdata16(cosa, (addr >> 16) & 0xffff); - - count = 0; - while (!(cosa_getstatus(cosa) & SR_TX_RDY)) { - count++; - if (count > 1000) - break; - udelay(10); - } - pr_info("ready after %d loops\n", count); - cosa_putdata16(cosa, addr & 0xffff); - flags1 = claim_dma_lock(); - set_dma_mode(cosa->dma, DMA_MODE_CASCADE); - enable_dma(cosa->dma); - release_dma_lock(flags1); - } else { - /* start the DMA */ - flags1 = claim_dma_lock(); - disable_dma(cosa->dma); - clear_dma_ff(cosa->dma); - set_dma_mode(cosa->dma, DMA_MODE_WRITE); - set_dma_addr(cosa->dma, virt_to_bus(cosa->txbuf)); - set_dma_count(cosa->dma, cosa->txsize); - enable_dma(cosa->dma); - release_dma_lock(flags1); - } - cosa_putstatus(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA); -#ifdef DEBUG_IO - debug_status_out(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA); -#endif - spin_unlock_irqrestore(&cosa->lock, flags); -} - -static inline void rx_interrupt(struct cosa_data *cosa, int status) -{ - unsigned long flags; -#ifdef DEBUG_IRQS - pr_info("cosa%d: SR_UP_REQUEST\n", cosa->num); -#endif - - spin_lock_irqsave(&cosa->lock, flags); - set_bit(RXBIT, 
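The channel/size word that tx_interrupt() above sends on 16-bit cards (and that rx_interrupt() below decodes) packs the channel number into the top three bits and the transfer size into the low thirteen. A standalone sketch of the packing and unpacking, using the same masks as the driver:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int chan = 2, size = 0x123;

	/* pack: channel in bits 15..13, size in bits 12..0 */
	uint16_t word = ((chan << 13) & 0xe000) | (size & 0x1fff);

	/* unpack, exactly as rx_interrupt() does */
	printf("word=0x%04x chan=%u size=0x%x\n",
	       word, (word & 0xe000) >> 13, word & 0x1fff);
	return 0;	/* prints word=0x4123 chan=2 size=0x123 */
}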
&cosa->rxtx); - - if (is_8bit(cosa)) { - if (!test_bit(IRQBIT, &cosa->rxtx)) { - set_bit(IRQBIT, &cosa->rxtx); - put_driver_status_nolock(cosa); - cosa->rxsize = cosa_getdata8(cosa) << 8; -#ifdef DEBUG_IO - debug_data_in(cosa, cosa->rxsize >> 8); -#endif - spin_unlock_irqrestore(&cosa->lock, flags); - return; - } else { - clear_bit(IRQBIT, &cosa->rxtx); - cosa->rxsize |= cosa_getdata8(cosa) & 0xff; -#ifdef DEBUG_IO - debug_data_in(cosa, cosa->rxsize & 0xff); -#endif -#if 0 - pr_info("cosa%d: receive rxsize = (0x%04x)\n", - cosa->num, cosa->rxsize); -#endif - } - } else { - cosa->rxsize = cosa_getdata16(cosa); -#ifdef DEBUG_IO - debug_data_in(cosa, cosa->rxsize); -#endif -#if 0 - pr_info("cosa%d: receive rxsize = (0x%04x)\n", - cosa->num, cosa->rxsize); -#endif - } - if (((cosa->rxsize & 0xe000) >> 13) >= cosa->nchannels) { - pr_warn("%s: rx for unknown channel (0x%04x)\n", - cosa->name, cosa->rxsize); - spin_unlock_irqrestore(&cosa->lock, flags); - goto reject; - } - cosa->rxchan = cosa->chan + ((cosa->rxsize & 0xe000) >> 13); - cosa->rxsize &= 0x1fff; - spin_unlock_irqrestore(&cosa->lock, flags); - - cosa->rxbuf = NULL; - if (cosa->rxchan->setup_rx) - cosa->rxbuf = cosa->rxchan->setup_rx(cosa->rxchan, cosa->rxsize); - - if (!cosa->rxbuf) { -reject: /* Reject the packet */ - pr_info("cosa%d: rejecting packet on channel %d\n", - cosa->num, cosa->rxchan->num); - cosa->rxbuf = cosa->bouncebuf; - } - - /* start the DMA */ - flags = claim_dma_lock(); - disable_dma(cosa->dma); - clear_dma_ff(cosa->dma); - set_dma_mode(cosa->dma, DMA_MODE_READ); - if (cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize & 0x1fff)) - set_dma_addr(cosa->dma, virt_to_bus(cosa->rxbuf)); - else - set_dma_addr(cosa->dma, virt_to_bus(cosa->bouncebuf)); - - set_dma_count(cosa->dma, (cosa->rxsize & 0x1fff)); - enable_dma(cosa->dma); - release_dma_lock(flags); - spin_lock_irqsave(&cosa->lock, flags); - cosa_putstatus(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA); - if (!is_8bit(cosa) && (status & SR_TX_RDY)) - cosa_putdata8(cosa, DRIVER_RX_READY); -#ifdef DEBUG_IO - debug_status_out(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA); - if (!is_8bit(cosa) && (status & SR_TX_RDY)) - debug_data_cmd(cosa, DRIVER_RX_READY); -#endif - spin_unlock_irqrestore(&cosa->lock, flags); -} - -static inline void eot_interrupt(struct cosa_data *cosa, int status) -{ - unsigned long flags, flags1; - - spin_lock_irqsave(&cosa->lock, flags); - flags1 = claim_dma_lock(); - disable_dma(cosa->dma); - clear_dma_ff(cosa->dma); - release_dma_lock(flags1); - if (test_bit(TXBIT, &cosa->rxtx)) { - struct channel_data *chan = cosa->chan + cosa->txchan; - - if (chan->tx_done) - if (chan->tx_done(chan, cosa->txsize)) - clear_bit(chan->num, &cosa->txbitmap); - } else if (test_bit(RXBIT, &cosa->rxtx)) { -#ifdef DEBUG_DATA - { - int i; - - pr_info("cosa%dc%d: done rx(0x%x)", - cosa->num, cosa->rxchan->num, cosa->rxsize); - for (i = 0; i < cosa->rxsize; i++) - pr_cont(" %02x", cosa->rxbuf[i]&0xff); - pr_cont("\n"); - } -#endif - /* Packet for unknown channel? */ - if (cosa->rxbuf == cosa->bouncebuf) - goto out; - if (!cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize)) - memcpy(cosa->rxbuf, cosa->bouncebuf, cosa->rxsize); - if (cosa->rxchan->rx_done) - if (cosa->rxchan->rx_done(cosa->rxchan)) - clear_bit(cosa->rxchan->num, &cosa->rxbitmap); - } else { - pr_notice("cosa%d: unexpected EOT interrupt\n", cosa->num); - } - /* Clear the RXBIT, TXBIT and IRQBIT (the latest should be - * cleared anyway). 
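rx_interrupt() and eot_interrupt() above implement a classic bounce-buffer scheme: the DMA lands in the target buffer only when that buffer is DMA-addressable, otherwise it lands in the driver's bounce buffer and is copied out at end of transfer. A stripped-down model of that decision (dma_able() is a stand-in predicate, not the driver's cosa_dma_able()):

#include <stdio.h>
#include <string.h>

static char bounce[64];	/* driver-owned, known DMA-safe */

/* Stand-in predicate; pretend the caller's buffer is not DMA-safe. */
static int dma_able(const char *buf)
{
	(void)buf;
	return 0;
}

int main(void)
{
	char dest[64];
	char *target = dma_able(dest) ? dest : bounce;

	memcpy(target, "payload", 8);	/* stands in for the DMA transfer */
	if (target == bounce)		/* end-of-transfer fixup */
		memcpy(dest, bounce, 8);
	printf("%s\n", dest);
	return 0;
}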
We should do it as soon as possible - * so that we can tell the COSA we are done and to give it a time - * for recovery. - */ -out: - cosa->rxtx = 0; - put_driver_status_nolock(cosa); - spin_unlock_irqrestore(&cosa->lock, flags); -} - -static irqreturn_t cosa_interrupt(int irq, void *cosa_) -{ - unsigned status; - int count = 0; - struct cosa_data *cosa = cosa_; -again: - status = cosa_getstatus(cosa); -#ifdef DEBUG_IRQS - pr_info("cosa%d: got IRQ, status 0x%02x\n", cosa->num, status & 0xff); -#endif -#ifdef DEBUG_IO - debug_status_in(cosa, status); -#endif - switch (status & SR_CMD_FROM_SRP_MASK) { - case SR_DOWN_REQUEST: - tx_interrupt(cosa, status); - break; - case SR_UP_REQUEST: - rx_interrupt(cosa, status); - break; - case SR_END_OF_TRANSFER: - eot_interrupt(cosa, status); - break; - default: - /* We may be too fast for SRP. Try to wait a bit more. */ - if (count++ < 100) { - udelay(100); - goto again; - } - pr_info("cosa%d: unknown status 0x%02x in IRQ after %d retries\n", - cosa->num, status & 0xff, count); - } -#ifdef DEBUG_IRQS - if (count) - pr_info("%s: %d-times got unknown status in IRQ\n", - cosa->name, count); - else - pr_info("%s: returning from IRQ\n", cosa->name); -#endif - return IRQ_HANDLED; -} - -/* ---------- I/O debugging routines ---------- */ -/* These routines can be used to monitor COSA/SRP I/O and to printk() - * the data being transferred on the data and status I/O port in a - * readable way. - */ - -#ifdef DEBUG_IO -static void debug_status_in(struct cosa_data *cosa, int status) -{ - char *s; - - switch (status & SR_CMD_FROM_SRP_MASK) { - case SR_UP_REQUEST: - s = "RX_REQ"; - break; - case SR_DOWN_REQUEST: - s = "TX_REQ"; - break; - case SR_END_OF_TRANSFER: - s = "ET_REQ"; - break; - default: - s = "NO_REQ"; - break; - } - pr_info("%s: IO: status -> 0x%02x (%s%s%s%s)\n", - cosa->name, - status, - status & SR_USR_RQ ? "USR_RQ|" : "", - status & SR_TX_RDY ? "TX_RDY|" : "", - status & SR_RX_RDY ? "RX_RDY|" : "", - s); -} - -static void debug_status_out(struct cosa_data *cosa, int status) -{ - pr_info("%s: IO: status <- 0x%02x (%s%s%s%s%s%s)\n", - cosa->name, - status, - status & SR_RX_DMA_ENA ? "RXDMA|" : "!rxdma|", - status & SR_TX_DMA_ENA ? "TXDMA|" : "!txdma|", - status & SR_RST ? "RESET|" : "", - status & SR_USR_INT_ENA ? "USRINT|" : "!usrint|", - status & SR_TX_INT_ENA ? "TXINT|" : "!txint|", - status & SR_RX_INT_ENA ? "RXINT" : "!rxint"); -} - -static void debug_data_in(struct cosa_data *cosa, int data) -{ - pr_info("%s: IO: data -> 0x%04x\n", cosa->name, data); -} - -static void debug_data_out(struct cosa_data *cosa, int data) -{ - pr_info("%s: IO: data <- 0x%04x\n", cosa->name, data); -} - -static void debug_data_cmd(struct cosa_data *cosa, int data) -{ - pr_info("%s: IO: data <- 0x%04x (%s|%s)\n", - cosa->name, data, - data & SR_RDY_RCV ? "RX_RDY" : "!rx_rdy", - data & SR_RDY_SND ? 
"TX_RDY" : "!tx_rdy"); -} -#endif - -/* EOF -- this file has not been truncated */ diff --git a/drivers/net/wan/cosa.h b/drivers/net/wan/cosa.h deleted file mode 100644 index f57e0af9d56a..000000000000 --- a/drivers/net/wan/cosa.h +++ /dev/null @@ -1,104 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* $Id: cosa.h,v 1.6 1999/01/06 14:02:44 kas Exp $ */ - -/* - * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz> - */ - -#ifndef COSA_H__ -#define COSA_H__ - -#include <linux/ioctl.h> - -#ifdef __KERNEL__ -/* status register - output bits */ -#define SR_RX_DMA_ENA 0x04 /* receiver DMA enable bit */ -#define SR_TX_DMA_ENA 0x08 /* transmitter DMA enable bit */ -#define SR_RST 0x10 /* SRP reset */ -#define SR_USR_INT_ENA 0x20 /* user interrupt enable bit */ -#define SR_TX_INT_ENA 0x40 /* transmitter interrupt enable bit */ -#define SR_RX_INT_ENA 0x80 /* receiver interrupt enable bit */ - -/* status register - input bits */ -#define SR_USR_RQ 0x20 /* user interrupt request pending */ -#define SR_TX_RDY 0x40 /* transmitter empty (ready) */ -#define SR_RX_RDY 0x80 /* receiver data ready */ - -#define SR_UP_REQUEST 0x02 /* request from SRP to transfer data - up to PC */ -#define SR_DOWN_REQUEST 0x01 /* SRP is able to transfer data down - from PC to SRP */ -#define SR_END_OF_TRANSFER 0x03 /* SRP signalize end of - transfer (up or down) */ - -#define SR_CMD_FROM_SRP_MASK 0x03 /* mask to get SRP command */ - -/* bits in driver status byte definitions : */ -#define SR_RDY_RCV 0x01 /* ready to receive packet */ -#define SR_RDY_SND 0x02 /* ready to send packet */ -#define SR_CMD_PND 0x04 /* command pending */ /* not currently used */ - -/* ???? */ -#define SR_PKT_UP 0x01 /* transfer of packet up in progress */ -#define SR_PKT_DOWN 0x02 /* transfer of packet down in progress */ - -#endif /* __KERNEL__ */ - -#define SR_LOAD_ADDR 0x4400 /* SRP microcode load address */ -#define SR_START_ADDR 0x4400 /* SRP microcode start address */ - -#define COSA_LOAD_ADDR 0x400 /* SRP microcode load address */ -#define COSA_MAX_FIRMWARE_SIZE 0x10000 - -/* ioctls */ -struct cosa_download { - int addr, len; - char __user *code; -}; - -/* Reset the device */ -#define COSAIORSET _IO('C',0xf0) - -/* Start microcode at given address */ -#define COSAIOSTRT _IOW('C',0xf1, int) - -/* Read the block from the device memory */ -#define COSAIORMEM _IOWR('C',0xf2, struct cosa_download *) - /* actually the struct cosa_download itself; this is to keep - * the ioctl number same as in 2.4 in order to keep the user-space - * utils compatible. */ - -/* Write the block to the device memory (i.e. download the microcode) */ -#define COSAIODOWNLD _IOW('C',0xf2, struct cosa_download *) - /* actually the struct cosa_download itself; this is to keep - * the ioctl number same as in 2.4 in order to keep the user-space - * utils compatible. */ - -/* Read the device type (one of "srp", "cosa", and "cosa8" for now) */ -#define COSAIORTYPE _IOR('C',0xf3, char *) - -/* Read the device identification string */ -#define COSAIORIDSTR _IOR('C',0xf4, char *) -/* Maximum length of the identification string. 
*/ -#define COSA_MAX_ID_STRING 128 - -/* Increment/decrement the module usage count :-) */ -/* #define COSAIOMINC _IO('C',0xf5) */ -/* #define COSAIOMDEC _IO('C',0xf6) */ - -/* Get the total number of cards installed */ -#define COSAIONRCARDS _IO('C',0xf7) - -/* Get the number of channels on this card */ -#define COSAIONRCHANS _IO('C',0xf8) - -/* Set the driver for the bus-master operations */ -#define COSAIOBMSET _IOW('C', 0xf9, unsigned short) - -#define COSA_BM_OFF 0 /* Bus-mastering off - use ISA DMA (default) */ -#define COSA_BM_ON 1 /* Bus-mastering on - faster but untested */ - -/* Gets the busmaster status */ -#define COSAIOBMGET _IO('C', 0xfa) - -#endif /* !COSA_H__ */ diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c deleted file mode 100644 index e985e54ba75d..000000000000 --- a/drivers/net/wan/hostess_sv11.c +++ /dev/null @@ -1,336 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Comtrol SV11 card driver - * - * This is a slightly odd Z85230 synchronous driver. All you need to - * know basically is - * - * Its a genuine Z85230 - * - * It supports DMA using two DMA channels in SYNC mode. The driver doesn't - * use these facilities - * - * The control port is at io+1, the data at io+3 and turning off the DMA - * is done by writing 0 to io+4 - * - * The hardware does the bus handling to avoid the need for delays between - * touching control registers. - * - * Port B isn't wired (why - beats me) - * - * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/net.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <linux/if_arp.h> -#include <linux/delay.h> -#include <linux/hdlc.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <net/arp.h> - -#include <asm/irq.h> -#include <asm/io.h> -#include <asm/dma.h> -#include <asm/byteorder.h> -#include "z85230.h" - -static int dma; - -/* Network driver support routines - */ - -static inline struct z8530_dev *dev_to_sv(struct net_device *dev) -{ - return (struct z8530_dev *)dev_to_hdlc(dev)->priv; -} - -/* Frame receive. Simple for our card as we do HDLC and there - * is no funny garbage involved - */ - -static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) -{ - /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ - skb_trim(skb, skb->len - 2); - skb->protocol = hdlc_type_trans(skb, c->netdevice); - skb_reset_mac_header(skb); - skb->dev = c->netdevice; - /* Send it to the PPP layer. We don't have time to process - * it right now. 
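For reference, this is roughly how the historical user-space utilities would have invoked the cosa.h ioctls above, here reading the ROM monitor identification string; the /dev/cosa0 device path and the error handling are illustrative assumptions:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define COSAIORIDSTR		_IOR('C', 0xf4, char *)
#define COSA_MAX_ID_STRING	128

int main(void)
{
	char id[COSA_MAX_ID_STRING];
	int fd = open("/dev/cosa0", O_RDONLY);	/* assumed device node */

	if (fd < 0 || ioctl(fd, COSAIORIDSTR, id) < 0) {
		perror("COSAIORIDSTR");
		return 1;
	}
	printf("card id: %s\n", id);
	close(fd);
	return 0;
}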
- */ - netif_rx(skb); -} - -/* We've been placed in the UP state - */ - -static int hostess_open(struct net_device *d) -{ - struct z8530_dev *sv11 = dev_to_sv(d); - int err = -1; - - /* Link layer up - */ - switch (dma) { - case 0: - err = z8530_sync_open(d, &sv11->chanA); - break; - case 1: - err = z8530_sync_dma_open(d, &sv11->chanA); - break; - case 2: - err = z8530_sync_txdma_open(d, &sv11->chanA); - break; - } - - if (err) - return err; - - err = hdlc_open(d); - if (err) { - switch (dma) { - case 0: - z8530_sync_close(d, &sv11->chanA); - break; - case 1: - z8530_sync_dma_close(d, &sv11->chanA); - break; - case 2: - z8530_sync_txdma_close(d, &sv11->chanA); - break; - } - return err; - } - sv11->chanA.rx_function = hostess_input; - - /* - * Go go go - */ - - netif_start_queue(d); - return 0; -} - -static int hostess_close(struct net_device *d) -{ - struct z8530_dev *sv11 = dev_to_sv(d); - /* Discard new frames - */ - sv11->chanA.rx_function = z8530_null_rx; - - hdlc_close(d); - netif_stop_queue(d); - - switch (dma) { - case 0: - z8530_sync_close(d, &sv11->chanA); - break; - case 1: - z8530_sync_dma_close(d, &sv11->chanA); - break; - case 2: - z8530_sync_txdma_close(d, &sv11->chanA); - break; - } - return 0; -} - -/* Passed network frames, fire them downwind. - */ - -static netdev_tx_t hostess_queue_xmit(struct sk_buff *skb, - struct net_device *d) -{ - return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb); -} - -static int hostess_attach(struct net_device *dev, unsigned short encoding, - unsigned short parity) -{ - if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) - return 0; - return -EINVAL; -} - -/* Description block for a Comtrol Hostess SV11 card - */ - -static const struct net_device_ops hostess_ops = { - .ndo_open = hostess_open, - .ndo_stop = hostess_close, - .ndo_start_xmit = hdlc_start_xmit, - .ndo_siocwandev = hdlc_ioctl, -}; - -static struct z8530_dev *sv11_init(int iobase, int irq) -{ - struct z8530_dev *sv; - struct net_device *netdev; - /* Get the needed I/O space - */ - - if (!request_region(iobase, 8, "Comtrol SV11")) { - pr_warn("I/O 0x%X already in use\n", iobase); - return NULL; - } - - sv = kzalloc(sizeof(struct z8530_dev), GFP_KERNEL); - if (!sv) - goto err_kzalloc; - - /* Stuff in the I/O addressing - */ - - sv->active = 0; - - sv->chanA.ctrlio = iobase + 1; - sv->chanA.dataio = iobase + 3; - sv->chanB.ctrlio = -1; - sv->chanB.dataio = -1; - sv->chanA.irqs = &z8530_nop; - sv->chanB.irqs = &z8530_nop; - - outb(0, iobase + 4); /* DMA off */ - - /* We want a fast IRQ for this device. Actually we'd like an even faster - * IRQ ;) - This is one driver RtLinux is made for - */ - - if (request_irq(irq, z8530_interrupt, 0, - "Hostess SV11", sv) < 0) { - pr_warn("IRQ %d already in use\n", irq); - goto err_irq; - } - - sv->irq = irq; - sv->chanA.private = sv; - sv->chanA.dev = sv; - sv->chanB.dev = sv; - - if (dma) { - /* You can have DMA off or 1 and 3 thats the lot - * on the Comtrol. 
- */ - sv->chanA.txdma = 3; - sv->chanA.rxdma = 1; - outb(0x03 | 0x08, iobase + 4); /* DMA on */ - if (request_dma(sv->chanA.txdma, "Hostess SV/11 (TX)")) - goto err_txdma; - - if (dma == 1) - if (request_dma(sv->chanA.rxdma, "Hostess SV/11 (RX)")) - goto err_rxdma; - } - - /* Kill our private IRQ line the hostess can end up chattering - * until the configuration is set - */ - disable_irq(irq); - - /* Begin normal initialise - */ - - if (z8530_init(sv)) { - pr_err("Z8530 series device not found\n"); - enable_irq(irq); - goto free_dma; - } - z8530_channel_load(&sv->chanB, z8530_dead_port); - if (sv->type == Z85C30) - z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream); - else - z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream_85230); - - enable_irq(irq); - - /* Now we can take the IRQ - */ - - sv->chanA.netdevice = netdev = alloc_hdlcdev(sv); - if (!netdev) - goto free_dma; - - dev_to_hdlc(netdev)->attach = hostess_attach; - dev_to_hdlc(netdev)->xmit = hostess_queue_xmit; - netdev->netdev_ops = &hostess_ops; - netdev->base_addr = iobase; - netdev->irq = irq; - - if (register_hdlc_device(netdev)) { - pr_err("unable to register HDLC device\n"); - free_netdev(netdev); - goto free_dma; - } - - z8530_describe(sv, "I/O", iobase); - sv->active = 1; - return sv; - -free_dma: - if (dma == 1) - free_dma(sv->chanA.rxdma); -err_rxdma: - if (dma) - free_dma(sv->chanA.txdma); -err_txdma: - free_irq(irq, sv); -err_irq: - kfree(sv); -err_kzalloc: - release_region(iobase, 8); - return NULL; -} - -static void sv11_shutdown(struct z8530_dev *dev) -{ - unregister_hdlc_device(dev->chanA.netdevice); - z8530_shutdown(dev); - free_irq(dev->irq, dev); - if (dma) { - if (dma == 1) - free_dma(dev->chanA.rxdma); - free_dma(dev->chanA.txdma); - } - release_region(dev->chanA.ctrlio - 1, 8); - free_netdev(dev->chanA.netdevice); - kfree(dev); -} - -static int io = 0x200; -static int irq = 9; - -module_param_hw(io, int, ioport, 0); -MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card"); -module_param_hw(dma, int, dma, 0); -MODULE_PARM_DESC(dma, "Set this to 1 to use DMA1/DMA3 for TX/RX"); -module_param_hw(irq, int, irq, 0); -MODULE_PARM_DESC(irq, "The interrupt line setting for the Comtrol Hostess SV11 card"); - -MODULE_AUTHOR("Alan Cox"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11"); - -static struct z8530_dev *sv11_unit; - -static int sv11_module_init(void) -{ - sv11_unit = sv11_init(io, irq); - if (!sv11_unit) - return -ENODEV; - return 0; -} -module_init(sv11_module_init); - -static void sv11_module_cleanup(void) -{ - if (sv11_unit) - sv11_shutdown(sv11_unit); -} -module_exit(sv11_module_cleanup); diff --git a/drivers/net/wan/lmc/Makefile b/drivers/net/wan/lmc/Makefile deleted file mode 100644 index f00fe4491d69..000000000000 --- a/drivers/net/wan/lmc/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Makefile for the Lan Media 21140 based WAN cards -# Specifically the 1000,1200,5200,5245 -# - -obj-$(CONFIG_LANMEDIA) += lmc.o - -lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o - -# Like above except every packet gets echoed to KERN_DEBUG -# in hex -# -# DBDEF = \ -# -DDEBUG \ -# -DLMC_PACKET_LOG - -ccflags-y := $(DBGDEF) diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h deleted file mode 100644 index d7d59b4595f9..000000000000 --- a/drivers/net/wan/lmc/lmc.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LMC_H_ -#define _LMC_H_ - -#include "lmc_var.h" - -/* 
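The error unwinding in sv11_init() above is the usual goto ladder: resources are released in exactly the reverse order of acquisition, and each failure jumps to the label that frees everything acquired so far. A compilable schematic of the same shape (the acquire_*() helpers are placeholders, not driver calls):

#include <stdio.h>

static int acquire_region(void) { return 0; }	/* pretend: succeeds */
static int acquire_irq(void)    { return 0; }	/* pretend: succeeds */
static int acquire_dma(void)    { return -1; }	/* pretend: fails */

int main(void)
{
	if (acquire_region())
		return 1;
	if (acquire_irq())
		goto err_region;
	if (acquire_dma())
		goto err_irq;
	return 0;			/* fully initialised */

err_irq:
	puts("free irq");		/* undo acquire_irq() */
err_region:
	puts("release region");		/* undo acquire_region() */
	return 1;
}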
- * prototypes for everyone - */ -int lmc_probe(struct net_device * dev); -unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned - devaddr, unsigned regno); -void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr, - unsigned regno, unsigned data); -void lmc_led_on(lmc_softc_t * const, u32); -void lmc_led_off(lmc_softc_t * const, u32); -unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned); -void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned); -void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits); -void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits); - -int lmc_ioctl(struct net_device *dev, struct if_settings *ifs); - -extern lmc_media_t lmc_ds3_media; -extern lmc_media_t lmc_ssi_media; -extern lmc_media_t lmc_t1_media; -extern lmc_media_t lmc_hssi_media; - -#ifdef _DBG_EVENTLOG -static void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3); -#endif - -#endif diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c deleted file mode 100644 index 2b6051bda3fb..000000000000 --- a/drivers/net/wan/lmc/lmc_debug.c +++ /dev/null @@ -1,65 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <linux/types.h> -#include <linux/netdevice.h> -#include <linux/interrupt.h> - -#include "lmc_debug.h" - -/* - * Prints out len, max to 80 octets using printk, 20 per line - */ -#ifdef DEBUG -#ifdef LMC_PACKET_LOG -void lmcConsoleLog(char *type, unsigned char *ucData, int iLen) -{ - int iNewLine = 1; - char str[80], *pstr; - - sprintf(str, KERN_DEBUG "lmc: %s: ", type); - pstr = str+strlen(str); - - if(iLen > 240){ - printk(KERN_DEBUG "lmc: Printing 240 chars... out of: %d\n", iLen); - iLen = 240; - } - else{ - printk(KERN_DEBUG "lmc: Printing %d chars\n", iLen); - } - - while(iLen > 0) - { - sprintf(pstr, "%02x ", *ucData); - pstr+=3; - ucData++; - if( !(iNewLine % 20)) - { - sprintf(pstr, "\n"); - printk(str); - sprintf(str, KERN_DEBUG "lmc: %s: ", type); - pstr=str+strlen(str); - } - iNewLine++; - iLen--; - } - sprintf(pstr, "\n"); - printk(str); -} -#endif -#endif - -#ifdef DEBUG -u32 lmcEventLogIndex; -u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; - -void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3) -{ - lmcEventLogBuf[lmcEventLogIndex++] = EventNum; - lmcEventLogBuf[lmcEventLogIndex++] = arg2; - lmcEventLogBuf[lmcEventLogIndex++] = arg3; - lmcEventLogBuf[lmcEventLogIndex++] = jiffies; - - lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1; -} -#endif /* DEBUG */ - -/* --------------------------- end if_lmc_linux.c ------------------------ */ diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h deleted file mode 100644 index cfae9eddf003..000000000000 --- a/drivers/net/wan/lmc/lmc_debug.h +++ /dev/null @@ -1,52 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LMC_DEBUG_H_ -#define _LMC_DEBUG_H_ - -#ifdef DEBUG -#ifdef LMC_PACKET_LOG -#define LMC_CONSOLE_LOG(x,y,z) lmcConsoleLog((x), (y), (z)) -#else -#define LMC_CONSOLE_LOG(x,y,z) -#endif -#else -#define LMC_CONSOLE_LOG(x,y,z) -#endif - - - -/* Debug --- Event log definitions --- */ -/* EVENTLOGSIZE*EVENTLOGARGS needs to be a power of 2 */ -#define LMC_EVENTLOGSIZE 1024 /* number of events in eventlog */ -#define LMC_EVENTLOGARGS 4 /* number of args for each event */ - -/* event indicators */ -#define LMC_EVENT_XMT 1 -#define LMC_EVENT_XMTEND 2 -#define LMC_EVENT_XMTINT 3 -#define LMC_EVENT_RCVINT 4 -#define LMC_EVENT_RCVEND 5 -#define LMC_EVENT_INT 6 -#define LMC_EVENT_XMTINTTMO 7 -#define LMC_EVENT_XMTPRCTMO 8 -#define 
LMC_EVENT_INTEND 9 -#define LMC_EVENT_RESET1 10 -#define LMC_EVENT_RESET2 11 -#define LMC_EVENT_FORCEDRESET 12 -#define LMC_EVENT_WATCHDOG 13 -#define LMC_EVENT_BADPKTSURGE 14 -#define LMC_EVENT_TBUSY0 15 -#define LMC_EVENT_TBUSY1 16 - - -#ifdef DEBUG -extern u32 lmcEventLogIndex; -extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; -#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z)) -#else -#define LMC_EVENT_LOG(x,y,z) -#endif /* end ifdef _DBG_EVENTLOG */ - -void lmcConsoleLog(char *type, unsigned char *ucData, int iLen); -void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3); - -#endif diff --git a/drivers/net/wan/lmc/lmc_ioctl.h b/drivers/net/wan/lmc/lmc_ioctl.h deleted file mode 100644 index 8c65e2176e94..000000000000 --- a/drivers/net/wan/lmc/lmc_ioctl.h +++ /dev/null @@ -1,255 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _LMC_IOCTL_H_ -#define _LMC_IOCTL_H_ -/* $Id: lmc_ioctl.h,v 1.15 2000/04/06 12:16:43 asj Exp $ */ - - /* - * Copyright (c) 1997-2000 LAN Media Corporation (LMC) - * All rights reserved. www.lanmedia.com - * - * This code is written by: - * Andrew Stanley-Jones (asj@cban.com) - * Rob Braun (bbraun@vix.com), - * Michael Graff (explorer@vix.com) and - * Matt Thomas (matt@3am-software.com). - */ - -#define LMCIOCGINFO SIOCDEVPRIVATE+3 /* get current state */ -#define LMCIOCSINFO SIOCDEVPRIVATE+4 /* set state to user values */ -#define LMCIOCGETLMCSTATS SIOCDEVPRIVATE+5 -#define LMCIOCCLEARLMCSTATS SIOCDEVPRIVATE+6 -#define LMCIOCDUMPEVENTLOG SIOCDEVPRIVATE+7 -#define LMCIOCGETXINFO SIOCDEVPRIVATE+8 -#define LMCIOCSETCIRCUIT SIOCDEVPRIVATE+9 -#define LMCIOCUNUSEDATM SIOCDEVPRIVATE+10 -#define LMCIOCRESET SIOCDEVPRIVATE+11 -#define LMCIOCT1CONTROL SIOCDEVPRIVATE+12 -#define LMCIOCIFTYPE SIOCDEVPRIVATE+13 -#define LMCIOCXILINX SIOCDEVPRIVATE+14 - -#define LMC_CARDTYPE_UNKNOWN -1 -#define LMC_CARDTYPE_HSSI 1 /* probed card is a HSSI card */ -#define LMC_CARDTYPE_DS3 2 /* probed card is a DS3 card */ -#define LMC_CARDTYPE_SSI 3 /* probed card is a SSI card */ -#define LMC_CARDTYPE_T1 4 /* probed card is a T1 card */ - -#define LMC_CTL_CARDTYPE_LMC5200 0 /* HSSI */ -#define LMC_CTL_CARDTYPE_LMC5245 1 /* DS3 */ -#define LMC_CTL_CARDTYPE_LMC1000 2 /* SSI, V.35 */ -#define LMC_CTL_CARDTYPE_LMC1200 3 /* DS1 */ - -#define LMC_CTL_OFF 0 /* generic OFF value */ -#define LMC_CTL_ON 1 /* generic ON value */ - -#define LMC_CTL_CLOCK_SOURCE_EXT 0 /* clock off line */ -#define LMC_CTL_CLOCK_SOURCE_INT 1 /* internal clock */ - -#define LMC_CTL_CRC_LENGTH_16 16 -#define LMC_CTL_CRC_LENGTH_32 32 -#define LMC_CTL_CRC_BYTESIZE_2 2 -#define LMC_CTL_CRC_BYTESIZE_4 4 - - -#define LMC_CTL_CABLE_LENGTH_LT_100FT 0 /* DS3 cable < 100 feet */ -#define LMC_CTL_CABLE_LENGTH_GT_100FT 1 /* DS3 cable >= 100 feet */ - -#define LMC_CTL_CIRCUIT_TYPE_E1 0 -#define LMC_CTL_CIRCUIT_TYPE_T1 1 - -/* - * IFTYPE defines - */ -#define LMC_PPP 1 /* use generic HDLC interface */ -#define LMC_NET 2 /* use direct net interface */ -#define LMC_RAW 3 /* use direct net interface */ - -/* - * These are not in the least IOCTL related, but I want them common. 
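lmcEventLog() above depends on LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS being a power of two, so wrap-around is a single AND with a mask rather than a modulo. A self-contained demonstration of the same ring-buffer indexing, with the same constants:

#include <stdio.h>
#include <stdint.h>

#define LOGSIZE 1024	/* LMC_EVENTLOGSIZE */
#define LOGARGS 4	/* LMC_EVENTLOGARGS */
#define LOGMASK (LOGSIZE * LOGARGS - 1)	/* valid only for powers of two */

static uint32_t buf[LOGSIZE * LOGARGS];
static uint32_t idx;

static void log_event(uint32_t ev, uint32_t a, uint32_t b, uint32_t stamp)
{
	buf[idx++ & LOGMASK] = ev;
	buf[idx++ & LOGMASK] = a;
	buf[idx++ & LOGMASK] = b;
	buf[idx++ & LOGMASK] = stamp;	/* the driver stores jiffies here */
}

int main(void)
{
	for (uint32_t i = 0; i < 3000; i++)
		log_event(i, i, i, i);
	printf("write position wrapped to %u\n", idx & LOGMASK);
	return 0;
}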
- */ -/* - * assignments for the GPIO register on the DEC chip (common) - */ -#define LMC_GEP_INIT 0x01 /* 0: */ -#define LMC_GEP_RESET 0x02 /* 1: */ -#define LMC_GEP_MODE 0x10 /* 4: */ -#define LMC_GEP_DP 0x20 /* 5: */ -#define LMC_GEP_DATA 0x40 /* 6: serial out */ -#define LMC_GEP_CLK 0x80 /* 7: serial clock */ - -/* - * HSSI GPIO assignments - */ -#define LMC_GEP_HSSI_ST 0x04 /* 2: receive timing sense (deprecated) */ -#define LMC_GEP_HSSI_CLOCK 0x08 /* 3: clock source */ - -/* - * T1 GPIO assignments - */ -#define LMC_GEP_SSI_GENERATOR 0x04 /* 2: enable prog freq gen serial i/f */ -#define LMC_GEP_SSI_TXCLOCK 0x08 /* 3: provide clock on TXCLOCK output */ - -/* - * Common MII16 bits - */ -#define LMC_MII16_LED0 0x0080 -#define LMC_MII16_LED1 0x0100 -#define LMC_MII16_LED2 0x0200 -#define LMC_MII16_LED3 0x0400 /* Error, and the red one */ -#define LMC_MII16_LED_ALL 0x0780 /* LED bit mask */ -#define LMC_MII16_FIFO_RESET 0x0800 - -/* - * definitions for HSSI - */ -#define LMC_MII16_HSSI_TA 0x0001 -#define LMC_MII16_HSSI_CA 0x0002 -#define LMC_MII16_HSSI_LA 0x0004 -#define LMC_MII16_HSSI_LB 0x0008 -#define LMC_MII16_HSSI_LC 0x0010 -#define LMC_MII16_HSSI_TM 0x0020 -#define LMC_MII16_HSSI_CRC 0x0040 - -/* - * assignments for the MII register 16 (DS3) - */ -#define LMC_MII16_DS3_ZERO 0x0001 -#define LMC_MII16_DS3_TRLBK 0x0002 -#define LMC_MII16_DS3_LNLBK 0x0004 -#define LMC_MII16_DS3_RAIS 0x0008 -#define LMC_MII16_DS3_TAIS 0x0010 -#define LMC_MII16_DS3_BIST 0x0020 -#define LMC_MII16_DS3_DLOS 0x0040 -#define LMC_MII16_DS3_CRC 0x1000 -#define LMC_MII16_DS3_SCRAM 0x2000 -#define LMC_MII16_DS3_SCRAM_LARS 0x4000 - -/* Note: 2 pairs of LEDs where swapped by mistake - * in Xilinx code for DS3 & DS1 adapters */ -#define LMC_DS3_LED0 0x0100 /* bit 08 yellow */ -#define LMC_DS3_LED1 0x0080 /* bit 07 blue */ -#define LMC_DS3_LED2 0x0400 /* bit 10 green */ -#define LMC_DS3_LED3 0x0200 /* bit 09 red */ - -/* - * framer register 0 and 7 (7 is latched and reset on read) - */ -#define LMC_FRAMER_REG0_DLOS 0x80 /* digital loss of service */ -#define LMC_FRAMER_REG0_OOFS 0x40 /* out of frame sync */ -#define LMC_FRAMER_REG0_AIS 0x20 /* alarm indication signal */ -#define LMC_FRAMER_REG0_CIS 0x10 /* channel idle */ -#define LMC_FRAMER_REG0_LOC 0x08 /* loss of clock */ - -/* - * Framer register 9 contains the blue alarm signal - */ -#define LMC_FRAMER_REG9_RBLUE 0x02 /* Blue alarm failure */ - -/* - * Framer register 0x10 contains xbit error - */ -#define LMC_FRAMER_REG10_XBIT 0x01 /* X bit error alarm failure */ - -/* - * And SSI, LMC1000 - */ -#define LMC_MII16_SSI_DTR 0x0001 /* DTR output RW */ -#define LMC_MII16_SSI_DSR 0x0002 /* DSR input RO */ -#define LMC_MII16_SSI_RTS 0x0004 /* RTS output RW */ -#define LMC_MII16_SSI_CTS 0x0008 /* CTS input RO */ -#define LMC_MII16_SSI_DCD 0x0010 /* DCD input RO */ -#define LMC_MII16_SSI_RI 0x0020 /* RI input RO */ -#define LMC_MII16_SSI_CRC 0x1000 /* CRC select - RW */ - -/* - * bits 0x0080 through 0x0800 are generic, and described - * above with LMC_MII16_LED[0123] _LED_ALL, and _FIFO_RESET - */ -#define LMC_MII16_SSI_LL 0x1000 /* LL output RW */ -#define LMC_MII16_SSI_RL 0x2000 /* RL output RW */ -#define LMC_MII16_SSI_TM 0x4000 /* TM input RO */ -#define LMC_MII16_SSI_LOOP 0x8000 /* loopback enable RW */ - -/* - * Some of the MII16 bits are mirrored in the MII17 register as well, - * but let's keep thing separate for now, and get only the cable from - * the MII17. 
- */ -#define LMC_MII17_SSI_CABLE_MASK 0x0038 /* mask to extract the cable type */ -#define LMC_MII17_SSI_CABLE_SHIFT 3 /* shift to extract the cable type */ - -/* - * And T1, LMC1200 - */ -#define LMC_MII16_T1_UNUSED1 0x0003 -#define LMC_MII16_T1_XOE 0x0004 -#define LMC_MII16_T1_RST 0x0008 /* T1 chip reset - RW */ -#define LMC_MII16_T1_Z 0x0010 /* output impedance T1=1, E1=0 output - RW */ -#define LMC_MII16_T1_INTR 0x0020 /* interrupt from 8370 - RO */ -#define LMC_MII16_T1_ONESEC 0x0040 /* one second square wave - ro */ - -#define LMC_MII16_T1_LED0 0x0100 -#define LMC_MII16_T1_LED1 0x0080 -#define LMC_MII16_T1_LED2 0x0400 -#define LMC_MII16_T1_LED3 0x0200 -#define LMC_MII16_T1_FIFO_RESET 0x0800 - -#define LMC_MII16_T1_CRC 0x1000 /* CRC select - RW */ -#define LMC_MII16_T1_UNUSED2 0xe000 - - -/* 8370 framer registers */ - -#define T1FRAMER_ALARM1_STATUS 0x47 -#define T1FRAMER_ALARM2_STATUS 0x48 -#define T1FRAMER_FERR_LSB 0x50 -#define T1FRAMER_FERR_MSB 0x51 /* framing bit error counter */ -#define T1FRAMER_LCV_LSB 0x54 -#define T1FRAMER_LCV_MSB 0x55 /* line code violation counter */ -#define T1FRAMER_AERR 0x5A - -/* mask for the above AERR register */ -#define T1FRAMER_LOF_MASK (0x0f0) /* receive loss of frame */ -#define T1FRAMER_COFA_MASK (0x0c0) /* change of frame alignment */ -#define T1FRAMER_SEF_MASK (0x03) /* severely errored frame */ - -/* 8370 framer register ALM1 (0x47) values - * used to determine link status - */ - -#define T1F_SIGFRZ 0x01 /* signaling freeze */ -#define T1F_RLOF 0x02 /* receive loss of frame alignment */ -#define T1F_RLOS 0x04 /* receive loss of signal */ -#define T1F_RALOS 0x08 /* receive analog loss of signal or RCKI loss of clock */ -#define T1F_RAIS 0x10 /* receive alarm indication signal */ -#define T1F_UNUSED 0x20 -#define T1F_RYEL 0x40 /* receive yellow alarm */ -#define T1F_RMYEL 0x80 /* receive multiframe yellow alarm */ - -#define LMC_T1F_WRITE 0 -#define LMC_T1F_READ 1 - -typedef struct lmc_st1f_control { - int command; - int address; - int value; - char __user *data; -} lmc_t1f_control; - -enum lmc_xilinx_c { - lmc_xilinx_reset = 1, - lmc_xilinx_load_prom = 2, - lmc_xilinx_load = 3 -}; - -struct lmc_xilinx_control { - enum lmc_xilinx_c command; - int len; - char __user *data; -}; - -/* ------------------ end T1 defs ------------------- */ - -#define LMC_MII_LedMask 0x0780 -#define LMC_MII_LedBitPos 7 - -#endif diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c deleted file mode 100644 index 76c6b4f89890..000000000000 --- a/drivers/net/wan/lmc/lmc_main.c +++ /dev/null @@ -1,2009 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only - /* - * Copyright (c) 1997-2000 LAN Media Corporation (LMC) - * All rights reserved. www.lanmedia.com - * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> - * - * This code is written by: - * Andrew Stanley-Jones (asj@cban.com) - * Rob Braun (bbraun@vix.com), - * Michael Graff (explorer@vix.com) and - * Matt Thomas (matt@3am-software.com). - * - * With Help By: - * David Boggs - * Ron Crane - * Alan Cox - * - * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards. - * - * To control link specific options lmcctl is required. - * It can be obtained from ftp.lanmedia.com. - * - * Linux driver notes: - * Linux uses the device struct lmc_private to pass private information - * around. - * - * The initialization portion of this driver (the lmc_reset() and the - * lmc_dec_reset() functions, as well as the led controls and the - * lmc_initcsrs() functions. 
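Decoding the SSI cable type uses the LMC_MII17_SSI_CABLE_MASK / LMC_MII17_SSI_CABLE_SHIFT pair defined above; a standalone example with a made-up register readout:

#include <stdio.h>

#define LMC_MII17_SSI_CABLE_MASK  0x0038
#define LMC_MII17_SSI_CABLE_SHIFT 3

int main(void)
{
	unsigned int mii17 = 0x0028;	/* made-up register readout */
	unsigned int cable = (mii17 & LMC_MII17_SSI_CABLE_MASK)
				>> LMC_MII17_SSI_CABLE_SHIFT;

	printf("cable type %u\n", cable);	/* prints 5 */
	return 0;
}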
- * - * The watchdog function runs every second and checks to see if - * we still have link, and that the timing source is what we expected - * it to be. If link is lost, the interface is marked down, and - * we no longer can transmit. - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/string.h> -#include <linux/timer.h> -#include <linux/ptrace.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/hdlc.h> -#include <linux/in.h> -#include <linux/if_arp.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/inet.h> -#include <linux/bitops.h> -#include <asm/processor.h> /* Processor type for cache alignment. */ -#include <asm/io.h> -#include <asm/dma.h> -#include <linux/uaccess.h> -#include <linux/jiffies.h> -//#include <asm/spinlock.h> - -#define DRIVER_MAJOR_VERSION 1 -#define DRIVER_MINOR_VERSION 34 -#define DRIVER_SUB_VERSION 0 - -#define DRIVER_VERSION ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION) - -#include "lmc.h" -#include "lmc_var.h" -#include "lmc_ioctl.h" -#include "lmc_debug.h" -#include "lmc_proto.h" - -static int LMC_PKT_BUF_SZ = 1542; - -static const struct pci_device_id lmc_pci_tbl[] = { - { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, - PCI_VENDOR_ID_LMC, PCI_ANY_ID }, - { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, - PCI_ANY_ID, PCI_VENDOR_ID_LMC }, - { 0 } -}; - -MODULE_DEVICE_TABLE(pci, lmc_pci_tbl); -MODULE_LICENSE("GPL v2"); - - -static netdev_tx_t lmc_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static int lmc_rx (struct net_device *dev); -static int lmc_open(struct net_device *dev); -static int lmc_close(struct net_device *dev); -static struct net_device_stats *lmc_get_stats(struct net_device *dev); -static irqreturn_t lmc_interrupt(int irq, void *dev_instance); -static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size); -static void lmc_softreset(lmc_softc_t * const); -static void lmc_running_reset(struct net_device *dev); -static int lmc_ifdown(struct net_device * const); -static void lmc_watchdog(struct timer_list *t); -static void lmc_reset(lmc_softc_t * const sc); -static void lmc_dec_reset(lmc_softc_t * const sc); -static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue); - -/* - * linux reserves 16 device specific IOCTLs. We call them - * LMCIOC* to control various bits of our world. - */ -static int lmc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, - void __user *data, int cmd) /*fold00*/ -{ - lmc_softc_t *sc = dev_to_sc(dev); - lmc_ctl_t ctl; - int ret = -EOPNOTSUPP; - u16 regVal; - unsigned long flags; - - /* - * Most functions mess with the structure - * Disable interrupts while we do the polling - */ - - switch (cmd) { - /* - * Return current driver state. Since we keep this up - * To date internally, just copy this out to the user. 
- */ - case LMCIOCGINFO: /*fold01*/ - if (copy_to_user(data, &sc->ictl, sizeof(lmc_ctl_t))) - ret = -EFAULT; - else - ret = 0; - break; - - case LMCIOCSINFO: /*fold01*/ - if (!capable(CAP_NET_ADMIN)) { - ret = -EPERM; - break; - } - - if(dev->flags & IFF_UP){ - ret = -EBUSY; - break; - } - - if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) { - ret = -EFAULT; - break; - } - - spin_lock_irqsave(&sc->lmc_lock, flags); - sc->lmc_media->set_status (sc, &ctl); - - if(ctl.crc_length != sc->ictl.crc_length) { - sc->lmc_media->set_crc_length(sc, ctl.crc_length); - if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) - sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE; - else - sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE; - } - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - ret = 0; - break; - - case LMCIOCIFTYPE: /*fold01*/ - { - u16 old_type = sc->if_type; - u16 new_type; - - if (!capable(CAP_NET_ADMIN)) { - ret = -EPERM; - break; - } - - if (copy_from_user(&new_type, data, sizeof(u16))) { - ret = -EFAULT; - break; - } - - - if (new_type == old_type) - { - ret = 0 ; - break; /* no change */ - } - - spin_lock_irqsave(&sc->lmc_lock, flags); - lmc_proto_close(sc); - - sc->if_type = new_type; - lmc_proto_attach(sc); - ret = lmc_proto_open(sc); - spin_unlock_irqrestore(&sc->lmc_lock, flags); - break; - } - - case LMCIOCGETXINFO: /*fold01*/ - spin_lock_irqsave(&sc->lmc_lock, flags); - sc->lmc_xinfo.Magic0 = 0xBEEFCAFE; - - sc->lmc_xinfo.PciCardType = sc->lmc_cardtype; - sc->lmc_xinfo.PciSlotNumber = 0; - sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION; - sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION; - sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION; - sc->lmc_xinfo.XilinxRevisionNumber = - lmc_mii_readreg (sc, 0, 3) & 0xf; - sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ; - sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc); - sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16); - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - sc->lmc_xinfo.Magic1 = 0xDEADBEEF; - - if (copy_to_user(data, &sc->lmc_xinfo, sizeof(struct lmc_xinfo))) - ret = -EFAULT; - else - ret = 0; - - break; - - case LMCIOCGETLMCSTATS: - spin_lock_irqsave(&sc->lmc_lock, flags); - if (sc->lmc_cardtype == LMC_CARDTYPE_T1) { - lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB); - sc->extra_stats.framingBitErrorCount += - lmc_mii_readreg(sc, 0, 18) & 0xff; - lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB); - sc->extra_stats.framingBitErrorCount += - (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8; - lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB); - sc->extra_stats.lineCodeViolationCount += - lmc_mii_readreg(sc, 0, 18) & 0xff; - lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB); - sc->extra_stats.lineCodeViolationCount += - (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8; - lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR); - regVal = lmc_mii_readreg(sc, 0, 18) & 0xff; - - sc->extra_stats.lossOfFrameCount += - (regVal & T1FRAMER_LOF_MASK) >> 4; - sc->extra_stats.changeOfFrameAlignmentCount += - (regVal & T1FRAMER_COFA_MASK) >> 2; - sc->extra_stats.severelyErroredFrameCount += - regVal & T1FRAMER_SEF_MASK; - } - spin_unlock_irqrestore(&sc->lmc_lock, flags); - if (copy_to_user(data, &sc->lmc_device->stats, - sizeof(sc->lmc_device->stats)) || - copy_to_user(data + sizeof(sc->lmc_device->stats), - &sc->extra_stats, sizeof(sc->extra_stats))) - ret = -EFAULT; - else - ret = 0; - break; - - case LMCIOCCLEARLMCSTATS: - if (!capable(CAP_NET_ADMIN)) { - ret = -EPERM; - break; - } - - spin_lock_irqsave(&sc->lmc_lock, flags); - 
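The LMCIOCCLEARLMCSTATS path just below (and lmc_init_one() later on) restamps extra_stats.version_size with the driver version in the upper 16 bits and the stats payload size in the lower 16, so a user tool can sanity-check both at once. A small decoder; the sample value assumes driver 1.34 and a 512-byte payload:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* DRIVER_VERSION 0x122 is (1 << 8) + 34, i.e. driver 1.34 */
	uint32_t version_size = (0x122u << 16) + 512;	/* sample payload size */

	printf("driver %u.%u, payload %u bytes\n",
	       (version_size >> 24) & 0xff,	/* major */
	       (version_size >> 16) & 0xff,	/* minor */
	       version_size & 0xffff);		/* stats + extra_stats size */
	return 0;
}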
memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats)); - memset(&sc->extra_stats, 0, sizeof(sc->extra_stats)); - sc->extra_stats.check = STATCHECK; - sc->extra_stats.version_size = (DRIVER_VERSION << 16) + - sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats); - sc->extra_stats.lmc_cardtype = sc->lmc_cardtype; - spin_unlock_irqrestore(&sc->lmc_lock, flags); - ret = 0; - break; - - case LMCIOCSETCIRCUIT: /*fold01*/ - if (!capable(CAP_NET_ADMIN)){ - ret = -EPERM; - break; - } - - if(dev->flags & IFF_UP){ - ret = -EBUSY; - break; - } - - if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) { - ret = -EFAULT; - break; - } - spin_lock_irqsave(&sc->lmc_lock, flags); - sc->lmc_media->set_circuit_type(sc, ctl.circuit_type); - sc->ictl.circuit_type = ctl.circuit_type; - spin_unlock_irqrestore(&sc->lmc_lock, flags); - ret = 0; - - break; - - case LMCIOCRESET: /*fold01*/ - if (!capable(CAP_NET_ADMIN)){ - ret = -EPERM; - break; - } - - spin_lock_irqsave(&sc->lmc_lock, flags); - /* Reset driver and bring back to current state */ - printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16)); - lmc_running_reset (dev); - printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16)); - - LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16)); - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - ret = 0; - break; - -#ifdef DEBUG - case LMCIOCDUMPEVENTLOG: - if (copy_to_user(data, &lmcEventLogIndex, sizeof(u32))) { - ret = -EFAULT; - break; - } - if (copy_to_user(data + sizeof(u32), lmcEventLogBuf, - sizeof(lmcEventLogBuf))) - ret = -EFAULT; - else - ret = 0; - - break; -#endif /* end ifdef _DBG_EVENTLOG */ - case LMCIOCT1CONTROL: /*fold01*/ - if (sc->lmc_cardtype != LMC_CARDTYPE_T1){ - ret = -EOPNOTSUPP; - break; - } - break; - case LMCIOCXILINX: /*fold01*/ - { - struct lmc_xilinx_control xc; /*fold02*/ - - if (!capable(CAP_NET_ADMIN)){ - ret = -EPERM; - break; - } - - /* - * Stop the xwitter whlie we restart the hardware - */ - netif_stop_queue(dev); - - if (copy_from_user(&xc, data, sizeof(struct lmc_xilinx_control))) { - ret = -EFAULT; - break; - } - switch(xc.command){ - case lmc_xilinx_reset: /*fold02*/ - { - spin_lock_irqsave(&sc->lmc_lock, flags); - lmc_mii_readreg (sc, 0, 16); - - /* - * Make all of them 0 and make input - */ - lmc_gpio_mkinput(sc, 0xff); - - /* - * make the reset output - */ - lmc_gpio_mkoutput(sc, LMC_GEP_RESET); - - /* - * RESET low to force configuration. This also forces - * the transmitter clock to be internal, but we expect to reset - * that later anyway. 
- */ - - sc->lmc_gpio &= ~LMC_GEP_RESET; - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - - - /* - * hold for more than 10 microseconds - */ - udelay(50); - - sc->lmc_gpio |= LMC_GEP_RESET; - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - - - /* - * stop driving Xilinx-related signals - */ - lmc_gpio_mkinput(sc, 0xff); - - /* Reset the frammer hardware */ - sc->lmc_media->set_link_status (sc, 1); - sc->lmc_media->set_status (sc, NULL); -// lmc_softreset(sc); - - { - int i; - for(i = 0; i < 5; i++){ - lmc_led_on(sc, LMC_DS3_LED0); - mdelay(100); - lmc_led_off(sc, LMC_DS3_LED0); - lmc_led_on(sc, LMC_DS3_LED1); - mdelay(100); - lmc_led_off(sc, LMC_DS3_LED1); - lmc_led_on(sc, LMC_DS3_LED3); - mdelay(100); - lmc_led_off(sc, LMC_DS3_LED3); - lmc_led_on(sc, LMC_DS3_LED2); - mdelay(100); - lmc_led_off(sc, LMC_DS3_LED2); - } - } - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - - - ret = 0x0; - - } - - break; - case lmc_xilinx_load_prom: /*fold02*/ - { - int timeout = 500000; - spin_lock_irqsave(&sc->lmc_lock, flags); - lmc_mii_readreg (sc, 0, 16); - - /* - * Make all of them 0 and make input - */ - lmc_gpio_mkinput(sc, 0xff); - - /* - * make the reset output - */ - lmc_gpio_mkoutput(sc, LMC_GEP_DP | LMC_GEP_RESET); - - /* - * RESET low to force configuration. This also forces - * the transmitter clock to be internal, but we expect to reset - * that later anyway. - */ - - sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP); - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - - - /* - * hold for more than 10 microseconds - */ - udelay(50); - - sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET; - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - - /* - * busy wait for the chip to reset - */ - while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 && - (timeout-- > 0)) - cpu_relax(); - - - /* - * stop driving Xilinx-related signals - */ - lmc_gpio_mkinput(sc, 0xff); - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - ret = 0x0; - - - break; - - } - - case lmc_xilinx_load: /*fold02*/ - { - char *data; - int pos; - int timeout = 500000; - - if (!xc.data) { - ret = -EINVAL; - break; - } - - data = memdup_user(xc.data, xc.len); - if (IS_ERR(data)) { - ret = PTR_ERR(data); - break; - } - - printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data); - - spin_lock_irqsave(&sc->lmc_lock, flags); - lmc_gpio_mkinput(sc, 0xff); - - /* - * Clear the Xilinx and start prgramming from the DEC - */ - - /* - * Set ouput as: - * Reset: 0 (active) - * DP: 0 (active) - * Mode: 1 - * - */ - sc->lmc_gpio = 0x00; - sc->lmc_gpio &= ~LMC_GEP_DP; - sc->lmc_gpio &= ~LMC_GEP_RESET; - sc->lmc_gpio |= LMC_GEP_MODE; - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - - lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET); - - /* - * Wait at least 10 us 20 to be safe - */ - udelay(50); - - /* - * Clear reset and activate programming lines - * Reset: Input - * DP: Input - * Clock: Output - * Data: Output - * Mode: Output - */ - lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET); - - /* - * Set LOAD, DATA, Clock to 1 - */ - sc->lmc_gpio = 0x00; - sc->lmc_gpio |= LMC_GEP_MODE; - sc->lmc_gpio |= LMC_GEP_DATA; - sc->lmc_gpio |= LMC_GEP_CLK; - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - - lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE ); - - /* - * busy wait for the chip to reset - */ - while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 && - (timeout-- > 0)) - cpu_relax(); - - printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000-timeout); - - for(pos = 0; pos < xc.len; pos++){ 
- switch(data[pos]){ - case 0: - sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */ - break; - case 1: - sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */ - break; - default: - printk(KERN_WARNING "%s: Bad data in Xilinx programming data at %d, got %d, wanted 0 or 1\n", dev->name, pos, data[pos]); - sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */ - } - sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */ - sc->lmc_gpio |= LMC_GEP_MODE; - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - udelay(1); - - sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */ - sc->lmc_gpio |= LMC_GEP_MODE; - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - udelay(1); - } - if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){ - printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name); - } - else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){ - printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name); - } - else { - printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos); - } - - lmc_gpio_mkinput(sc, 0xff); - - sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET; - lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); - - sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET; - lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - kfree(data); - - ret = 0; - - break; - } - default: /*fold02*/ - ret = -EBADE; - break; - } - - netif_wake_queue(dev); - sc->lmc_txfull = 0; - - } - break; - default: - break; - } - - return ret; -} - - -/* the watchdog process that cruises around */ -static void lmc_watchdog(struct timer_list *t) /*fold00*/ -{ - lmc_softc_t *sc = from_timer(sc, t, timer); - struct net_device *dev = sc->lmc_device; - int link_status; - u32 ticks; - unsigned long flags; - - spin_lock_irqsave(&sc->lmc_lock, flags); - - if(sc->check != 0xBEAFCAFE){ - printk("LMC: Corrupt net_device struct, breaking out\n"); - spin_unlock_irqrestore(&sc->lmc_lock, flags); - return; - } - - - /* Make sure the tx jabber and rx watchdog are off, - * and the transmit and receive processes are running. - */ - - LMC_CSR_WRITE (sc, csr_15, 0x00000011); - sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN; - LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode); - - if (sc->lmc_ok == 0) - goto kick_timer; - - LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16)); - - /* --- begin time out check ----------------------------------- - * check for a transmit interrupt timeout: - * has the packets-transmitted vs. packets-serviced threshold been exceeded? */ - if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && - sc->lmc_device->stats.tx_packets > sc->lasttx_packets && - sc->tx_TimeoutInd == 0) - { - - /* wait for the watchdog to come around again */ - sc->tx_TimeoutInd = 1; - } - else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && - sc->lmc_device->stats.tx_packets > sc->lasttx_packets && - sc->tx_TimeoutInd) - { - - LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0); - - sc->tx_TimeoutDisplay = 1; - sc->extra_stats.tx_TimeoutCnt++; - - /* DEC chip is stuck, hit it with a RESET!!!! 
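The Xilinx programming loop above is plain GPIO bit-banging: present each configuration bit on the DATA line, then strobe the clock low and high around it. A compilable schematic of the shift-out; gpio_write() stands in for LMC_CSR_WRITE(sc, csr_gp, ...), and the bit values 0x40/0x80 match LMC_GEP_DATA/LMC_GEP_CLK:

#include <stdio.h>

#define GPIO_DATA 0x40	/* LMC_GEP_DATA */
#define GPIO_CLK  0x80	/* LMC_GEP_CLK */

/* Placeholder for LMC_CSR_WRITE(sc, csr_gp, ...). */
static void gpio_write(unsigned int v)
{
	printf("gpio <- 0x%02x\n", v);
}

static void shift_out(const unsigned char *bits, int len)
{
	unsigned int gpio = 0;

	for (int pos = 0; pos < len; pos++) {
		if (bits[pos])
			gpio |= GPIO_DATA;
		else
			gpio &= ~GPIO_DATA;
		gpio &= ~GPIO_CLK;	/* clock low, data line valid */
		gpio_write(gpio);
		gpio |= GPIO_CLK;	/* rising edge latches the bit */
		gpio_write(gpio);
	}
}

int main(void)
{
	const unsigned char bits[] = { 1, 0, 1 };

	shift_out(bits, 3);
	return 0;
}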
*/ - lmc_running_reset (dev); - - - /* look at receive & transmit process state to make sure they are running */ - LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0); - - /* look at: DSR - 02 for Reg 16 - * CTS - 08 - * DCD - 10 - * RI - 20 - * for Reg 17 - */ - LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17)); - - /* reset the transmit timeout detection flag */ - sc->tx_TimeoutInd = 0; - sc->lastlmc_taint_tx = sc->lmc_taint_tx; - sc->lasttx_packets = sc->lmc_device->stats.tx_packets; - } else { - sc->tx_TimeoutInd = 0; - sc->lastlmc_taint_tx = sc->lmc_taint_tx; - sc->lasttx_packets = sc->lmc_device->stats.tx_packets; - } - - /* --- end time out check ----------------------------------- */ - - - link_status = sc->lmc_media->get_link_status (sc); - - /* - * hardware level link lost, but the interface is marked as up. - * Mark it as down. - */ - if ((link_status == 0) && (sc->last_link_status != 0)) { - printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name); - sc->last_link_status = 0; - /* lmc_reset (sc); Why reset??? The link can go down ok */ - - /* Inform the world that link has been lost */ - netif_carrier_off(dev); - } - - /* - * hardware link is up, but the interface is marked as down. - * Bring it back up again. - */ - if (link_status != 0 && sc->last_link_status == 0) { - printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name); - sc->last_link_status = 1; - /* lmc_reset (sc); Again why reset??? */ - - netif_carrier_on(dev); - } - - /* Call media specific watchdog functions */ - sc->lmc_media->watchdog(sc); - - /* - * Poke the transmitter to make sure it - * never stops, even if we run out of mem - */ - LMC_CSR_WRITE(sc, csr_rxpoll, 0); - - /* - * Check for code that failed - * and try and fix it as appropriate - */ - if(sc->failed_ring == 1){ - /* - * Failed to setup the recv/xmit rin - * Try again - */ - sc->failed_ring = 0; - lmc_softreset(sc); - } - if(sc->failed_recv_alloc == 1){ - /* - * We failed to alloc mem in the - * interrupt handler, go through the rings - * and rebuild them - */ - sc->failed_recv_alloc = 0; - lmc_softreset(sc); - } - - - /* - * remember the timer value - */ -kick_timer: - - ticks = LMC_CSR_READ (sc, csr_gp_timer); - LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL); - sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff); - - /* - * restart this timer. 
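The transmit-timeout logic above is a deliberate two-strike debounce: the first watchdog tick that sees queued-but-unserviced transmits only arms tx_TimeoutInd, and lmc_running_reset() fires only if the condition still holds on the next tick. A compact model of that behaviour (the helper names are illustrative):

#include <stdio.h>

static int timeout_armed;	/* models sc->tx_TimeoutInd */

static void watchdog_tick(int tx_progress)
{
	if (tx_progress) {
		timeout_armed = 0;	/* progress clears the strike */
	} else if (!timeout_armed) {
		timeout_armed = 1;	/* first strike: wait one more tick */
	} else {
		puts("running reset");	/* second strike: reset the chip */
		timeout_armed = 0;
	}
}

int main(void)
{
	watchdog_tick(0);	/* arms the timeout */
	watchdog_tick(1);	/* progress disarms it */
	watchdog_tick(0);	/* arms again */
	watchdog_tick(0);	/* fires the reset */
	return 0;
}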
- */ - sc->timer.expires = jiffies + (HZ); - add_timer (&sc->timer); - - spin_unlock_irqrestore(&sc->lmc_lock, flags); -} - -static int lmc_attach(struct net_device *dev, unsigned short encoding, - unsigned short parity) -{ - if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) - return 0; - return -EINVAL; -} - -static const struct net_device_ops lmc_ops = { - .ndo_open = lmc_open, - .ndo_stop = lmc_close, - .ndo_start_xmit = hdlc_start_xmit, - .ndo_siocwandev = hdlc_ioctl, - .ndo_siocdevprivate = lmc_siocdevprivate, - .ndo_tx_timeout = lmc_driver_timeout, - .ndo_get_stats = lmc_get_stats, -}; - -static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - lmc_softc_t *sc; - struct net_device *dev; - u16 subdevice; - u16 AdapModelNum; - int err; - static int cards_found; - - err = pcim_enable_device(pdev); - if (err) { - printk(KERN_ERR "lmc: pci enable failed: %d\n", err); - return err; - } - - err = pci_request_regions(pdev, "lmc"); - if (err) { - printk(KERN_ERR "lmc: pci_request_region failed\n"); - return err; - } - - /* - * Allocate our own device structure - */ - sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL); - if (!sc) - return -ENOMEM; - - dev = alloc_hdlcdev(sc); - if (!dev) { - printk(KERN_ERR "lmc:alloc_netdev for device failed\n"); - return -ENOMEM; - } - - - dev->type = ARPHRD_HDLC; - dev_to_hdlc(dev)->xmit = lmc_start_xmit; - dev_to_hdlc(dev)->attach = lmc_attach; - dev->netdev_ops = &lmc_ops; - dev->watchdog_timeo = HZ; /* 1 second */ - dev->tx_queue_len = 100; - sc->lmc_device = dev; - sc->name = dev->name; - sc->if_type = LMC_PPP; - sc->check = 0xBEAFCAFE; - dev->base_addr = pci_resource_start(pdev, 0); - dev->irq = pdev->irq; - pci_set_drvdata(pdev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); - - /* - * This will get the protocol layer ready and do any 1 time init's - * Must have a valid sc and dev structure - */ - lmc_proto_attach(sc); - - /* Init the spin lock so can call it latter */ - - spin_lock_init(&sc->lmc_lock); - pci_set_master(pdev); - - printk(KERN_INFO "hdlc: detected at %lx, irq %d\n", - dev->base_addr, dev->irq); - - err = register_hdlc_device(dev); - if (err) { - printk(KERN_ERR "%s: register_netdev failed.\n", dev->name); - free_netdev(dev); - return err; - } - - sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN; - sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; - - /* - * - * Check either the subvendor or the subdevice, some systems reverse - * the setting in the bois, seems to be version and arch dependent? 
- * Fix the error, exchange the two values - */ - if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC) - subdevice = pdev->subsystem_vendor; - - switch (subdevice) { - case PCI_DEVICE_ID_LMC_HSSI: - printk(KERN_INFO "%s: LMC HSSI\n", dev->name); - sc->lmc_cardtype = LMC_CARDTYPE_HSSI; - sc->lmc_media = &lmc_hssi_media; - break; - case PCI_DEVICE_ID_LMC_DS3: - printk(KERN_INFO "%s: LMC DS3\n", dev->name); - sc->lmc_cardtype = LMC_CARDTYPE_DS3; - sc->lmc_media = &lmc_ds3_media; - break; - case PCI_DEVICE_ID_LMC_SSI: - printk(KERN_INFO "%s: LMC SSI\n", dev->name); - sc->lmc_cardtype = LMC_CARDTYPE_SSI; - sc->lmc_media = &lmc_ssi_media; - break; - case PCI_DEVICE_ID_LMC_T1: - printk(KERN_INFO "%s: LMC T1\n", dev->name); - sc->lmc_cardtype = LMC_CARDTYPE_T1; - sc->lmc_media = &lmc_t1_media; - break; - default: - printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name); - unregister_hdlc_device(dev); - return -EIO; - break; - } - - lmc_initcsrs (sc, dev->base_addr, 8); - - lmc_gpio_mkinput (sc, 0xff); - sc->lmc_gpio = 0; /* drive no signals yet */ - - sc->lmc_media->defaults (sc); - - sc->lmc_media->set_link_status (sc, LMC_LINK_UP); - - /* verify that the PCI Sub System ID matches the Adapter Model number - * from the MII register - */ - AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4; - - if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */ - subdevice != PCI_DEVICE_ID_LMC_T1) && - (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */ - subdevice != PCI_DEVICE_ID_LMC_SSI) && - (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */ - subdevice != PCI_DEVICE_ID_LMC_DS3) && - (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */ - subdevice != PCI_DEVICE_ID_LMC_HSSI)) - printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI" - " Subsystem ID = 0x%04x\n", - dev->name, AdapModelNum, subdevice); - - /* - * reset clock - */ - LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL); - - sc->board_idx = cards_found++; - sc->extra_stats.check = STATCHECK; - sc->extra_stats.version_size = (DRIVER_VERSION << 16) + - sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats); - sc->extra_stats.lmc_cardtype = sc->lmc_cardtype; - - sc->lmc_ok = 0; - sc->last_link_status = 0; - - return 0; -} - -/* - * Called from pci when removing module. - */ -static void lmc_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - if (dev) { - printk(KERN_DEBUG "%s: removing...\n", dev->name); - unregister_hdlc_device(dev); - free_netdev(dev); - } -} - -/* After this is called, packets can be sent. - * Does not initialize the addresses - */ -static int lmc_open(struct net_device *dev) -{ - lmc_softc_t *sc = dev_to_sc(dev); - int err; - - lmc_led_on(sc, LMC_DS3_LED0); - - lmc_dec_reset(sc); - lmc_reset(sc); - - LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0); - LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16), - lmc_mii_readreg(sc, 0, 17)); - - if (sc->lmc_ok) - return 0; - - lmc_softreset (sc); - - /* Since we have to use PCI bus, this should work on x86,alpha,ppc */ - if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){ - printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq); - return -EAGAIN; - } - sc->got_irq = 1; - - /* Assert Terminal Active */ - sc->lmc_miireg16 |= LMC_MII16_LED_ALL; - sc->lmc_media->set_link_status (sc, LMC_LINK_UP); - - /* - * reset to last state. 
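As the comment in lmc_init_one() above notes, some BIOSes swap the PCI subsystem vendor and device IDs, so the probe normalises them before the card-type switch. The check reduces to the following; 0x1376 is LAN Media's PCI vendor ID, and the sample IDs show what a swapping BIOS would report:

#include <stdio.h>

#define PCI_VENDOR_ID_LMC 0x1376	/* LAN Media, from pci_ids.h */

int main(void)
{
	/* sample IDs as a swapping BIOS would report them */
	unsigned short sub_vendor = 0x0005, sub_device = PCI_VENDOR_ID_LMC;
	unsigned short subdevice = sub_device;

	/* if the device slot holds the vendor ID, the fields are swapped */
	if (subdevice == PCI_VENDOR_ID_LMC)
		subdevice = sub_vendor;
	printf("subdevice = 0x%04x\n", subdevice);	/* prints 0x0005 */
	return 0;
}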
- */ - sc->lmc_media->set_status (sc, NULL); - - /* setup default bits to be used in tulip_desc_t transmit descriptor - * -baz */ - sc->TxDescriptControlInit = ( - LMC_TDES_INTERRUPT_ON_COMPLETION - | LMC_TDES_FIRST_SEGMENT - | LMC_TDES_LAST_SEGMENT - | LMC_TDES_SECOND_ADDR_CHAINED - | LMC_TDES_DISABLE_PADDING - ); - - if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) { - /* disable 32 bit CRC generated by ASIC */ - sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE; - } - sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length); - /* Acknoledge the Terminal Active and light LEDs */ - - /* dev->flags |= IFF_UP; */ - - if ((err = lmc_proto_open(sc)) != 0) - return err; - - netif_start_queue(dev); - sc->extra_stats.tx_tbusy0++; - - /* - * select what interrupts we want to get - */ - sc->lmc_intrmask = 0; - /* Should be using the default interrupt mask defined in the .h file. */ - sc->lmc_intrmask |= (TULIP_STS_NORMALINTR - | TULIP_STS_RXINTR - | TULIP_STS_TXINTR - | TULIP_STS_ABNRMLINTR - | TULIP_STS_SYSERROR - | TULIP_STS_TXSTOPPED - | TULIP_STS_TXUNDERFLOW - | TULIP_STS_RXSTOPPED - | TULIP_STS_RXNOBUF - ); - LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask); - - sc->lmc_cmdmode |= TULIP_CMD_TXRUN; - sc->lmc_cmdmode |= TULIP_CMD_RXRUN; - LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode); - - sc->lmc_ok = 1; /* Run watchdog */ - - /* - * Set the if up now - pfb - */ - - sc->last_link_status = 1; - - /* - * Setup a timer for the watchdog on probe, and start it running. - * Since lmc_ok == 0, it will be a NOP for now. - */ - timer_setup(&sc->timer, lmc_watchdog, 0); - sc->timer.expires = jiffies + HZ; - add_timer (&sc->timer); - - return 0; -} - -/* Total reset to compensate for the AdTran DSU doing bad things - * under heavy load - */ - -static void lmc_running_reset (struct net_device *dev) /*fold00*/ -{ - lmc_softc_t *sc = dev_to_sc(dev); - - /* stop interrupts */ - /* Clear the interrupt mask */ - LMC_CSR_WRITE (sc, csr_intr, 0x00000000); - - lmc_dec_reset (sc); - lmc_reset (sc); - lmc_softreset (sc); - /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */ - sc->lmc_media->set_link_status (sc, 1); - sc->lmc_media->set_status (sc, NULL); - - netif_wake_queue(dev); - - sc->lmc_txfull = 0; - sc->extra_stats.tx_tbusy0++; - - sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK; - LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask); - - sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN); - LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode); -} - - -/* This is what is called when you ifconfig down a device. - * This disables the timer for the watchdog and keepalives, - * and disables the irq for dev. 
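lmc_open() above seeds every transmit descriptor from a single precomputed control word: interrupt-on-completion, first and last segment (frames are never scattered), chained addressing, padding disabled, and, when 16-bit CRC is selected, the 21140's own CRC generation turned off so the Xilinx can append the FCS instead. A sketch of composing that template; the bit values follow the published Tulip TDES1 layout and are given here for illustration only.

#include <stdint.h>

/* DEC 21140 TDES1 control bits, per the Tulip documentation. */
#define TDES_IC   0x80000000u  /* interrupt on completion */
#define TDES_LS   0x40000000u  /* last segment */
#define TDES_FS   0x20000000u  /* first segment */
#define TDES_AC   0x04000000u  /* add-CRC disable */
#define TDES_TCH  0x01000000u  /* second address chained */
#define TDES_DPD  0x00800000u  /* disable padding */

/* Build the per-descriptor template once at open time. */
static uint32_t tx_desc_template(int crc16)
{
    uint32_t ctl = TDES_IC | TDES_FS | TDES_LS | TDES_TCH | TDES_DPD;

    if (crc16)   /* 16-bit CRC: keep the chip from appending its own */
        ctl |= TDES_AC;
    return ctl;
}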
- */ -static int lmc_close(struct net_device *dev) -{ - /* not calling release_region() as we should */ - lmc_softc_t *sc = dev_to_sc(dev); - - sc->lmc_ok = 0; - sc->lmc_media->set_link_status (sc, 0); - del_timer (&sc->timer); - lmc_proto_close(sc); - lmc_ifdown (dev); - - return 0; -} - -/* Ends the transfer of packets */ -/* When the interface goes down, this is called */ -static int lmc_ifdown (struct net_device *dev) /*fold00*/ -{ - lmc_softc_t *sc = dev_to_sc(dev); - u32 csr6; - int i; - - /* Don't let anything else go on right now */ - // dev->start = 0; - netif_stop_queue(dev); - sc->extra_stats.tx_tbusy1++; - - /* stop interrupts */ - /* Clear the interrupt mask */ - LMC_CSR_WRITE (sc, csr_intr, 0x00000000); - - /* Stop Tx and Rx on the chip */ - csr6 = LMC_CSR_READ (sc, csr_command); - csr6 &= ~LMC_DEC_ST; /* Turn off the Transmission bit */ - csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */ - LMC_CSR_WRITE (sc, csr_command, csr6); - - sc->lmc_device->stats.rx_missed_errors += - LMC_CSR_READ(sc, csr_missed_frames) & 0xffff; - - /* release the interrupt */ - if(sc->got_irq == 1){ - free_irq (dev->irq, dev); - sc->got_irq = 0; - } - - /* free skbuffs in the Rx queue */ - for (i = 0; i < LMC_RXDESCS; i++) - { - struct sk_buff *skb = sc->lmc_rxq[i]; - sc->lmc_rxq[i] = NULL; - sc->lmc_rxring[i].status = 0; - sc->lmc_rxring[i].length = 0; - sc->lmc_rxring[i].buffer1 = 0xDEADBEEF; - if (skb != NULL) - dev_kfree_skb(skb); - sc->lmc_rxq[i] = NULL; - } - - for (i = 0; i < LMC_TXDESCS; i++) - { - if (sc->lmc_txq[i] != NULL) - dev_kfree_skb(sc->lmc_txq[i]); - sc->lmc_txq[i] = NULL; - } - - lmc_led_off (sc, LMC_MII16_LED_ALL); - - netif_wake_queue(dev); - sc->extra_stats.tx_tbusy0++; - - return 0; -} - -/* Interrupt handling routine. This will take an incoming packet, or clean - * up after a trasmit. - */ -static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ -{ - struct net_device *dev = (struct net_device *) dev_instance; - lmc_softc_t *sc = dev_to_sc(dev); - u32 csr; - int i; - s32 stat; - unsigned int badtx; - int max_work = LMC_RXDESCS; - int handled = 0; - - spin_lock(&sc->lmc_lock); - - /* - * Read the csr to find what interrupts we have (if any) - */ - csr = LMC_CSR_READ (sc, csr_status); - - /* - * Make sure this is our interrupt - */ - if ( ! (csr & sc->lmc_intrmask)) { - goto lmc_int_fail_out; - } - - /* always go through this loop at least once */ - while (csr & sc->lmc_intrmask) { - handled = 1; - - /* - * Clear interrupt bits, we handle all case below - */ - LMC_CSR_WRITE (sc, csr_status, csr); - - /* - * One of - * - Transmit process timed out CSR5<1> - * - Transmit jabber timeout CSR5<3> - * - Transmit underflow CSR5<5> - * - Transmit Receiver buffer unavailable CSR5<7> - * - Receive process stopped CSR5<8> - * - Receive watchdog timeout CSR5<9> - * - Early transmit interrupt CSR5<10> - * - * Is this really right? Should we do a running reset for jabber? 
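lmc_ifdown() above tears the rings down in a fixed order: stop the queue, mask interrupts, clear the TX/RX run bits, free the IRQ, then release every buffered skb while poisoning the RX buffer pointers so a late DMA shows up clearly in a register dump. A simplified model of the RX loop, using plain heap buffers in place of skbs:

#include <stdint.h>
#include <stdlib.h>

#define RXDESCS 32

struct rx_desc {
    uint32_t status;
    uint32_t length;
    uint32_t buffer1;
    uint32_t buffer2;
};

/* Stand-ins for the skb queue and descriptor ring. */
static void *rxq[RXDESCS];
static struct rx_desc rxring[RXDESCS];

static void teardown_rx_ring(void)
{
    for (int i = 0; i < RXDESCS; i++) {
        void *buf = rxq[i];

        rxq[i] = NULL;
        rxring[i].status  = 0;          /* no longer owned by the chip */
        rxring[i].length  = 0;
        rxring[i].buffer1 = 0xDEADBEEF; /* poison: flags any late DMA */
        free(buf);                      /* free(NULL) is a no-op */
    }
}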
- * (being a WAN card and all) - */ - if (csr & TULIP_STS_ABNRMLINTR){ - lmc_running_reset (dev); - break; - } - - if (csr & TULIP_STS_RXINTR) - lmc_rx (dev); - - if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) { - - int n_compl = 0 ; - /* reset the transmit timeout detection flag -baz */ - sc->extra_stats.tx_NoCompleteCnt = 0; - - badtx = sc->lmc_taint_tx; - i = badtx % LMC_TXDESCS; - - while ((badtx < sc->lmc_next_tx)) { - stat = sc->lmc_txring[i].status; - - LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat, - sc->lmc_txring[i].length); - /* - * If bit 31 is 1 the tulip owns it break out of the loop - */ - if (stat & 0x80000000) - break; - - n_compl++ ; /* i.e., have an empty slot in ring */ - /* - * If we have no skbuff or have cleared it - * Already continue to the next buffer - */ - if (sc->lmc_txq[i] == NULL) - continue; - - /* - * Check the total error summary to look for any errors - */ - if (stat & 0x8000) { - sc->lmc_device->stats.tx_errors++; - if (stat & 0x4104) - sc->lmc_device->stats.tx_aborted_errors++; - if (stat & 0x0C00) - sc->lmc_device->stats.tx_carrier_errors++; - if (stat & 0x0200) - sc->lmc_device->stats.tx_window_errors++; - if (stat & 0x0002) - sc->lmc_device->stats.tx_fifo_errors++; - } else { - sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff; - - sc->lmc_device->stats.tx_packets++; - } - - dev_consume_skb_irq(sc->lmc_txq[i]); - sc->lmc_txq[i] = NULL; - - badtx++; - i = badtx % LMC_TXDESCS; - } - - if (sc->lmc_next_tx - badtx > LMC_TXDESCS) - { - printk ("%s: out of sync pointer\n", dev->name); - badtx += LMC_TXDESCS; - } - LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0); - sc->lmc_txfull = 0; - netif_wake_queue(dev); - sc->extra_stats.tx_tbusy0++; - - -#ifdef DEBUG - sc->extra_stats.dirtyTx = badtx; - sc->extra_stats.lmc_next_tx = sc->lmc_next_tx; - sc->extra_stats.lmc_txfull = sc->lmc_txfull; -#endif - sc->lmc_taint_tx = badtx; - - /* - * Why was there a break here??? 
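The completion scan above relies on two monotonically increasing counters: lmc_taint_tx (the oldest unreclaimed descriptor) chases lmc_next_tx (the next free slot), indices are reduced modulo the ring size only when touching the ring, and the first descriptor still owned by the chip (status bit 31) ends the walk; the counters can therefore be sanity-checked with a plain subtraction. A compact model of the same convention:

#include <stdint.h>

#define TXDESCS 32
#define DESC_OWNED_BY_CHIP 0x80000000u

static uint32_t txstatus[TXDESCS];   /* status words; bit 31 = chip owns it */
static unsigned next_tx, taint_tx;   /* monotonic producer/consumer counts */

/* Reclaim finished descriptors; returns how many completed. */
static int reclaim_tx(void)
{
    int done = 0;

    while (taint_tx < next_tx) {
        unsigned i = taint_tx % TXDESCS;

        if (txstatus[i] & DESC_OWNED_BY_CHIP)
            break;                   /* chip is still transmitting this one */
        /* ...inspect error bits, free the skb, bump stats here... */
        done++;
        taint_tx++;
    }
    /* the counters can never legitimately drift a full ring apart */
    if (next_tx - taint_tx > TXDESCS)
        taint_tx += TXDESCS;         /* driver logs "out of sync pointer" */
    return done;
}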
- */ - } /* end handle transmit interrupt */ - - if (csr & TULIP_STS_SYSERROR) { - u32 error; - printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr); - error = csr>>23 & 0x7; - switch(error){ - case 0x000: - printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name); - break; - case 0x001: - printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name); - break; - case 0x002: - printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name); - break; - default: - printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name); - } - lmc_dec_reset (sc); - lmc_reset (sc); - LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0); - LMC_EVENT_LOG(LMC_EVENT_RESET2, - lmc_mii_readreg (sc, 0, 16), - lmc_mii_readreg (sc, 0, 17)); - - } - - - if(max_work-- <= 0) - break; - - /* - * Get current csr status to make sure - * we've cleared all interrupts - */ - csr = LMC_CSR_READ (sc, csr_status); - } /* end interrupt loop */ - LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr); - -lmc_int_fail_out: - - spin_unlock(&sc->lmc_lock); - - return IRQ_RETVAL(handled); -} - -static netdev_tx_t lmc_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - lmc_softc_t *sc = dev_to_sc(dev); - u32 flag; - int entry; - unsigned long flags; - - spin_lock_irqsave(&sc->lmc_lock, flags); - - /* normal path, tbusy known to be zero */ - - entry = sc->lmc_next_tx % LMC_TXDESCS; - - sc->lmc_txq[entry] = skb; - sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data); - - LMC_CONSOLE_LOG("xmit", skb->data, skb->len); - -#ifndef GCOM - /* If the queue is less than half full, don't interrupt */ - if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2) - { - /* Do not interrupt on completion of this packet */ - flag = 0x60000000; - netif_wake_queue(dev); - } - else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2) - { - /* This generates an interrupt on completion of this packet */ - flag = 0xe0000000; - netif_wake_queue(dev); - } - else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1) - { - /* Do not interrupt on completion of this packet */ - flag = 0x60000000; - netif_wake_queue(dev); - } - else - { - /* This generates an interrupt on completion of this packet */ - flag = 0xe0000000; - sc->lmc_txfull = 1; - netif_stop_queue(dev); - } -#else - flag = LMC_TDES_INTERRUPT_ON_COMPLETION; - - if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1) - { /* ring full, go busy */ - sc->lmc_txfull = 1; - netif_stop_queue(dev); - sc->extra_stats.tx_tbusy1++; - LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0); - } -#endif - - - if (entry == LMC_TXDESCS - 1) /* last descriptor in ring */ - flag |= LMC_TDES_END_OF_RING; /* flag as such for Tulip */ - - /* don't pad small packets either */ - flag = sc->lmc_txring[entry].length = (skb->len) | flag | - sc->TxDescriptControlInit; - - /* set the transmit timeout flag to be checked in - * the watchdog timer handler. -baz - */ - - sc->extra_stats.tx_NoCompleteCnt++; - sc->lmc_next_tx++; - - /* give ownership to the chip */ - LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry); - sc->lmc_txring[entry].status = 0x80000000; - - /* send now! 
*/ - LMC_CSR_WRITE (sc, csr_txpoll, 0); - - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - return NETDEV_TX_OK; -} - - -static int lmc_rx(struct net_device *dev) -{ - lmc_softc_t *sc = dev_to_sc(dev); - int i; - int rx_work_limit = LMC_RXDESCS; - int rxIntLoopCnt; /* debug -baz */ - int localLengthErrCnt = 0; - long stat; - struct sk_buff *skb, *nsb; - u16 len; - - lmc_led_on(sc, LMC_DS3_LED3); - - rxIntLoopCnt = 0; /* debug -baz */ - - i = sc->lmc_next_rx % LMC_RXDESCS; - - while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4) - { - rxIntLoopCnt++; /* debug -baz */ - len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER); - if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */ - if ((stat & 0x0000ffff) != 0x7fff) { - /* Oversized frame */ - sc->lmc_device->stats.rx_length_errors++; - goto skip_packet; - } - } - - if (stat & 0x00000008) { /* Catch a dribbling bit error */ - sc->lmc_device->stats.rx_errors++; - sc->lmc_device->stats.rx_frame_errors++; - goto skip_packet; - } - - - if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */ - sc->lmc_device->stats.rx_errors++; - sc->lmc_device->stats.rx_crc_errors++; - goto skip_packet; - } - - if (len > LMC_PKT_BUF_SZ) { - sc->lmc_device->stats.rx_length_errors++; - localLengthErrCnt++; - goto skip_packet; - } - - if (len < sc->lmc_crcSize + 2) { - sc->lmc_device->stats.rx_length_errors++; - sc->extra_stats.rx_SmallPktCnt++; - localLengthErrCnt++; - goto skip_packet; - } - - if(stat & 0x00004000){ - printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name); - } - - len -= sc->lmc_crcSize; - - skb = sc->lmc_rxq[i]; - - /* - * We ran out of memory at some point - * just allocate an skb buff and continue. - */ - - if (!skb) { - nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2); - if (nsb) { - sc->lmc_rxq[i] = nsb; - nsb->dev = dev; - sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb)); - } - sc->failed_recv_alloc = 1; - goto skip_packet; - } - - sc->lmc_device->stats.rx_packets++; - sc->lmc_device->stats.rx_bytes += len; - - LMC_CONSOLE_LOG("recv", skb->data, len); - - /* - * I'm not sure of the sanity of this - * Packets could be arriving at a constant - * 44.210mbits/sec and we're going to copy - * them into a new buffer?? - */ - - if(len > (LMC_MTU - (LMC_MTU>>2))){ /* len > LMC_MTU * 0.75 */ - /* - * If it's a large packet don't copy it just hand it up - */ - give_it_anyways: - - sc->lmc_rxq[i] = NULL; - sc->lmc_rxring[i].buffer1 = 0x0; - - skb_put (skb, len); - skb->protocol = lmc_proto_type(sc, skb); - skb_reset_mac_header(skb); - /* skb_reset_network_header(skb); */ - skb->dev = dev; - lmc_proto_netif(sc, skb); - - /* - * This skb will be destroyed by the upper layers, make a new one - */ - nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2); - if (nsb) { - sc->lmc_rxq[i] = nsb; - nsb->dev = dev; - sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb)); - /* Transferred to 21140 below */ - } - else { - /* - * We've run out of memory, stop trying to allocate - * memory and exit the interrupt handler - * - * The chip may run out of receivers and stop - * in which care we'll try to allocate the buffer - * again. 
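lmc_rx() above is a copy-break receiver: frames above roughly three quarters of the MTU are handed up in the ring buffer itself and the slot is refilled with a fresh allocation, while smaller frames are copied into a right-sized buffer so the large ring buffer stays put; when even the small allocation fails, the code falls back to giving the ring buffer away. A sketch of that policy with malloc standing in for skb allocation; BUF_SZ is an assumed stand-in for LMC_PKT_BUF_SZ.

#include <stdlib.h>
#include <string.h>

#define MTU    1500
#define BUF_SZ 1542   /* assumed ring-buffer size for the sketch */

/* Deliver one received frame of 'len' bytes sitting in ring buffer 'ring'.
 * Returns the buffer that must go back into the ring slot (NULL leaves
 * the slot empty until the next pass, as in the driver). */
static void *rx_copybreak(void *ring, int len,
                          void (*deliver)(void *buf, int len))
{
    if (len > MTU - (MTU >> 2)) {    /* big frame: avoid the memcpy */
        deliver(ring, len);          /* the stack now owns the ring buffer */
        return malloc(BUF_SZ);       /* replenish the slot */
    }

    void *copy = malloc(len);        /* small frame: copy, keep the buffer */
    if (!copy) {
        deliver(ring, len);          /* fallback: hand it up anyway */
        return malloc(BUF_SZ);
    }
    memcpy(copy, ring, len);
    deliver(copy, len);
    return ring;                     /* original stays in the ring */
}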
(once a second) - */ - sc->extra_stats.rx_BuffAllocErr++; - LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len); - sc->failed_recv_alloc = 1; - goto skip_out_of_mem; - } - } - else { - nsb = dev_alloc_skb(len); - if(!nsb) { - goto give_it_anyways; - } - skb_copy_from_linear_data(skb, skb_put(nsb, len), len); - - nsb->protocol = lmc_proto_type(sc, nsb); - skb_reset_mac_header(nsb); - /* skb_reset_network_header(nsb); */ - nsb->dev = dev; - lmc_proto_netif(sc, nsb); - } - - skip_packet: - LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len); - sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4; - - sc->lmc_next_rx++; - i = sc->lmc_next_rx % LMC_RXDESCS; - rx_work_limit--; - if (rx_work_limit < 0) - break; - } - - /* detect condition for LMC1000 where DSU cable attaches and fills - * descriptors with bogus packets - * - if (localLengthErrCnt > LMC_RXDESCS - 3) { - sc->extra_stats.rx_BadPktSurgeCnt++; - LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt, - sc->extra_stats.rx_BadPktSurgeCnt); - } */ - - /* save max count of receive descriptors serviced */ - if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt) - sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */ - -#ifdef DEBUG - if (rxIntLoopCnt == 0) - { - for (i = 0; i < LMC_RXDESCS; i++) - { - if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT) - != DESC_OWNED_BY_DC21X4) - { - rxIntLoopCnt++; - } - } - LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0); - } -#endif - - - lmc_led_off(sc, LMC_DS3_LED3); - -skip_out_of_mem: - return 0; -} - -static struct net_device_stats *lmc_get_stats(struct net_device *dev) -{ - lmc_softc_t *sc = dev_to_sc(dev); - unsigned long flags; - - spin_lock_irqsave(&sc->lmc_lock, flags); - - sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff; - - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - return &sc->lmc_device->stats; -} - -static struct pci_driver lmc_driver = { - .name = "lmc", - .id_table = lmc_pci_tbl, - .probe = lmc_init_one, - .remove = lmc_remove_one, -}; - -module_pci_driver(lmc_driver); - -unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/ -{ - int i; - int command = (0xf6 << 10) | (devaddr << 5) | regno; - int retval = 0; - - LMC_MII_SYNC (sc); - - for (i = 15; i >= 0; i--) - { - int dataval = (command & (1 << i)) ? 0x20000 : 0; - - LMC_CSR_WRITE (sc, csr_9, dataval); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - } - - for (i = 19; i > 0; i--) - { - LMC_CSR_WRITE (sc, csr_9, 0x40000); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 
1 : 0); - LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - } - - return (retval >> 1) & 0xffff; -} - -void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/ -{ - int i = 32; - int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data; - - LMC_MII_SYNC (sc); - - i = 31; - while (i >= 0) - { - int datav; - - if (command & (1 << i)) - datav = 0x20000; - else - datav = 0x00000; - - LMC_CSR_WRITE (sc, csr_9, datav); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000)); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - i--; - } - - i = 2; - while (i > 0) - { - LMC_CSR_WRITE (sc, csr_9, 0x40000); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - LMC_CSR_WRITE (sc, csr_9, 0x50000); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - i--; - } -} - -static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/ -{ - int i; - - /* Initialize the receive rings and buffers. */ - sc->lmc_txfull = 0; - sc->lmc_next_rx = 0; - sc->lmc_next_tx = 0; - sc->lmc_taint_rx = 0; - sc->lmc_taint_tx = 0; - - /* - * Setup each one of the receiver buffers - * allocate an skbuff for each one, setup the descriptor table - * and point each buffer at the next one - */ - - for (i = 0; i < LMC_RXDESCS; i++) - { - struct sk_buff *skb; - - if (sc->lmc_rxq[i] == NULL) - { - skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2); - if(skb == NULL){ - printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name); - sc->failed_ring = 1; - break; - } - else{ - sc->lmc_rxq[i] = skb; - } - } - else - { - skb = sc->lmc_rxq[i]; - } - - skb->dev = sc->lmc_device; - - /* owned by 21140 */ - sc->lmc_rxring[i].status = 0x80000000; - - /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */ - sc->lmc_rxring[i].length = skb_tailroom(skb); - - /* use to be tail which is dumb since you're thinking why write - * to the end of the packj,et but since there's nothing there tail == data - */ - sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data); - - /* This is fair since the structure is static and we have the next address */ - sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]); - - } - - /* - * Sets end of ring - */ - if (i != 0) { - sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */ - sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */ - } - LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring)); /* write base address */ - - /* Initialize the transmit rings and buffers */ - for (i = 0; i < LMC_TXDESCS; i++) - { - if (sc->lmc_txq[i] != NULL){ /* have buffer */ - dev_kfree_skb(sc->lmc_txq[i]); /* free it */ - sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */ - } - sc->lmc_txq[i] = NULL; - sc->lmc_txring[i].status = 0x00000000; - sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]); - } - sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]); - LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring)); -} - -void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/ -{ - sc->lmc_gpio_io &= ~bits; - LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io)); -} - -void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/ -{ - sc->lmc_gpio_io |= bits; - LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io)); -} - -void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/ -{ - if ((~sc->lmc_miireg16) & led) /* Already on! 
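lmc_mii_readreg()/lmc_mii_writereg() above bit-bang the MII bus through CSR9: bit 16 clocks the bus, bit 17 carries outgoing data, bit 18 switches to read mode, and bit 19 samples the PHY, with lmc_delay() (a dummy CSR read) pacing each edge; a read shifts the 16-bit command out MSB-first, then clocks in 19 bits and keeps bits 16..1 of the result. The bit roles named below are inferred from how the code drives CSR9 and from the Tulip documentation, so treat this as an illustrative model rather than a definitive map.

#include <stdint.h>

/* Tulip CSR9 MII management bits, as the code above uses them. */
#define MII_MDC 0x10000u   /* management clock */
#define MII_MDO 0x20000u   /* data out to the PHY */
#define MII_RD  0x40000u   /* read mode: tristate the output */
#define MII_MDI 0x80000u   /* data in from the PHY */

/* CSR9 access would be MMIO plus lmc_delay() in the driver. */
static uint32_t csr9;
static void     csr9_write(uint32_t v) { csr9 = v; }
static uint32_t csr9_read(void)        { return csr9; }

/* Shift one command bit out, MSB first, latching on the rising edge. */
static void mdio_out_bit(int bit)
{
    uint32_t d = bit ? MII_MDO : 0;

    csr9_write(d);             /* data valid, clock low */
    csr9_write(d | MII_MDC);   /* rising edge latches the bit */
}

/* Sample one bit from the PHY while clocking in read mode. */
static int mdio_in_bit(void)
{
    int bit;

    csr9_write(MII_RD);                  /* clock low, output released */
    bit = !!(csr9_read() & MII_MDI);     /* sample before the edge */
    csr9_write(MII_RD | MII_MDC);        /* complete the clock cycle */
    return bit;
}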
*/ - return; - - sc->lmc_miireg16 &= ~led; - lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); -} - -void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/ -{ - if (sc->lmc_miireg16 & led) /* Already set don't do anything */ - return; - - sc->lmc_miireg16 |= led; - lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); -} - -static void lmc_reset(lmc_softc_t * const sc) /*fold00*/ -{ - sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET; - lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); - - sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET; - lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); - - /* - * make some of the GPIO pins be outputs - */ - lmc_gpio_mkoutput(sc, LMC_GEP_RESET); - - /* - * RESET low to force state reset. This also forces - * the transmitter clock to be internal, but we expect to reset - * that later anyway. - */ - sc->lmc_gpio &= ~(LMC_GEP_RESET); - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - - /* - * hold for more than 10 microseconds - */ - udelay(50); - - /* - * stop driving Xilinx-related signals - */ - lmc_gpio_mkinput(sc, LMC_GEP_RESET); - - /* - * Call media specific init routine - */ - sc->lmc_media->init(sc); - - sc->extra_stats.resetCount++; -} - -static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/ -{ - u32 val; - - /* - * disable all interrupts - */ - sc->lmc_intrmask = 0; - LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask); - - /* - * Reset the chip with a software reset command. - * Wait 10 microseconds (actually 50 PCI cycles but at - * 33MHz that comes to two microseconds but wait a - * bit longer anyways) - */ - LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET); - udelay(25); -#ifdef __sparc__ - sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode); - sc->lmc_busmode = 0x00100000; - sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET; - LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode); -#endif - sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command); - - /* - * We want: - * no ethernet address in frames we write - * disable padding (txdesc, padding disable) - * ignore runt frames (rdes0 bit 15) - * no receiver watchdog or transmitter jabber timer - * (csr15 bit 0,14 == 1) - * if using 16-bit CRC, turn off CRC (trans desc, crc disable) - */ - - sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS - | TULIP_CMD_FULLDUPLEX - | TULIP_CMD_PASSBADPKT - | TULIP_CMD_NOHEARTBEAT - | TULIP_CMD_PORTSELECT - | TULIP_CMD_RECEIVEALL - | TULIP_CMD_MUSTBEONE - ); - sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE - | TULIP_CMD_THRESHOLDCTL - | TULIP_CMD_STOREFWD - | TULIP_CMD_TXTHRSHLDCTL - ); - - LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode); - - /* - * disable receiver watchdog and transmit jabber - */ - val = LMC_CSR_READ(sc, csr_sia_general); - val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE); - LMC_CSR_WRITE(sc, csr_sia_general, val); -} - -static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/ - size_t csr_size) -{ - sc->lmc_csrs.csr_busmode = csr_base + 0 * csr_size; - sc->lmc_csrs.csr_txpoll = csr_base + 1 * csr_size; - sc->lmc_csrs.csr_rxpoll = csr_base + 2 * csr_size; - sc->lmc_csrs.csr_rxlist = csr_base + 3 * csr_size; - sc->lmc_csrs.csr_txlist = csr_base + 4 * csr_size; - sc->lmc_csrs.csr_status = csr_base + 5 * csr_size; - sc->lmc_csrs.csr_command = csr_base + 6 * csr_size; - sc->lmc_csrs.csr_intr = csr_base + 7 * csr_size; - sc->lmc_csrs.csr_missed_frames = csr_base + 8 * csr_size; - sc->lmc_csrs.csr_9 = csr_base + 9 * csr_size; - sc->lmc_csrs.csr_10 = csr_base + 10 * csr_size; - sc->lmc_csrs.csr_11 = csr_base + 11 * csr_size; - sc->lmc_csrs.csr_12 = csr_base + 12 * 
csr_size; - sc->lmc_csrs.csr_13 = csr_base + 13 * csr_size; - sc->lmc_csrs.csr_14 = csr_base + 14 * csr_size; - sc->lmc_csrs.csr_15 = csr_base + 15 * csr_size; -} - -static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue) -{ - lmc_softc_t *sc = dev_to_sc(dev); - u32 csr6; - unsigned long flags; - - spin_lock_irqsave(&sc->lmc_lock, flags); - - printk("%s: Xmitter busy|\n", dev->name); - - sc->extra_stats.tx_tbusy_calls++; - if (time_is_before_jiffies(dev_trans_start(dev) + TX_TIMEOUT)) - goto bug_out; - - /* - * Chip seems to have locked up - * Reset it - * This whips out all our descriptor - * table and starts from scartch - */ - - LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO, - LMC_CSR_READ (sc, csr_status), - sc->extra_stats.tx_ProcTimeout); - - lmc_running_reset (dev); - - LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0); - LMC_EVENT_LOG(LMC_EVENT_RESET2, - lmc_mii_readreg (sc, 0, 16), - lmc_mii_readreg (sc, 0, 17)); - - /* restart the tx processes */ - csr6 = LMC_CSR_READ (sc, csr_command); - LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002); - LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002); - - /* immediate transmit */ - LMC_CSR_WRITE (sc, csr_txpoll, 0); - - sc->lmc_device->stats.tx_errors++; - sc->extra_stats.tx_ProcTimeout++; /* -baz */ - - netif_trans_update(dev); /* prevent tx timeout */ - -bug_out: - - spin_unlock_irqrestore(&sc->lmc_lock, flags); -} diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c deleted file mode 100644 index ec1ac7b1f3fd..000000000000 --- a/drivers/net/wan/lmc/lmc_media.c +++ /dev/null @@ -1,1206 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* $Id: lmc_media.c,v 1.13 2000/04/11 05:25:26 asj Exp $ */ - -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/timer.h> -#include <linux/ptrace.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/interrupt.h> -#include <linux/in.h> -#include <linux/if_arp.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/inet.h> -#include <linux/bitops.h> - -#include <asm/processor.h> /* Processor type for cache alignment. */ -#include <asm/io.h> -#include <asm/dma.h> - -#include <linux/uaccess.h> - -#include "lmc.h" -#include "lmc_var.h" -#include "lmc_ioctl.h" -#include "lmc_debug.h" - -#define CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE 1 - - /* - * Copyright (c) 1997-2000 LAN Media Corporation (LMC) - * All rights reserved. www.lanmedia.com - * - * This code is written by: - * Andrew Stanley-Jones (asj@cban.com) - * Rob Braun (bbraun@vix.com), - * Michael Graff (explorer@vix.com) and - * Matt Thomas (matt@3am-software.com). - */ - -/* - * protocol independent method. - */ -static void lmc_set_protocol (lmc_softc_t * const, lmc_ctl_t *); - -/* - * media independent methods to check on media status, link, light LEDs, - * etc. 
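Everything in lmc_media.c below is organized around one operations table per card family (lmc_media_t); operations a family does not support point at the shared lmc_dummy_set_1/lmc_dummy_set2_1 stubs, so call sites never have to test for NULL. A stripped-down sketch of the same pattern:

#include <stdio.h>

struct media;   /* forward declaration keeps the table self-contained */

struct media_ops {
    void (*init)(struct media *);
    void (*set_clock_source)(struct media *, int);
    int  (*get_link_status)(struct media *);
};

struct media { const struct media_ops *ops; };

/* Shared stub: families that cannot do an operation still get a
 * callable slot, so call sites stay unconditional. */
static void dummy_set(struct media *m, int v) { (void)m; (void)v; }

static void ds3_init(struct media *m) { (void)m; puts("ds3 init"); }
static int  ds3_link(struct media *m) { (void)m; return 1; }

static const struct media_ops ds3_ops = {
    .init             = ds3_init,
    .set_clock_source = dummy_set,   /* DS3 offers no clock-source choice */
    .get_link_status  = ds3_link,
};

int main(void)
{
    struct media m = { .ops = &ds3_ops };

    m.ops->init(&m);
    m.ops->set_clock_source(&m, 0);  /* safe: no NULL check needed */
    return m.ops->get_link_status(&m) ? 0 : 1;
}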
- */ -static void lmc_ds3_init (lmc_softc_t * const); -static void lmc_ds3_default (lmc_softc_t * const); -static void lmc_ds3_set_status (lmc_softc_t * const, lmc_ctl_t *); -static void lmc_ds3_set_100ft (lmc_softc_t * const, int); -static int lmc_ds3_get_link_status (lmc_softc_t * const); -static void lmc_ds3_set_crc_length (lmc_softc_t * const, int); -static void lmc_ds3_set_scram (lmc_softc_t * const, int); -static void lmc_ds3_watchdog (lmc_softc_t * const); - -static void lmc_hssi_init (lmc_softc_t * const); -static void lmc_hssi_default (lmc_softc_t * const); -static void lmc_hssi_set_status (lmc_softc_t * const, lmc_ctl_t *); -static void lmc_hssi_set_clock (lmc_softc_t * const, int); -static int lmc_hssi_get_link_status (lmc_softc_t * const); -static void lmc_hssi_set_link_status (lmc_softc_t * const, int); -static void lmc_hssi_set_crc_length (lmc_softc_t * const, int); -static void lmc_hssi_watchdog (lmc_softc_t * const); - -static void lmc_ssi_init (lmc_softc_t * const); -static void lmc_ssi_default (lmc_softc_t * const); -static void lmc_ssi_set_status (lmc_softc_t * const, lmc_ctl_t *); -static void lmc_ssi_set_clock (lmc_softc_t * const, int); -static void lmc_ssi_set_speed (lmc_softc_t * const, lmc_ctl_t *); -static int lmc_ssi_get_link_status (lmc_softc_t * const); -static void lmc_ssi_set_link_status (lmc_softc_t * const, int); -static void lmc_ssi_set_crc_length (lmc_softc_t * const, int); -static void lmc_ssi_watchdog (lmc_softc_t * const); - -static void lmc_t1_init (lmc_softc_t * const); -static void lmc_t1_default (lmc_softc_t * const); -static void lmc_t1_set_status (lmc_softc_t * const, lmc_ctl_t *); -static int lmc_t1_get_link_status (lmc_softc_t * const); -static void lmc_t1_set_circuit_type (lmc_softc_t * const, int); -static void lmc_t1_set_crc_length (lmc_softc_t * const, int); -static void lmc_t1_set_clock (lmc_softc_t * const, int); -static void lmc_t1_watchdog (lmc_softc_t * const); - -static void lmc_dummy_set_1 (lmc_softc_t * const, int); -static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *); - -static inline void write_av9110_bit (lmc_softc_t *, int); -static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32); - -lmc_media_t lmc_ds3_media = { - .init = lmc_ds3_init, /* special media init stuff */ - .defaults = lmc_ds3_default, /* reset to default state */ - .set_status = lmc_ds3_set_status, /* reset status to state provided */ - .set_clock_source = lmc_dummy_set_1, /* set clock source */ - .set_speed = lmc_dummy_set2_1, /* set line speed */ - .set_cable_length = lmc_ds3_set_100ft, /* set cable length */ - .set_scrambler = lmc_ds3_set_scram, /* set scrambler */ - .get_link_status = lmc_ds3_get_link_status, /* get link status */ - .set_link_status = lmc_dummy_set_1, /* set link status */ - .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */ - .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */ - .watchdog = lmc_ds3_watchdog -}; - -lmc_media_t lmc_hssi_media = { - .init = lmc_hssi_init, /* special media init stuff */ - .defaults = lmc_hssi_default, /* reset to default state */ - .set_status = lmc_hssi_set_status, /* reset status to state provided */ - .set_clock_source = lmc_hssi_set_clock, /* set clock source */ - .set_speed = lmc_dummy_set2_1, /* set line speed */ - .set_cable_length = lmc_dummy_set_1, /* set cable length */ - .set_scrambler = lmc_dummy_set_1, /* set scrambler */ - .get_link_status = lmc_hssi_get_link_status, /* get link status */ - .set_link_status = lmc_hssi_set_link_status, /* set link 
status */ - .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */ - .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */ - .watchdog = lmc_hssi_watchdog -}; - -lmc_media_t lmc_ssi_media = { - .init = lmc_ssi_init, /* special media init stuff */ - .defaults = lmc_ssi_default, /* reset to default state */ - .set_status = lmc_ssi_set_status, /* reset status to state provided */ - .set_clock_source = lmc_ssi_set_clock, /* set clock source */ - .set_speed = lmc_ssi_set_speed, /* set line speed */ - .set_cable_length = lmc_dummy_set_1, /* set cable length */ - .set_scrambler = lmc_dummy_set_1, /* set scrambler */ - .get_link_status = lmc_ssi_get_link_status, /* get link status */ - .set_link_status = lmc_ssi_set_link_status, /* set link status */ - .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */ - .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */ - .watchdog = lmc_ssi_watchdog -}; - -lmc_media_t lmc_t1_media = { - .init = lmc_t1_init, /* special media init stuff */ - .defaults = lmc_t1_default, /* reset to default state */ - .set_status = lmc_t1_set_status, /* reset status to state provided */ - .set_clock_source = lmc_t1_set_clock, /* set clock source */ - .set_speed = lmc_dummy_set2_1, /* set line speed */ - .set_cable_length = lmc_dummy_set_1, /* set cable length */ - .set_scrambler = lmc_dummy_set_1, /* set scrambler */ - .get_link_status = lmc_t1_get_link_status, /* get link status */ - .set_link_status = lmc_dummy_set_1, /* set link status */ - .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */ - .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */ - .watchdog = lmc_t1_watchdog -}; - -static void -lmc_dummy_set_1 (lmc_softc_t * const sc, int a) -{ -} - -static void -lmc_dummy_set2_1 (lmc_softc_t * const sc, lmc_ctl_t * a) -{ -} - -/* - * HSSI methods - */ - -static void -lmc_hssi_init (lmc_softc_t * const sc) -{ - sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5200; - - lmc_gpio_mkoutput (sc, LMC_GEP_HSSI_CLOCK); -} - -static void -lmc_hssi_default (lmc_softc_t * const sc) -{ - sc->lmc_miireg16 = LMC_MII16_LED_ALL; - - sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN); - sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT); - sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16); -} - -/* - * Given a user provided state, set ourselves up to match it. This will - * always reset the card if needed. 
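Each set_status implementation that follows has the same shape: a NULL ctl means "re-apply the cached state" (used after a card reset), and otherwise every field is compared against the cached copy so the hardware is only touched on a transition. A minimal sketch of that idempotent apply:

#include <stddef.h>

struct state { int clock_internal; };

static struct state cur;   /* cached hardware state, like sc->ictl */

static void set_clock(int internal) { cur.clock_internal = internal; }

/* Apply a requested state, or re-apply the cached one when req == NULL.
 * Only transitions reach the hardware. */
static void set_status(const struct state *req)
{
    if (!req) {
        set_clock(cur.clock_internal);   /* restore after a reset */
        return;
    }
    if (req->clock_internal != cur.clock_internal)
        set_clock(req->clock_internal);  /* edge-triggered reconfig */
}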
- */ -static void -lmc_hssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) -{ - if (ctl == NULL) - { - sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source); - lmc_set_protocol (sc, NULL); - - return; - } - - /* - * check for change in clock source - */ - if (ctl->clock_source && !sc->ictl.clock_source) - { - sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT); - sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT; - } - else if (!ctl->clock_source && sc->ictl.clock_source) - { - sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; - sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT); - } - - lmc_set_protocol (sc, ctl); -} - -/* - * 1 == internal, 0 == external - */ -static void -lmc_hssi_set_clock (lmc_softc_t * const sc, int ie) -{ - int old; - old = sc->ictl.clock_source; - if (ie == LMC_CTL_CLOCK_SOURCE_EXT) - { - sc->lmc_gpio |= LMC_GEP_HSSI_CLOCK; - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT; - if(old != ie) - printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS); - } - else - { - sc->lmc_gpio &= ~(LMC_GEP_HSSI_CLOCK); - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT; - if(old != ie) - printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS); - } -} - -/* - * return hardware link status. - * 0 == link is down, 1 == link is up. - */ -static int -lmc_hssi_get_link_status (lmc_softc_t * const sc) -{ - /* - * We're using the same code as SSI since - * they're practically the same - */ - return lmc_ssi_get_link_status(sc); -} - -static void -lmc_hssi_set_link_status (lmc_softc_t * const sc, int state) -{ - if (state == LMC_LINK_UP) - sc->lmc_miireg16 |= LMC_MII16_HSSI_TA; - else - sc->lmc_miireg16 &= ~LMC_MII16_HSSI_TA; - - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); -} - -/* - * 0 == 16bit, 1 == 32bit - */ -static void -lmc_hssi_set_crc_length (lmc_softc_t * const sc, int state) -{ - if (state == LMC_CTL_CRC_LENGTH_32) - { - /* 32 bit */ - sc->lmc_miireg16 |= LMC_MII16_HSSI_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32; - } - else - { - /* 16 bit */ - sc->lmc_miireg16 &= ~LMC_MII16_HSSI_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16; - } - - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); -} - -static void -lmc_hssi_watchdog (lmc_softc_t * const sc) -{ - /* HSSI is blank */ -} - -/* - * DS3 methods - */ - -/* - * Set cable length - */ -static void -lmc_ds3_set_100ft (lmc_softc_t * const sc, int ie) -{ - if (ie == LMC_CTL_CABLE_LENGTH_GT_100FT) - { - sc->lmc_miireg16 &= ~LMC_MII16_DS3_ZERO; - sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_GT_100FT; - } - else if (ie == LMC_CTL_CABLE_LENGTH_LT_100FT) - { - sc->lmc_miireg16 |= LMC_MII16_DS3_ZERO; - sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_LT_100FT; - } - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); -} - -static void -lmc_ds3_default (lmc_softc_t * const sc) -{ - sc->lmc_miireg16 = LMC_MII16_LED_ALL; - - sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN); - sc->lmc_media->set_cable_length (sc, LMC_CTL_CABLE_LENGTH_LT_100FT); - sc->lmc_media->set_scrambler (sc, LMC_CTL_OFF); - sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16); -} - -/* - * Given a user provided state, set ourselves up to match it. This will - * always reset the card if needed. 
- */ -static void -lmc_ds3_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) -{ - if (ctl == NULL) - { - sc->lmc_media->set_cable_length (sc, sc->ictl.cable_length); - sc->lmc_media->set_scrambler (sc, sc->ictl.scrambler_onoff); - lmc_set_protocol (sc, NULL); - - return; - } - - /* - * check for change in cable length setting - */ - if (ctl->cable_length && !sc->ictl.cable_length) - lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_GT_100FT); - else if (!ctl->cable_length && sc->ictl.cable_length) - lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_LT_100FT); - - /* - * Check for change in scrambler setting (requires reset) - */ - if (ctl->scrambler_onoff && !sc->ictl.scrambler_onoff) - lmc_ds3_set_scram (sc, LMC_CTL_ON); - else if (!ctl->scrambler_onoff && sc->ictl.scrambler_onoff) - lmc_ds3_set_scram (sc, LMC_CTL_OFF); - - lmc_set_protocol (sc, ctl); -} - -static void -lmc_ds3_init (lmc_softc_t * const sc) -{ - int i; - - sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5245; - - /* writes zeros everywhere */ - for (i = 0; i < 21; i++) - { - lmc_mii_writereg (sc, 0, 17, i); - lmc_mii_writereg (sc, 0, 18, 0); - } - - /* set some essential bits */ - lmc_mii_writereg (sc, 0, 17, 1); - lmc_mii_writereg (sc, 0, 18, 0x25); /* ser, xtx */ - - lmc_mii_writereg (sc, 0, 17, 5); - lmc_mii_writereg (sc, 0, 18, 0x80); /* emode */ - - lmc_mii_writereg (sc, 0, 17, 14); - lmc_mii_writereg (sc, 0, 18, 0x30); /* rcgen, tcgen */ - - /* clear counters and latched bits */ - for (i = 0; i < 21; i++) - { - lmc_mii_writereg (sc, 0, 17, i); - lmc_mii_readreg (sc, 0, 18); - } -} - -/* - * 1 == DS3 payload scrambled, 0 == not scrambled - */ -static void -lmc_ds3_set_scram (lmc_softc_t * const sc, int ie) -{ - if (ie == LMC_CTL_ON) - { - sc->lmc_miireg16 |= LMC_MII16_DS3_SCRAM; - sc->ictl.scrambler_onoff = LMC_CTL_ON; - } - else - { - sc->lmc_miireg16 &= ~LMC_MII16_DS3_SCRAM; - sc->ictl.scrambler_onoff = LMC_CTL_OFF; - } - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); -} - -/* - * return hardware link status. - * 0 == link is down, 1 == link is up. 
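The link pollers below run once per second from the watchdog, so alarm reporting is edge-triggered: the last_led_err[] latches remember whether a message has already been printed, the LED follows the level, and the warning fires only on a transition, with a matching "cleared" message on the way back down. A small model of that debounce:

#include <stdbool.h>
#include <stdio.h>

static bool last_err[4];   /* one latch per alarm LED, as last_led_err[] */

/* Poll one alarm: log only on transitions, return whether it is active. */
static bool poll_alarm(int idx, bool raised, const char *name)
{
    if (raised && !last_err[idx])
        fprintf(stderr, "alarm raised: %s\n", name);
    else if (!raised && last_err[idx])
        fprintf(stderr, "alarm cleared: %s\n", name);
    last_err[idx] = raised;   /* the LED would be driven from this level */
    return raised;
}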
- */ -static int -lmc_ds3_get_link_status (lmc_softc_t * const sc) -{ - u16 link_status, link_status_11; - int ret = 1; - - lmc_mii_writereg (sc, 0, 17, 7); - link_status = lmc_mii_readreg (sc, 0, 18); - - /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions - * led0 yellow = far-end adapter is in Red alarm condition - * led1 blue = received an Alarm Indication signal - * (upstream failure) - * led2 Green = power to adapter, Gate Array loaded & driver - * attached - * led3 red = Loss of Signal (LOS) or out of frame (OOF) - * conditions detected on T3 receive signal - */ - - lmc_led_on(sc, LMC_DS3_LED2); - - if ((link_status & LMC_FRAMER_REG0_DLOS) || - (link_status & LMC_FRAMER_REG0_OOFS)){ - ret = 0; - if(sc->last_led_err[3] != 1){ - u16 r1; - lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */ - r1 = lmc_mii_readreg (sc, 0, 18); - r1 &= 0xfe; - lmc_mii_writereg(sc, 0, 18, r1); - printk(KERN_WARNING "%s: Red Alarm - Loss of Signal or Loss of Framing\n", sc->name); - } - lmc_led_on(sc, LMC_DS3_LED3); /* turn on red LED */ - sc->last_led_err[3] = 1; - } - else { - lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */ - if(sc->last_led_err[3] == 1){ - u16 r1; - lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */ - r1 = lmc_mii_readreg (sc, 0, 18); - r1 |= 0x01; - lmc_mii_writereg(sc, 0, 18, r1); - } - sc->last_led_err[3] = 0; - } - - lmc_mii_writereg(sc, 0, 17, 0x10); - link_status_11 = lmc_mii_readreg(sc, 0, 18); - if((link_status & LMC_FRAMER_REG0_AIS) || - (link_status_11 & LMC_FRAMER_REG10_XBIT)) { - ret = 0; - if(sc->last_led_err[0] != 1){ - printk(KERN_WARNING "%s: AIS Alarm or XBit Error\n", sc->name); - printk(KERN_WARNING "%s: Remote end has loss of signal or framing\n", sc->name); - } - lmc_led_on(sc, LMC_DS3_LED0); - sc->last_led_err[0] = 1; - } - else { - lmc_led_off(sc, LMC_DS3_LED0); - sc->last_led_err[0] = 0; - } - - lmc_mii_writereg (sc, 0, 17, 9); - link_status = lmc_mii_readreg (sc, 0, 18); - - if(link_status & LMC_FRAMER_REG9_RBLUE){ - ret = 0; - if(sc->last_led_err[1] != 1){ - printk(KERN_WARNING "%s: Blue Alarm - Receiving all 1's\n", sc->name); - } - lmc_led_on(sc, LMC_DS3_LED1); - sc->last_led_err[1] = 1; - } - else { - lmc_led_off(sc, LMC_DS3_LED1); - sc->last_led_err[1] = 0; - } - - return ret; -} - -/* - * 0 == 16bit, 1 == 32bit - */ -static void -lmc_ds3_set_crc_length (lmc_softc_t * const sc, int state) -{ - if (state == LMC_CTL_CRC_LENGTH_32) - { - /* 32 bit */ - sc->lmc_miireg16 |= LMC_MII16_DS3_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32; - } - else - { - /* 16 bit */ - sc->lmc_miireg16 &= ~LMC_MII16_DS3_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16; - } - - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); -} - -static void -lmc_ds3_watchdog (lmc_softc_t * const sc) -{ - -} - - -/* - * SSI methods - */ - -static void lmc_ssi_init(lmc_softc_t * const sc) -{ - u16 mii17; - int cable; - - sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000; - - mii17 = lmc_mii_readreg(sc, 0, 17); - - cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT; - sc->ictl.cable_type = cable; - - lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK); -} - -static void -lmc_ssi_default (lmc_softc_t * const sc) -{ - sc->lmc_miireg16 = LMC_MII16_LED_ALL; - - /* - * make TXCLOCK always be an output - */ - lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK); - - sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN); - sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT); - sc->lmc_media->set_speed (sc, NULL); - sc->lmc_media->set_crc_length (sc, 
LMC_CTL_CRC_LENGTH_16); -} - -/* - * Given a user provided state, set ourselves up to match it. This will - * always reset the card if needed. - */ -static void -lmc_ssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) -{ - if (ctl == NULL) - { - sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source); - sc->lmc_media->set_speed (sc, &sc->ictl); - lmc_set_protocol (sc, NULL); - - return; - } - - /* - * check for change in clock source - */ - if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_INT - && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_EXT) - { - sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT); - sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT; - } - else if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_EXT - && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_INT) - { - sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT); - sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; - } - - if (ctl->clock_rate != sc->ictl.clock_rate) - sc->lmc_media->set_speed (sc, ctl); - - lmc_set_protocol (sc, ctl); -} - -/* - * 1 == internal, 0 == external - */ -static void -lmc_ssi_set_clock (lmc_softc_t * const sc, int ie) -{ - int old; - old = ie; - if (ie == LMC_CTL_CLOCK_SOURCE_EXT) - { - sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK); - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT; - if(ie != old) - printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS); - } - else - { - sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK; - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT; - if(ie != old) - printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS); - } -} - -static void -lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl) -{ - lmc_ctl_t *ictl = &sc->ictl; - lmc_av9110_t *av; - - /* original settings for clock rate of: - * 100 Khz (8,25,0,0,2) were incorrect - * they should have been 80,125,1,3,3 - * There are 17 param combinations to produce this freq. - * For 1.5 Mhz use 120,100,1,1,2 (226 param. combinations) - */ - if (ctl == NULL) - { - av = &ictl->cardspec.ssi; - ictl->clock_rate = 1500000; - av->f = ictl->clock_rate; - av->n = 120; - av->m = 100; - av->v = 1; - av->x = 1; - av->r = 2; - - write_av9110 (sc, av->n, av->m, av->v, av->x, av->r); - return; - } - - av = &ctl->cardspec.ssi; - - if (av->f == 0) - return; - - ictl->clock_rate = av->f; /* really, this is the rate we are */ - ictl->cardspec.ssi = *av; - - write_av9110 (sc, av->n, av->m, av->v, av->x, av->r); -} - -/* - * return hardware link status. - * 0 == link is down, 1 == link is up. - */ -static int -lmc_ssi_get_link_status (lmc_softc_t * const sc) -{ - u16 link_status; - u32 ticks; - int ret = 1; - int hw_hdsk = 1; - - /* - * missing CTS? Hmm. If we require CTS on, we may never get the - * link to come up, so omit it in this test. - * - * Also, it seems that with a loopback cable, DCD isn't asserted, - * so just check for things like this: - * DSR _must_ be asserted. - * One of DCD or CTS must be asserted. - */ - - /* LMC 1000 (SSI) LED definitions - * led0 Green = power to adapter, Gate Array loaded & - * driver attached - * led1 Green = DSR and DTR and RTS and CTS are set - * led2 Green = Cable detected - * led3 red = No timing is available from the - * cable or the on-board frequency - * generator. 
- */ - - link_status = lmc_mii_readreg (sc, 0, 16); - - /* Is the transmit clock still available */ - ticks = LMC_CSR_READ (sc, csr_gp_timer); - ticks = 0x0000ffff - (ticks & 0x0000ffff); - - lmc_led_on (sc, LMC_MII16_LED0); - - /* ====== transmit clock determination ===== */ - if (sc->lmc_timing == LMC_CTL_CLOCK_SOURCE_INT) { - lmc_led_off(sc, LMC_MII16_LED3); - } - else if (ticks == 0 ) { /* no clock found ? */ - ret = 0; - if (sc->last_led_err[3] != 1) { - sc->extra_stats.tx_lossOfClockCnt++; - printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name); - } - sc->last_led_err[3] = 1; - lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */ - } - else { - if(sc->last_led_err[3] == 1) - printk(KERN_WARNING "%s: Clock Returned\n", sc->name); - sc->last_led_err[3] = 0; - lmc_led_off (sc, LMC_MII16_LED3); /* turn OFF red LED */ - } - - if ((link_status & LMC_MII16_SSI_DSR) == 0) { /* Also HSSI CA */ - ret = 0; - hw_hdsk = 0; - } - -#ifdef CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE - if ((link_status & (LMC_MII16_SSI_CTS | LMC_MII16_SSI_DCD)) == 0){ - ret = 0; - hw_hdsk = 0; - } -#endif - - if(hw_hdsk == 0){ - if(sc->last_led_err[1] != 1) - printk(KERN_WARNING "%s: DSR not asserted\n", sc->name); - sc->last_led_err[1] = 1; - lmc_led_off(sc, LMC_MII16_LED1); - } - else { - if(sc->last_led_err[1] != 0) - printk(KERN_WARNING "%s: DSR now asserted\n", sc->name); - sc->last_led_err[1] = 0; - lmc_led_on(sc, LMC_MII16_LED1); - } - - if(ret == 1) { - lmc_led_on(sc, LMC_MII16_LED2); /* Over all good status? */ - } - - return ret; -} - -static void -lmc_ssi_set_link_status (lmc_softc_t * const sc, int state) -{ - if (state == LMC_LINK_UP) - { - sc->lmc_miireg16 |= (LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS); - printk (LMC_PRINTF_FMT ": asserting DTR and RTS\n", LMC_PRINTF_ARGS); - } - else - { - sc->lmc_miireg16 &= ~(LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS); - printk (LMC_PRINTF_FMT ": deasserting DTR and RTS\n", LMC_PRINTF_ARGS); - } - - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); - -} - -/* - * 0 == 16bit, 1 == 32bit - */ -static void -lmc_ssi_set_crc_length (lmc_softc_t * const sc, int state) -{ - if (state == LMC_CTL_CRC_LENGTH_32) - { - /* 32 bit */ - sc->lmc_miireg16 |= LMC_MII16_SSI_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32; - sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4; - - } - else - { - /* 16 bit */ - sc->lmc_miireg16 &= ~LMC_MII16_SSI_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16; - sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2; - } - - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); -} - -/* - * These are bits to program the ssi frequency generator - */ -static inline void -write_av9110_bit (lmc_softc_t * sc, int c) -{ - /* - * set the data bit as we need it. - */ - sc->lmc_gpio &= ~(LMC_GEP_CLK); - if (c & 0x01) - sc->lmc_gpio |= LMC_GEP_DATA; - else - sc->lmc_gpio &= ~(LMC_GEP_DATA); - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - - /* - * set the clock to high - */ - sc->lmc_gpio |= LMC_GEP_CLK; - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - - /* - * set the clock to low again. - */ - sc->lmc_gpio &= ~(LMC_GEP_CLK); - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); -} - -static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r) -{ - int i; - -#if 0 - printk (LMC_PRINTF_FMT ": speed %u, %d %d %d %d %d\n", - LMC_PRINTF_ARGS, sc->ictl.clock_rate, n, m, v, x, r); -#endif - - sc->lmc_gpio |= LMC_GEP_SSI_GENERATOR; - sc->lmc_gpio &= ~(LMC_GEP_DATA | LMC_GEP_CLK); - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - - /* - * Set the TXCLOCK, GENERATOR, SERIAL, and SERIALCLK - * as outputs. 
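write_av9110() below shifts the synthesizer's N(7), M(7), V(1), X(2) and R(2) fields, then a fixed 5-bit 0x17 tail, out LSB-first over the GPIO data/clock pair, bracketed by asserting the generator-select line and returning the pins to inputs afterwards; each write_av9110_bit() call is one data-setup plus clock-pulse cycle. A sketch of the serializer with the actual GPIO register writes elided:

#include <stdint.h>

/* One data-setup + clock-pulse cycle; in the driver this is three
 * GPIO register writes (data with clock low, clock high, clock low). */
static void put_bit(int bit)
{
    (void)bit;   /* hardware access elided in this sketch */
}

/* Shift 'width' bits of 'val' out LSB-first, as write_av9110 does. */
static void put_field(uint32_t val, int width)
{
    for (int i = 0; i < width; i++)
        put_bit((val >> i) & 1);
}

static void program_av9110(uint32_t n, uint32_t m, uint32_t v,
                           uint32_t x, uint32_t r)
{
    put_field(n, 7);
    put_field(m, 7);
    put_field(v, 1);
    put_field(x, 2);
    put_field(r, 2);
    put_field(0x17, 5);   /* fixed control tail the driver appends */
}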
- */ - lmc_gpio_mkoutput (sc, (LMC_GEP_DATA | LMC_GEP_CLK - | LMC_GEP_SSI_GENERATOR)); - - sc->lmc_gpio &= ~(LMC_GEP_SSI_GENERATOR); - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - - /* - * a shifting we will go... - */ - for (i = 0; i < 7; i++) - write_av9110_bit (sc, n >> i); - for (i = 0; i < 7; i++) - write_av9110_bit (sc, m >> i); - for (i = 0; i < 1; i++) - write_av9110_bit (sc, v >> i); - for (i = 0; i < 2; i++) - write_av9110_bit (sc, x >> i); - for (i = 0; i < 2; i++) - write_av9110_bit (sc, r >> i); - for (i = 0; i < 5; i++) - write_av9110_bit (sc, 0x17 >> i); - - /* - * stop driving serial-related signals - */ - lmc_gpio_mkinput (sc, - (LMC_GEP_DATA | LMC_GEP_CLK - | LMC_GEP_SSI_GENERATOR)); -} - -static void lmc_ssi_watchdog(lmc_softc_t * const sc) -{ - u16 mii17 = lmc_mii_readreg(sc, 0, 17); - if (((mii17 >> 3) & 7) == 7) - lmc_led_off(sc, LMC_MII16_LED2); - else - lmc_led_on(sc, LMC_MII16_LED2); -} - -/* - * T1 methods - */ - -/* - * The framer regs are multiplexed through MII regs 17 & 18 - * write the register address to MII reg 17 and the * data to MII reg 18. */ -static void -lmc_t1_write (lmc_softc_t * const sc, int a, int d) -{ - lmc_mii_writereg (sc, 0, 17, a); - lmc_mii_writereg (sc, 0, 18, d); -} - -/* Save a warning -static int -lmc_t1_read (lmc_softc_t * const sc, int a) -{ - lmc_mii_writereg (sc, 0, 17, a); - return lmc_mii_readreg (sc, 0, 18); -} -*/ - - -static void -lmc_t1_init (lmc_softc_t * const sc) -{ - u16 mii16; - int i; - - sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200; - mii16 = lmc_mii_readreg (sc, 0, 16); - - /* reset 8370 */ - mii16 &= ~LMC_MII16_T1_RST; - lmc_mii_writereg (sc, 0, 16, mii16 | LMC_MII16_T1_RST); - lmc_mii_writereg (sc, 0, 16, mii16); - - /* set T1 or E1 line. Uses sc->lmcmii16 reg in function so update it */ - sc->lmc_miireg16 = mii16; - lmc_t1_set_circuit_type(sc, LMC_CTL_CIRCUIT_TYPE_T1); - mii16 = sc->lmc_miireg16; - - lmc_t1_write (sc, 0x01, 0x1B); /* CR0 - primary control */ - lmc_t1_write (sc, 0x02, 0x42); /* JAT_CR - jitter atten config */ - lmc_t1_write (sc, 0x14, 0x00); /* LOOP - loopback config */ - lmc_t1_write (sc, 0x15, 0x00); /* DL3_TS - external data link timeslot */ - lmc_t1_write (sc, 0x18, 0xFF); /* PIO - programmable I/O */ - lmc_t1_write (sc, 0x19, 0x30); /* POE - programmable OE */ - lmc_t1_write (sc, 0x1A, 0x0F); /* CMUX - clock input mux */ - lmc_t1_write (sc, 0x20, 0x41); /* LIU_CR - RX LIU config */ - lmc_t1_write (sc, 0x22, 0x76); /* RLIU_CR - RX LIU config */ - lmc_t1_write (sc, 0x40, 0x03); /* RCR0 - RX config */ - lmc_t1_write (sc, 0x45, 0x00); /* RALM - RX alarm config */ - lmc_t1_write (sc, 0x46, 0x05); /* LATCH - RX alarm/err/cntr latch */ - lmc_t1_write (sc, 0x68, 0x40); /* TLIU_CR - TX LIU config */ - lmc_t1_write (sc, 0x70, 0x0D); /* TCR0 - TX framer config */ - lmc_t1_write (sc, 0x71, 0x05); /* TCR1 - TX config */ - lmc_t1_write (sc, 0x72, 0x0B); /* TFRM - TX frame format */ - lmc_t1_write (sc, 0x73, 0x00); /* TERROR - TX error insert */ - lmc_t1_write (sc, 0x74, 0x00); /* TMAN - TX manual Sa/FEBE config */ - lmc_t1_write (sc, 0x75, 0x00); /* TALM - TX alarm signal config */ - lmc_t1_write (sc, 0x76, 0x00); /* TPATT - TX test pattern config */ - lmc_t1_write (sc, 0x77, 0x00); /* TLB - TX inband loopback config */ - lmc_t1_write (sc, 0x90, 0x05); /* CLAD_CR - clock rate adapter config */ - lmc_t1_write (sc, 0x91, 0x05); /* CSEL - clad freq sel */ - lmc_t1_write (sc, 0xA6, 0x00); /* DL1_CTL - DL1 control */ - lmc_t1_write (sc, 0xB1, 0x00); /* DL2_CTL - DL2 control */ - lmc_t1_write (sc, 0xD0, 0x47); /* 
SBI_CR - sys bus iface config */ - lmc_t1_write (sc, 0xD1, 0x70); /* RSB_CR - RX sys bus config */ - lmc_t1_write (sc, 0xD4, 0x30); /* TSB_CR - TX sys bus config */ - for (i = 0; i < 32; i++) - { - lmc_t1_write (sc, 0x0E0 + i, 0x00); /* SBCn - sys bus per-channel ctl */ - lmc_t1_write (sc, 0x100 + i, 0x00); /* TPCn - TX per-channel ctl */ - lmc_t1_write (sc, 0x180 + i, 0x00); /* RPCn - RX per-channel ctl */ - } - for (i = 1; i < 25; i++) - { - lmc_t1_write (sc, 0x0E0 + i, 0x0D); /* SBCn - sys bus per-channel ctl */ - } - - mii16 |= LMC_MII16_T1_XOE; - lmc_mii_writereg (sc, 0, 16, mii16); - sc->lmc_miireg16 = mii16; -} - -static void -lmc_t1_default (lmc_softc_t * const sc) -{ - sc->lmc_miireg16 = LMC_MII16_LED_ALL; - sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN); - sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1); - sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16); - /* Right now we can only clock from out internal source */ - sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT; -} -/* * Given a user provided state, set ourselves up to match it. This will * always reset the card if needed. - */ -static void -lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) -{ - if (ctl == NULL) - { - sc->lmc_media->set_circuit_type (sc, sc->ictl.circuit_type); - lmc_set_protocol (sc, NULL); - - return; - } - /* - * check for change in circuit type */ - if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_T1 - && sc->ictl.circuit_type == - LMC_CTL_CIRCUIT_TYPE_E1) sc->lmc_media->set_circuit_type (sc, - LMC_CTL_CIRCUIT_TYPE_E1); - else if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_E1 - && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_T1) - sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1); - lmc_set_protocol (sc, ctl); -} -/* - * return hardware link status. - * 0 == link is down, 1 == link is up. - */ static int -lmc_t1_get_link_status (lmc_softc_t * const sc) -{ - u16 link_status; - int ret = 1; - - /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions - * led0 yellow = far-end adapter is in Red alarm condition - * led1 blue = received an Alarm Indication signal - * (upstream failure) - * led2 Green = power to adapter, Gate Array loaded & driver - * attached - * led3 red = Loss of Signal (LOS) or out of frame (OOF) - * conditions detected on T3 receive signal - */ - lmc_led_on(sc, LMC_DS3_LED2); - - lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM1_STATUS); - link_status = lmc_mii_readreg (sc, 0, 18); - - - if (link_status & T1F_RAIS) { /* turn on blue LED */ - ret = 0; - if(sc->last_led_err[1] != 1){ - printk(KERN_WARNING "%s: Receive AIS/Blue Alarm. Far end in RED alarm\n", sc->name); - } - lmc_led_on(sc, LMC_DS3_LED1); - sc->last_led_err[1] = 1; - } - else { - if(sc->last_led_err[1] != 0){ - printk(KERN_WARNING "%s: End AIS/Blue Alarm\n", sc->name); - } - lmc_led_off (sc, LMC_DS3_LED1); - sc->last_led_err[1] = 0; - } - - /* - * Yellow Alarm is nasty evil stuff, looks at data patterns - * inside the channel and confuses it with HDLC framing - * ignore all yellow alarms. 
- * - * Do listen to MultiFrame Yellow alarm which while implemented - * different ways isn't in the channel and hence somewhat - * more reliable - */ - - if (link_status & T1F_RMYEL) { - ret = 0; - if(sc->last_led_err[0] != 1){ - printk(KERN_WARNING "%s: Receive Yellow AIS Alarm\n", sc->name); - } - lmc_led_on(sc, LMC_DS3_LED0); - sc->last_led_err[0] = 1; - } - else { - if(sc->last_led_err[0] != 0){ - printk(KERN_WARNING "%s: End of Yellow AIS Alarm\n", sc->name); - } - lmc_led_off(sc, LMC_DS3_LED0); - sc->last_led_err[0] = 0; - } - - /* - * Loss of signal and los of frame - * Use the green bit to identify which one lit the led - */ - if(link_status & T1F_RLOF){ - ret = 0; - if(sc->last_led_err[3] != 1){ - printk(KERN_WARNING "%s: Local Red Alarm: Loss of Framing\n", sc->name); - } - lmc_led_on(sc, LMC_DS3_LED3); - sc->last_led_err[3] = 1; - - } - else { - if(sc->last_led_err[3] != 0){ - printk(KERN_WARNING "%s: End Red Alarm (LOF)\n", sc->name); - } - if( ! (link_status & T1F_RLOS)) - lmc_led_off(sc, LMC_DS3_LED3); - sc->last_led_err[3] = 0; - } - - if(link_status & T1F_RLOS){ - ret = 0; - if(sc->last_led_err[2] != 1){ - printk(KERN_WARNING "%s: Local Red Alarm: Loss of Signal\n", sc->name); - } - lmc_led_on(sc, LMC_DS3_LED3); - sc->last_led_err[2] = 1; - - } - else { - if(sc->last_led_err[2] != 0){ - printk(KERN_WARNING "%s: End Red Alarm (LOS)\n", sc->name); - } - if( ! (link_status & T1F_RLOF)) - lmc_led_off(sc, LMC_DS3_LED3); - sc->last_led_err[2] = 0; - } - - sc->lmc_xinfo.t1_alarm1_status = link_status; - - lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM2_STATUS); - sc->lmc_xinfo.t1_alarm2_status = lmc_mii_readreg (sc, 0, 18); - - return ret; -} - -/* - * 1 == T1 Circuit Type , 0 == E1 Circuit Type - */ -static void -lmc_t1_set_circuit_type (lmc_softc_t * const sc, int ie) -{ - if (ie == LMC_CTL_CIRCUIT_TYPE_T1) { - sc->lmc_miireg16 |= LMC_MII16_T1_Z; - sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_T1; - printk(KERN_INFO "%s: In T1 Mode\n", sc->name); - } - else { - sc->lmc_miireg16 &= ~LMC_MII16_T1_Z; - sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_E1; - printk(KERN_INFO "%s: In E1 Mode\n", sc->name); - } - - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); - -} - -/* - * 0 == 16bit, 1 == 32bit */ -static void -lmc_t1_set_crc_length (lmc_softc_t * const sc, int state) -{ - if (state == LMC_CTL_CRC_LENGTH_32) - { - /* 32 bit */ - sc->lmc_miireg16 |= LMC_MII16_T1_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32; - sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4; - - } - else - { - /* 16 bit */ sc->lmc_miireg16 &= ~LMC_MII16_T1_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16; - sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2; - - } - - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); -} - -/* - * 1 == internal, 0 == external - */ -static void -lmc_t1_set_clock (lmc_softc_t * const sc, int ie) -{ - int old; - old = ie; - if (ie == LMC_CTL_CLOCK_SOURCE_EXT) - { - sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK); - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT; - if(old != ie) - printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS); - } - else - { - sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK; - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT; - if(old != ie) - printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS); - } -} - -static void -lmc_t1_watchdog (lmc_softc_t * const sc) -{ -} - -static void -lmc_set_protocol (lmc_softc_t * const sc, lmc_ctl_t * ctl) -{ - if (!ctl) - sc->ictl.keepalive_onoff = LMC_CTL_ON; -} 
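The T1 code above reaches the Bt8370 framer through an indirection window: the framer register address is written to MII register 17 and the data moves through MII register 18 (lmc_t1_write(), plus the commented-out lmc_t1_read()); the DS3 framer code uses the same two-register convention. A self-contained sketch of that indirect access, with an array standing in for the MDIO bus:

#include <stdint.h>

/* MII registers used as the indirection window to the framer. */
#define MII_FRAMER_ADDR 17
#define MII_FRAMER_DATA 18

/* Stand-ins for the driver's MDIO primitives. */
static uint16_t mii_regs[32];
static void     mii_write(unsigned reg, uint16_t v) { mii_regs[reg] = v; }
static uint16_t mii_read(unsigned reg)              { return mii_regs[reg]; }

/* Indirect access: latch the framer address, then move the data. */
static void framer_write(uint16_t addr, uint16_t data)
{
    mii_write(MII_FRAMER_ADDR, addr);
    mii_write(MII_FRAMER_DATA, data);
}

static uint16_t framer_read(uint16_t addr)
{
    mii_write(MII_FRAMER_ADDR, addr);
    return mii_read(MII_FRAMER_DATA);
}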
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c deleted file mode 100644 index e5487616a816..000000000000 --- a/drivers/net/wan/lmc/lmc_proto.c +++ /dev/null @@ -1,106 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only - /* - * Copyright (c) 1997-2000 LAN Media Corporation (LMC) - * All rights reserved. www.lanmedia.com - * - * This code is written by: - * Andrew Stanley-Jones (asj@cban.com) - * Rob Braun (bbraun@vix.com), - * Michael Graff (explorer@vix.com) and - * Matt Thomas (matt@3am-software.com). - * - * With Help By: - * David Boggs - * Ron Crane - * Allan Cox - * - * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards. - */ - -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/timer.h> -#include <linux/ptrace.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/interrupt.h> -#include <linux/in.h> -#include <linux/if_arp.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/inet.h> -#include <linux/workqueue.h> -#include <linux/proc_fs.h> -#include <linux/bitops.h> -#include <asm/processor.h> /* Processor type for cache alignment. */ -#include <asm/io.h> -#include <asm/dma.h> -#include <linux/smp.h> - -#include "lmc.h" -#include "lmc_var.h" -#include "lmc_debug.h" -#include "lmc_ioctl.h" -#include "lmc_proto.h" - -// attach -void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ -{ - if (sc->if_type == LMC_NET) { - struct net_device *dev = sc->lmc_device; - /* - * They set a few basics because they don't use HDLC - */ - dev->flags |= IFF_POINTOPOINT; - dev->hard_header_len = 0; - dev->addr_len = 0; - } -} - -int lmc_proto_open(lmc_softc_t *sc) -{ - int ret = 0; - - if (sc->if_type == LMC_PPP) { - ret = hdlc_open(sc->lmc_device); - if (ret < 0) - printk(KERN_WARNING "%s: HDLC open failed: %d\n", - sc->name, ret); - } - return ret; -} - -void lmc_proto_close(lmc_softc_t *sc) -{ - if (sc->if_type == LMC_PPP) - hdlc_close(sc->lmc_device); -} - -__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/ -{ - switch(sc->if_type){ - case LMC_PPP: - return hdlc_type_trans(skb, sc->lmc_device); - case LMC_NET: - return htons(ETH_P_802_2); - case LMC_RAW: /* Packet type for skbuff kind of useless */ - return htons(ETH_P_802_2); - default: - printk(KERN_WARNING "%s: No protocol set for this interface, assuming 802.2 (which is wrong!!)\n", sc->name); - return htons(ETH_P_802_2); - } -} - -void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/ -{ - switch(sc->if_type){ - case LMC_PPP: - case LMC_NET: - default: - netif_rx(skb); - break; - case LMC_RAW: - break; - } -} diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h deleted file mode 100644 index e56e7072de44..000000000000 --- a/drivers/net/wan/lmc/lmc_proto.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LMC_PROTO_H_ -#define _LMC_PROTO_H_ - -#include <linux/hdlc.h> - -void lmc_proto_attach(lmc_softc_t *sc); -int lmc_proto_open(lmc_softc_t *sc); -void lmc_proto_close(lmc_softc_t *sc); -__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb); -void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb); - -static inline lmc_softc_t* dev_to_sc(struct net_device *dev) -{ - return (lmc_softc_t *)dev_to_hdlc(dev)->priv; -} - -#endif diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h deleted file mode 100644 index 99f0aa787a35..000000000000 --- a/drivers/net/wan/lmc/lmc_var.h +++ /dev/null @@ -1,468 
+0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _LMC_VAR_H_ -#define _LMC_VAR_H_ - - /* - * Copyright (c) 1997-2000 LAN Media Corporation (LMC) - * All rights reserved. www.lanmedia.com - * - * This code is written by: - * Andrew Stanley-Jones (asj@cban.com) - * Rob Braun (bbraun@vix.com), - * Michael Graff (explorer@vix.com) and - * Matt Thomas (matt@3am-software.com). - */ - -#include <linux/timer.h> - -/* - * basic definitions used in lmc include files - */ - -typedef struct lmc___softc lmc_softc_t; -typedef struct lmc___media lmc_media_t; -typedef struct lmc___ctl lmc_ctl_t; - -#define lmc_csrptr_t unsigned long - -#define LMC_REG_RANGE 0x80 - -#define LMC_PRINTF_FMT "%s" -#define LMC_PRINTF_ARGS (sc->lmc_device->name) - -#define TX_TIMEOUT (2*HZ) - -#define LMC_TXDESCS 32 -#define LMC_RXDESCS 32 - -#define LMC_LINK_UP 1 -#define LMC_LINK_DOWN 0 - -/* These macros for generic read and write to and from the dec chip */ -#define LMC_CSR_READ(sc, csr) \ - inl((sc)->lmc_csrs.csr) -#define LMC_CSR_WRITE(sc, reg, val) \ - outl((val), (sc)->lmc_csrs.reg) - -//#ifdef _LINUX_DELAY_H -// #define SLOW_DOWN_IO udelay(2); -// #undef __SLOW_DOWN_IO -// #define __SLOW_DOWN_IO udelay(2); -//#endif - -#define DELAY(n) SLOW_DOWN_IO - -#define lmc_delay() inl(sc->lmc_csrs.csr_9) - -/* This macro sync's up with the mii so that reads and writes can take place */ -#define LMC_MII_SYNC(sc) do {int n=32; while( n >= 0 ) { \ - LMC_CSR_WRITE((sc), csr_9, 0x20000); \ - lmc_delay(); \ - LMC_CSR_WRITE((sc), csr_9, 0x30000); \ - lmc_delay(); \ - n--; }} while(0) - -struct lmc_regfile_t { - lmc_csrptr_t csr_busmode; /* CSR0 */ - lmc_csrptr_t csr_txpoll; /* CSR1 */ - lmc_csrptr_t csr_rxpoll; /* CSR2 */ - lmc_csrptr_t csr_rxlist; /* CSR3 */ - lmc_csrptr_t csr_txlist; /* CSR4 */ - lmc_csrptr_t csr_status; /* CSR5 */ - lmc_csrptr_t csr_command; /* CSR6 */ - lmc_csrptr_t csr_intr; /* CSR7 */ - lmc_csrptr_t csr_missed_frames; /* CSR8 */ - lmc_csrptr_t csr_9; /* CSR9 */ - lmc_csrptr_t csr_10; /* CSR10 */ - lmc_csrptr_t csr_11; /* CSR11 */ - lmc_csrptr_t csr_12; /* CSR12 */ - lmc_csrptr_t csr_13; /* CSR13 */ - lmc_csrptr_t csr_14; /* CSR14 */ - lmc_csrptr_t csr_15; /* CSR15 */ -}; - -#define csr_enetrom csr_9 /* 21040 */ -#define csr_reserved csr_10 /* 21040 */ -#define csr_full_duplex csr_11 /* 21040 */ -#define csr_bootrom csr_10 /* 21041/21140A/?? 
*/ -#define csr_gp csr_12 /* 21140* */ -#define csr_watchdog csr_15 /* 21140* */ -#define csr_gp_timer csr_11 /* 21041/21140* */ -#define csr_srom_mii csr_9 /* 21041/21140* */ -#define csr_sia_status csr_12 /* 2104x */ -#define csr_sia_connectivity csr_13 /* 2104x */ -#define csr_sia_tx_rx csr_14 /* 2104x */ -#define csr_sia_general csr_15 /* 2104x */ - -/* tulip length/control transmit descriptor definitions - * used to define bits in the second tulip_desc_t field (length) - * for the transmit descriptor -baz */ - -#define LMC_TDES_FIRST_BUFFER_SIZE ((u32)(0x000007FF)) -#define LMC_TDES_SECOND_BUFFER_SIZE ((u32)(0x003FF800)) -#define LMC_TDES_HASH_FILTERING ((u32)(0x00400000)) -#define LMC_TDES_DISABLE_PADDING ((u32)(0x00800000)) -#define LMC_TDES_SECOND_ADDR_CHAINED ((u32)(0x01000000)) -#define LMC_TDES_END_OF_RING ((u32)(0x02000000)) -#define LMC_TDES_ADD_CRC_DISABLE ((u32)(0x04000000)) -#define LMC_TDES_SETUP_PACKET ((u32)(0x08000000)) -#define LMC_TDES_INVERSE_FILTERING ((u32)(0x10000000)) -#define LMC_TDES_FIRST_SEGMENT ((u32)(0x20000000)) -#define LMC_TDES_LAST_SEGMENT ((u32)(0x40000000)) -#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u32)(0x80000000)) - -#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11 -#define TDES_COLLISION_COUNT_BIT_NUMBER 3 - -/* Constants for the RCV descriptor RDES */ - -#define LMC_RDES_OVERFLOW ((u32)(0x00000001)) -#define LMC_RDES_CRC_ERROR ((u32)(0x00000002)) -#define LMC_RDES_DRIBBLING_BIT ((u32)(0x00000004)) -#define LMC_RDES_REPORT_ON_MII_ERR ((u32)(0x00000008)) -#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u32)(0x00000010)) -#define LMC_RDES_FRAME_TYPE ((u32)(0x00000020)) -#define LMC_RDES_COLLISION_SEEN ((u32)(0x00000040)) -#define LMC_RDES_FRAME_TOO_LONG ((u32)(0x00000080)) -#define LMC_RDES_LAST_DESCRIPTOR ((u32)(0x00000100)) -#define LMC_RDES_FIRST_DESCRIPTOR ((u32)(0x00000200)) -#define LMC_RDES_MULTICAST_FRAME ((u32)(0x00000400)) -#define LMC_RDES_RUNT_FRAME ((u32)(0x00000800)) -#define LMC_RDES_DATA_TYPE ((u32)(0x00003000)) -#define LMC_RDES_LENGTH_ERROR ((u32)(0x00004000)) -#define LMC_RDES_ERROR_SUMMARY ((u32)(0x00008000)) -#define LMC_RDES_FRAME_LENGTH ((u32)(0x3FFF0000)) -#define LMC_RDES_OWN_BIT ((u32)(0x80000000)) - -#define RDES_FRAME_LENGTH_BIT_NUMBER 16 - -#define LMC_RDES_ERROR_MASK ( (u32)( \ - LMC_RDES_OVERFLOW \ - | LMC_RDES_DRIBBLING_BIT \ - | LMC_RDES_REPORT_ON_MII_ERR \ - | LMC_RDES_COLLISION_SEEN ) ) - - -/* - * Ioctl info - */ - -typedef struct { - u32 n; - u32 m; - u32 v; - u32 x; - u32 r; - u32 f; - u32 exact; -} lmc_av9110_t; - -/* - * Common structure passed to the ioctl code. - */ -struct lmc___ctl { - u32 cardtype; - u32 clock_source; /* HSSI, T1 */ - u32 clock_rate; /* T1 */ - u32 crc_length; - u32 cable_length; /* DS3 */ - u32 scrambler_onoff; /* DS3 */ - u32 cable_type; /* T1 */ - u32 keepalive_onoff; /* protocol */ - u32 ticks; /* ticks/sec */ - union { - lmc_av9110_t ssi; - } cardspec; - u32 circuit_type; /* T1 or E1 */ -}; - - -/* - * Careful, look at the data sheet, there's more to this - * structure than meets the eye. It should probably be: - * - * struct tulip_desc_t { - * u8 own:1; - * u32 status:31; - * u32 control:10; - * u32 buffer1; - * u32 buffer2; - * }; - * You could also expand status control to provide more bit information - */ - -struct tulip_desc_t { - s32 status; - s32 length; - u32 buffer1; - u32 buffer2; -}; - -/* - * media independent methods to check on media status, link, light LEDs, - * etc. 
- */ -struct lmc___media { - void (* init)(lmc_softc_t * const); - void (* defaults)(lmc_softc_t * const); - void (* set_status)(lmc_softc_t * const, lmc_ctl_t *); - void (* set_clock_source)(lmc_softc_t * const, int); - void (* set_speed)(lmc_softc_t * const, lmc_ctl_t *); - void (* set_cable_length)(lmc_softc_t * const, int); - void (* set_scrambler)(lmc_softc_t * const, int); - int (* get_link_status)(lmc_softc_t * const); - void (* set_link_status)(lmc_softc_t * const, int); - void (* set_crc_length)(lmc_softc_t * const, int); - void (* set_circuit_type)(lmc_softc_t * const, int); - void (* watchdog)(lmc_softc_t * const); -}; - - -#define STATCHECK 0xBEEFCAFE - -struct lmc_extra_statistics -{ - u32 version_size; - u32 lmc_cardtype; - - u32 tx_ProcTimeout; - u32 tx_IntTimeout; - u32 tx_NoCompleteCnt; - u32 tx_MaxXmtsB4Int; - u32 tx_TimeoutCnt; - u32 tx_OutOfSyncPtr; - u32 tx_tbusy0; - u32 tx_tbusy1; - u32 tx_tbusy_calls; - u32 resetCount; - u32 lmc_txfull; - u32 tbusy; - u32 dirtyTx; - u32 lmc_next_tx; - u32 otherTypeCnt; - u32 lastType; - u32 lastTypeOK; - u32 txLoopCnt; - u32 usedXmtDescripCnt; - u32 txIndexCnt; - u32 rxIntLoopCnt; - - u32 rx_SmallPktCnt; - u32 rx_BadPktSurgeCnt; - u32 rx_BuffAllocErr; - u32 tx_lossOfClockCnt; - - /* T1 error counters */ - u32 framingBitErrorCount; - u32 lineCodeViolationCount; - - u32 lossOfFrameCount; - u32 changeOfFrameAlignmentCount; - u32 severelyErroredFrameCount; - - u32 check; -}; - -typedef struct lmc_xinfo { - u32 Magic0; /* BEEFCAFE */ - - u32 PciCardType; - u32 PciSlotNumber; /* PCI slot number */ - - u16 DriverMajorVersion; - u16 DriverMinorVersion; - u16 DriverSubVersion; - - u16 XilinxRevisionNumber; - u16 MaxFrameSize; - - u16 t1_alarm1_status; - u16 t1_alarm2_status; - - int link_status; - u32 mii_reg16; - - u32 Magic1; /* DEADBEEF */ -} LMC_XINFO; - - -/* - * forward decl - */ -struct lmc___softc { - char *name; - u8 board_idx; - struct lmc_extra_statistics extra_stats; - struct net_device *lmc_device; - - int hang, rxdesc, bad_packet, some_counter; - u32 txgo; - struct lmc_regfile_t lmc_csrs; - volatile u32 lmc_txtick; - volatile u32 lmc_rxtick; - u32 lmc_flags; - u32 lmc_intrmask; /* our copy of csr_intr */ - u32 lmc_cmdmode; /* our copy of csr_cmdmode */ - u32 lmc_busmode; /* our copy of csr_busmode */ - u32 lmc_gpio_io; /* state of in/out settings */ - u32 lmc_gpio; /* state of outputs */ - struct sk_buff* lmc_txq[LMC_TXDESCS]; - struct sk_buff* lmc_rxq[LMC_RXDESCS]; - volatile - struct tulip_desc_t lmc_rxring[LMC_RXDESCS]; - volatile - struct tulip_desc_t lmc_txring[LMC_TXDESCS]; - unsigned int lmc_next_rx, lmc_next_tx; - volatile - unsigned int lmc_taint_tx, lmc_taint_rx; - int lmc_tx_start, lmc_txfull; - int lmc_txbusy; - u16 lmc_miireg16; - int lmc_ok; - int last_link_status; - int lmc_cardtype; - u32 last_frameerr; - lmc_media_t *lmc_media; - struct timer_list timer; - lmc_ctl_t ictl; - u32 TxDescriptControlInit; - - int tx_TimeoutInd; /* additional driver state */ - int tx_TimeoutDisplay; - unsigned int lastlmc_taint_tx; - int lasttx_packets; - u32 tx_clockState; - u32 lmc_crcSize; - LMC_XINFO lmc_xinfo; - char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */ - char lmc_timing; /* for HSSI and SSI */ - int got_irq; - - char last_led_err[4]; - - u32 last_int; - u32 num_int; - - spinlock_t lmc_lock; - u16 if_type; /* HDLC/PPP or NET */ - - /* Failure cases */ - u8 failed_ring; - u8 failed_recv_alloc; - - /* Structure check */ - u32 check; -}; - -#define LMC_PCI_TIME 1 -#define LMC_EXT_TIME 0 - -#define PKT_BUF_SZ 1542 /* was 
1536 */ - -/* CSR5 settings */ -#define TIMER_INT 0x00000800 -#define TP_LINK_FAIL 0x00001000 -#define TP_LINK_PASS 0x00000010 -#define NORMAL_INT 0x00010000 -#define ABNORMAL_INT 0x00008000 -#define RX_JABBER_INT 0x00000200 -#define RX_DIED 0x00000100 -#define RX_NOBUFF 0x00000080 -#define RX_INT 0x00000040 -#define TX_FIFO_UNDER 0x00000020 -#define TX_JABBER 0x00000008 -#define TX_NOBUFF 0x00000004 -#define TX_DIED 0x00000002 -#define TX_INT 0x00000001 - -/* CSR6 settings */ -#define OPERATION_MODE 0x00000200 /* Full Duplex */ -#define PROMISC_MODE 0x00000040 /* Promiscuous Mode */ -#define RECEIVE_ALL 0x40000000 /* Receive All */ -#define PASS_BAD_FRAMES 0x00000008 /* Pass Bad Frames */ - -/* Dec control registers CSR6 as well */ -#define LMC_DEC_ST 0x00002000 -#define LMC_DEC_SR 0x00000002 - -/* CSR15 settings */ -#define RECV_WATCHDOG_DISABLE 0x00000010 -#define JABBER_DISABLE 0x00000001 - -/* More settings */ -/* - * aSR6 -- Command (Operation Mode) Register - */ -#define TULIP_CMD_RECEIVEALL 0x40000000L /* (RW) Receivel all frames? */ -#define TULIP_CMD_MUSTBEONE 0x02000000L /* (RW) Must Be One (21140) */ -#define TULIP_CMD_TXTHRSHLDCTL 0x00400000L /* (RW) Transmit Threshold Mode (21140) */ -#define TULIP_CMD_STOREFWD 0x00200000L /* (RW) Store and Forward (21140) */ -#define TULIP_CMD_NOHEARTBEAT 0x00080000L /* (RW) No Heartbeat (21140) */ -#define TULIP_CMD_PORTSELECT 0x00040000L /* (RW) Post Select (100Mb) (21140) */ -#define TULIP_CMD_FULLDUPLEX 0x00000200L /* (RW) Full Duplex Mode */ -#define TULIP_CMD_OPERMODE 0x00000C00L /* (RW) Operating Mode */ -#define TULIP_CMD_PROMISCUOUS 0x00000041L /* (RW) Promiscuous Mode */ -#define TULIP_CMD_PASSBADPKT 0x00000008L /* (RW) Pass Bad Frames */ -#define TULIP_CMD_THRESHOLDCTL 0x0000C000L /* (RW) Threshold Control */ - -#define TULIP_GP_PINSET 0x00000100L -#define TULIP_BUSMODE_SWRESET 0x00000001L -#define TULIP_WATCHDOG_TXDISABLE 0x00000001L -#define TULIP_WATCHDOG_RXDISABLE 0x00000010L - -#define TULIP_STS_NORMALINTR 0x00010000L /* (RW) Normal Interrupt */ -#define TULIP_STS_ABNRMLINTR 0x00008000L /* (RW) Abnormal Interrupt */ -#define TULIP_STS_ERI 0x00004000L /* (RW) Early Receive Interrupt */ -#define TULIP_STS_SYSERROR 0x00002000L /* (RW) System Error */ -#define TULIP_STS_GTE 0x00000800L /* (RW) General Pupose Timer Exp */ -#define TULIP_STS_ETI 0x00000400L /* (RW) Early Transmit Interrupt */ -#define TULIP_STS_RXWT 0x00000200L /* (RW) Receiver Watchdog Timeout */ -#define TULIP_STS_RXSTOPPED 0x00000100L /* (RW) Receiver Process Stopped */ -#define TULIP_STS_RXNOBUF 0x00000080L /* (RW) Receive Buf Unavail */ -#define TULIP_STS_RXINTR 0x00000040L /* (RW) Receive Interrupt */ -#define TULIP_STS_TXUNDERFLOW 0x00000020L /* (RW) Transmit Underflow */ -#define TULIP_STS_TXJABER 0x00000008L /* (RW) Jabber timeout */ -#define TULIP_STS_TXNOBUF 0x00000004L -#define TULIP_STS_TXSTOPPED 0x00000002L /* (RW) Transmit Process Stopped */ -#define TULIP_STS_TXINTR 0x00000001L /* (RW) Transmit Interrupt */ - -#define TULIP_STS_RXS_STOPPED 0x00000000L /* 000 - Stopped */ - -#define TULIP_STS_RXSTOPPED 0x00000100L /* (RW) Receive Process Stopped */ -#define TULIP_STS_RXNOBUF 0x00000080L - -#define TULIP_CMD_TXRUN 0x00002000L /* (RW) Start/Stop Transmitter */ -#define TULIP_CMD_RXRUN 0x00000002L /* (RW) Start/Stop Receive Filtering */ -#define TULIP_DSTS_TxDEFERRED 0x00000001 /* Initially Deferred */ -#define TULIP_DSTS_OWNER 0x80000000 /* Owner (1 = 21040) */ -#define TULIP_DSTS_RxMIIERR 0x00000008 -#define LMC_DSTS_ERRSUM (TULIP_DSTS_RxMIIERR) - 
-#define TULIP_DEFAULT_INTR_MASK (TULIP_STS_NORMALINTR \ - | TULIP_STS_RXINTR \ - | TULIP_STS_TXINTR \ - | TULIP_STS_ABNRMLINTR \ - | TULIP_STS_SYSERROR \ - | TULIP_STS_TXSTOPPED \ - | TULIP_STS_TXUNDERFLOW\ - | TULIP_STS_RXSTOPPED ) - -#define DESC_OWNED_BY_SYSTEM ((u32)(0x00000000)) -#define DESC_OWNED_BY_DC21X4 ((u32)(0x80000000)) - -#ifndef TULIP_CMD_RECEIVEALL -#define TULIP_CMD_RECEIVEALL 0x40000000L -#endif - -/* Adapter module number */ -#define LMC_ADAP_HSSI 2 -#define LMC_ADAP_DS3 3 -#define LMC_ADAP_SSI 4 -#define LMC_ADAP_T1 5 - -#define LMC_MTU 1500 - -#define LMC_CRC_LEN_16 2 /* 16-bit CRC */ -#define LMC_CRC_LEN_32 4 - -#endif /* _LMC_VAR_H_ */ diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c deleted file mode 100644 index eddd20aab691..000000000000 --- a/drivers/net/wan/sealevel.c +++ /dev/null @@ -1,352 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* Sealevel Systems 4021 driver. - * - * (c) Copyright 1999, 2001 Alan Cox - * (c) Copyright 2001 Red Hat Inc. - * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/net.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <linux/if_arp.h> -#include <linux/delay.h> -#include <linux/hdlc.h> -#include <linux/ioport.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <net/arp.h> - -#include <asm/irq.h> -#include <asm/io.h> -#include <asm/dma.h> -#include <asm/byteorder.h> -#include "z85230.h" - -struct slvl_device { - struct z8530_channel *chan; - int channel; -}; - -struct slvl_board { - struct slvl_device dev[2]; - struct z8530_dev board; - int iobase; -}; - - /* Network driver support routines */ - -static inline struct slvl_device *dev_to_chan(struct net_device *dev) -{ - return (struct slvl_device *)dev_to_hdlc(dev)->priv; -} - -/* Frame receive. Simple for our card as we do HDLC and there - * is no funny garbage involved - */ - -static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) -{ - /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ - skb_trim(skb, skb->len - 2); - skb->protocol = hdlc_type_trans(skb, c->netdevice); - skb_reset_mac_header(skb); - skb->dev = c->netdevice; - netif_rx(skb); -} - - /* We've been placed in the UP state */ - -static int sealevel_open(struct net_device *d) -{ - struct slvl_device *slvl = dev_to_chan(d); - int err = -1; - int unit = slvl->channel; - - /* Link layer up. */ - - switch (unit) { - case 0: - err = z8530_sync_dma_open(d, slvl->chan); - break; - case 1: - err = z8530_sync_open(d, slvl->chan); - break; - } - - if (err) - return err; - - err = hdlc_open(d); - if (err) { - switch (unit) { - case 0: - z8530_sync_dma_close(d, slvl->chan); - break; - case 1: - z8530_sync_close(d, slvl->chan); - break; - } - return err; - } - - slvl->chan->rx_function = sealevel_input; - - netif_start_queue(d); - return 0; -} - -static int sealevel_close(struct net_device *d) -{ - struct slvl_device *slvl = dev_to_chan(d); - int unit = slvl->channel; - - /* Discard new frames */ - - slvl->chan->rx_function = z8530_null_rx; - - hdlc_close(d); - netif_stop_queue(d); - - switch (unit) { - case 0: - z8530_sync_dma_close(d, slvl->chan); - break; - case 1: - z8530_sync_close(d, slvl->chan); - break; - } - return 0; -} - -/* Passed network frames, fire them downwind. 
*/ - -static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb, - struct net_device *d) -{ - return z8530_queue_xmit(dev_to_chan(d)->chan, skb); -} - -static int sealevel_attach(struct net_device *dev, unsigned short encoding, - unsigned short parity) -{ - if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) - return 0; - return -EINVAL; -} - -static const struct net_device_ops sealevel_ops = { - .ndo_open = sealevel_open, - .ndo_stop = sealevel_close, - .ndo_start_xmit = hdlc_start_xmit, - .ndo_siocwandev = hdlc_ioctl, -}; - -static int slvl_setup(struct slvl_device *sv, int iobase, int irq) -{ - struct net_device *dev = alloc_hdlcdev(sv); - - if (!dev) - return -1; - - dev_to_hdlc(dev)->attach = sealevel_attach; - dev_to_hdlc(dev)->xmit = sealevel_queue_xmit; - dev->netdev_ops = &sealevel_ops; - dev->base_addr = iobase; - dev->irq = irq; - - if (register_hdlc_device(dev)) { - pr_err("unable to register HDLC device\n"); - free_netdev(dev); - return -1; - } - - sv->chan->netdevice = dev; - return 0; -} - -/* Allocate and setup Sealevel board. */ - -static __init struct slvl_board *slvl_init(int iobase, int irq, - int txdma, int rxdma, int slow) -{ - struct z8530_dev *dev; - struct slvl_board *b; - - /* Get the needed I/O space */ - - if (!request_region(iobase, 8, "Sealevel 4021")) { - pr_warn("I/O 0x%X already in use\n", iobase); - return NULL; - } - - b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL); - if (!b) - goto err_kzalloc; - - b->dev[0].chan = &b->board.chanA; - b->dev[0].channel = 0; - - b->dev[1].chan = &b->board.chanB; - b->dev[1].channel = 1; - - dev = &b->board; - - /* Stuff in the I/O addressing */ - - dev->active = 0; - - b->iobase = iobase; - - /* Select 8530 delays for the old board */ - - if (slow) - iobase |= Z8530_PORT_SLEEP; - - dev->chanA.ctrlio = iobase + 1; - dev->chanA.dataio = iobase; - dev->chanB.ctrlio = iobase + 3; - dev->chanB.dataio = iobase + 2; - - dev->chanA.irqs = &z8530_nop; - dev->chanB.irqs = &z8530_nop; - - /* Assert DTR enable DMA */ - - outb(3 | (1 << 7), b->iobase + 4); - - /* We want a fast IRQ for this device. 
Actually we'd like an even faster - * IRQ ;) - This is one driver RtLinux is made for - */ - - if (request_irq(irq, z8530_interrupt, 0, - "SeaLevel", dev) < 0) { - pr_warn("IRQ %d already in use\n", irq); - goto err_request_irq; - } - - dev->irq = irq; - dev->chanA.private = &b->dev[0]; - dev->chanB.private = &b->dev[1]; - dev->chanA.dev = dev; - dev->chanB.dev = dev; - - dev->chanA.txdma = 3; - dev->chanA.rxdma = 1; - if (request_dma(dev->chanA.txdma, "SeaLevel (TX)")) - goto err_dma_tx; - - if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)")) - goto err_dma_rx; - - disable_irq(irq); - - /* Begin normal initialise */ - - if (z8530_init(dev) != 0) { - pr_err("Z8530 series device not found\n"); - enable_irq(irq); - goto free_hw; - } - if (dev->type == Z85C30) { - z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); - z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream); - } else { - z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); - z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230); - } - - /* Now we can take the IRQ */ - - enable_irq(irq); - - if (slvl_setup(&b->dev[0], iobase, irq)) - goto free_hw; - if (slvl_setup(&b->dev[1], iobase, irq)) - goto free_netdev0; - - z8530_describe(dev, "I/O", iobase); - dev->active = 1; - return b; - -free_netdev0: - unregister_hdlc_device(b->dev[0].chan->netdevice); - free_netdev(b->dev[0].chan->netdevice); -free_hw: - free_dma(dev->chanA.rxdma); -err_dma_rx: - free_dma(dev->chanA.txdma); -err_dma_tx: - free_irq(irq, dev); -err_request_irq: - kfree(b); -err_kzalloc: - release_region(iobase, 8); - return NULL; -} - -static void __exit slvl_shutdown(struct slvl_board *b) -{ - int u; - - z8530_shutdown(&b->board); - - for (u = 0; u < 2; u++) { - struct net_device *d = b->dev[u].chan->netdevice; - - unregister_hdlc_device(d); - free_netdev(d); - } - - free_irq(b->board.irq, &b->board); - free_dma(b->board.chanA.rxdma); - free_dma(b->board.chanA.txdma); - /* DMA off on the card, drop DTR */ - outb(0, b->iobase); - release_region(b->iobase, 8); - kfree(b); -} - -static int io = 0x238; -static int txdma = 1; -static int rxdma = 3; -static int irq = 5; -static bool slow; - -module_param_hw(io, int, ioport, 0); -MODULE_PARM_DESC(io, "The I/O base of the Sealevel card"); -module_param_hw(txdma, int, dma, 0); -MODULE_PARM_DESC(txdma, "Transmit DMA channel"); -module_param_hw(rxdma, int, dma, 0); -MODULE_PARM_DESC(rxdma, "Receive DMA channel"); -module_param_hw(irq, int, irq, 0); -MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card"); -module_param(slow, bool, 0); -MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012"); - -MODULE_AUTHOR("Alan Cox"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021"); - -static struct slvl_board *slvl_unit; - -static int __init slvl_init_module(void) -{ - slvl_unit = slvl_init(io, irq, txdma, rxdma, slow); - - return slvl_unit ? 
0 : -ENODEV; -} - -static void __exit slvl_cleanup_module(void) -{ - if (slvl_unit) - slvl_shutdown(slvl_unit); -} - -module_init(slvl_init_module); -module_exit(slvl_cleanup_module); diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c deleted file mode 100644 index 982a03488a00..000000000000 --- a/drivers/net/wan/z85230.c +++ /dev/null @@ -1,1641 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk> - * (c) Copyright 2000, 2001 Red Hat Inc - * - * Development of this driver was funded by Equiinet Ltd - * http://www.equiinet.com - * - * ChangeLog: - * - * Asynchronous mode dropped for 2.2. For 2.5 we will attempt the - * unification of all the Z85x30 asynchronous drivers for real. - * - * DMA now uses get_free_page as kmalloc buffers may span a 64K - * boundary. - * - * Modified for SMP safety and SMP locking by Alan Cox - * <alan@lxorguk.ukuu.org.uk> - * - * Performance - * - * Z85230: - * Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud - * X.25 is not unrealistic on all machines. DMA mode can in theory - * handle T1/E1 quite nicely. In practice the limit seems to be about - * 512Kbit->1Mbit depending on motherboard. - * - * Z85C30: - * 64K will take DMA, 9600 baud X.25 should be ok. - * - * Z8530: - * Synchronous mode without DMA is unlikely to pass about 2400 baud. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/net.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <linux/if_arp.h> -#include <linux/delay.h> -#include <linux/hdlc.h> -#include <linux/ioport.h> -#include <linux/init.h> -#include <linux/gfp.h> -#include <asm/dma.h> -#include <asm/io.h> -#define RT_LOCK -#define RT_UNLOCK -#include <linux/spinlock.h> - -#include "z85230.h" - -/** - * z8530_read_port - Architecture specific interface function - * @p: port to read - * - * Provided port access methods. The Comtrol SV11 requires no delays - * between accesses and uses PC I/O. Some drivers may need a 5uS delay - * - * In the longer term this should become an architecture specific - * section so that this can become a generic driver interface for all - * platforms. For now we only handle PC I/O ports with or without the - * dread 5uS sanity delay. - * - * The caller must hold sufficient locks to avoid violating the horrible - * 5uS delay rule. - */ - -static inline int z8530_read_port(unsigned long p) -{ - u8 r = inb(Z8530_PORT_OF(p)); - - if (p & Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */ - udelay(5); - return r; -} - -/** - * z8530_write_port - Architecture specific interface function - * @p: port to write - * @d: value to write - * - * Write a value to a port with delays if need be. Note that the - * caller must hold locks to avoid read/writes from other contexts - * violating the 5uS rule - * - * In the longer term this should become an architecture specific - * section so that this can become a generic driver interface for all - * platforms. For now we only handle PC I/O ports with or without the - * dread 5uS sanity delay. 
- */ - -static inline void z8530_write_port(unsigned long p, u8 d) -{ - outb(d, Z8530_PORT_OF(p)); - if (p & Z8530_PORT_SLEEP) - udelay(5); -} - -static void z8530_rx_done(struct z8530_channel *c); -static void z8530_tx_done(struct z8530_channel *c); - -/** - * read_zsreg - Read a register from a Z85230 - * @c: Z8530 channel to read from (2 per chip) - * @reg: Register to read - * FIXME: Use a spinlock. - * - * Most of the Z8530 registers are indexed off the control registers. - * A read is done by writing to the control register and reading the - * register back. The caller must hold the lock - */ - -static inline u8 read_zsreg(struct z8530_channel *c, u8 reg) -{ - if (reg) - z8530_write_port(c->ctrlio, reg); - return z8530_read_port(c->ctrlio); -} - -/** - * read_zsdata - Read the data port of a Z8530 channel - * @c: The Z8530 channel to read the data port from - * - * The data port provides fast access to some things. We still - * have all the 5uS delays to worry about. - */ - -static inline u8 read_zsdata(struct z8530_channel *c) -{ - u8 r; - - r = z8530_read_port(c->dataio); - return r; -} - -/** - * write_zsreg - Write to a Z8530 channel register - * @c: The Z8530 channel - * @reg: Register number - * @val: Value to write - * - * Write a value to an indexed register. The caller must hold the lock - * to honour the irritating delay rules. We know about register 0 - * being fast to access. - * - * Assumes c->lock is held. - */ -static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val) -{ - if (reg) - z8530_write_port(c->ctrlio, reg); - z8530_write_port(c->ctrlio, val); -} - -/** - * write_zsctrl - Write to a Z8530 control register - * @c: The Z8530 channel - * @val: Value to write - * - * Write directly to the control register on the Z8530 - */ - -static inline void write_zsctrl(struct z8530_channel *c, u8 val) -{ - z8530_write_port(c->ctrlio, val); -} - -/** - * write_zsdata - Write to a Z8530 control register - * @c: The Z8530 channel - * @val: Value to write - * - * Write directly to the data register on the Z8530 - */ -static inline void write_zsdata(struct z8530_channel *c, u8 val) -{ - z8530_write_port(c->dataio, val); -} - -/* Register loading parameters for a dead port - */ - -u8 z8530_dead_port[] = { - 255 -}; -EXPORT_SYMBOL(z8530_dead_port); - -/* Register loading parameters for currently supported circuit types - */ - -/* Data clocked by telco end. This is the correct data for the UK - * "kilostream" service, and most other similar services. - */ - -u8 z8530_hdlc_kilostream[] = { - 4, SYNC_ENAB | SDLC | X1CLK, - 2, 0, /* No vector */ - 1, 0, - 3, ENT_HM | RxCRC_ENAB | Rx8, - 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR, - 9, 0, /* Disable interrupts */ - 6, 0xFF, - 7, FLAG, - 10, ABUNDER | NRZ | CRCPS,/*MARKIDLE ??*/ - 11, TCTRxCP, - 14, DISDPLL, - 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE, - 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx, - 9, NV | MIE | NORESET, - 255 -}; -EXPORT_SYMBOL(z8530_hdlc_kilostream); - -/* As above but for enhanced chips. - */ - -u8 z8530_hdlc_kilostream_85230[] = { - 4, SYNC_ENAB | SDLC | X1CLK, - 2, 0, /* No vector */ - 1, 0, - 3, ENT_HM | RxCRC_ENAB | Rx8, - 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR, - 9, 0, /* Disable interrupts */ - 6, 0xFF, - 7, FLAG, - 10, ABUNDER | NRZ | CRCPS, /* MARKIDLE?? 
*/ - 11, TCTRxCP, - 14, DISDPLL, - 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE, - 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx, - 9, NV | MIE | NORESET, - 23, 3, /* Extended mode AUTO TX and EOM*/ - - 255 -}; -EXPORT_SYMBOL(z8530_hdlc_kilostream_85230); - -/** - * z8530_flush_fifo - Flush on chip RX FIFO - * @c: Channel to flush - * - * Flush the receive FIFO. There is no specific option for this, we - * blindly read bytes and discard them. Reading when there is no data - * is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes. - * - * All locking is handled for the caller. On return data may still be - * present if it arrived during the flush. - */ - -static void z8530_flush_fifo(struct z8530_channel *c) -{ - read_zsreg(c, R1); - read_zsreg(c, R1); - read_zsreg(c, R1); - read_zsreg(c, R1); - if (c->dev->type == Z85230) { - read_zsreg(c, R1); - read_zsreg(c, R1); - read_zsreg(c, R1); - read_zsreg(c, R1); - } -} - -/** - * z8530_rtsdtr - Control the outgoing DTS/RTS line - * @c: The Z8530 channel to control; - * @set: 1 to set, 0 to clear - * - * Sets or clears DTR/RTS on the requested line. All locking is handled - * by the caller. For now we assume all boards use the actual RTS/DTR - * on the chip. Apparently one or two don't. We'll scream about them - * later. - */ - -static void z8530_rtsdtr(struct z8530_channel *c, int set) -{ - if (set) - c->regs[5] |= (RTS | DTR); - else - c->regs[5] &= ~(RTS | DTR); - write_zsreg(c, R5, c->regs[5]); -} - -/** - * z8530_rx - Handle a PIO receive event - * @c: Z8530 channel to process - * - * Receive handler for receiving in PIO mode. This is much like the - * async one but not quite the same or as complex - * - * Note: Its intended that this handler can easily be separated from - * the main code to run realtime. That'll be needed for some machines - * (eg to ever clock 64kbits on a sparc ;)). - * - * The RT_LOCK macros don't do anything now. Keep the code covered - * by them as short as possible in all circumstances - clocks cost - * baud. The interrupt handler is assumed to be atomic w.r.t. to - * other code - this is true in the RT case too. - * - * We only cover the sync cases for this. If you want 2Mbit async - * do it yourself but consider medical assistance first. This non DMA - * synchronous mode is portable code. The DMA mode assumes PCI like - * ISA DMA - * - * Called with the device lock held - */ - -static void z8530_rx(struct z8530_channel *c) -{ - u8 ch, stat; - - while (1) { - /* FIFO empty ? */ - if (!(read_zsreg(c, R0) & 1)) - break; - ch = read_zsdata(c); - stat = read_zsreg(c, R1); - - /* Overrun ? - */ - if (c->count < c->max) { - *c->dptr++ = ch; - c->count++; - } - - if (stat & END_FR) { - /* Error ? - */ - if (stat & (Rx_OVR | CRC_ERR)) { - /* Rewind the buffer and return */ - if (c->skb) - c->dptr = c->skb->data; - c->count = 0; - if (stat & Rx_OVR) { - pr_warn("%s: overrun\n", c->dev->name); - c->rx_overrun++; - } - if (stat & CRC_ERR) { - c->rx_crc_err++; - /* printk("crc error\n"); */ - } - /* Shove the frame upstream */ - } else { - /* Drop the lock for RX processing, or - * there are deadlocks - */ - z8530_rx_done(c); - write_zsctrl(c, RES_Rx_CRC); - } - } - } - /* Clear irq - */ - write_zsctrl(c, ERR_RES); - write_zsctrl(c, RES_H_IUS); -} - -/** - * z8530_tx - Handle a PIO transmit event - * @c: Z8530 channel to process - * - * Z8530 transmit interrupt handler for the PIO mode. The basic - * idea is to attempt to keep the FIFO fed. 
We fill as many bytes - * in as possible, its quite possible that we won't keep up with the - * data rate otherwise. - */ - -static void z8530_tx(struct z8530_channel *c) -{ - while (c->txcount) { - /* FIFO full ? */ - if (!(read_zsreg(c, R0) & 4)) - return; - c->txcount--; - /* Shovel out the byte - */ - write_zsreg(c, R8, *c->tx_ptr++); - write_zsctrl(c, RES_H_IUS); - /* We are about to underflow */ - if (c->txcount == 0) { - write_zsctrl(c, RES_EOM_L); - write_zsreg(c, R10, c->regs[10] & ~ABUNDER); - } - } - - /* End of frame TX - fire another one - */ - - write_zsctrl(c, RES_Tx_P); - - z8530_tx_done(c); - write_zsctrl(c, RES_H_IUS); -} - -/** - * z8530_status - Handle a PIO status exception - * @chan: Z8530 channel to process - * - * A status event occurred in PIO synchronous mode. There are several - * reasons the chip will bother us here. A transmit underrun means we - * failed to feed the chip fast enough and just broke a packet. A DCD - * change is a line up or down. - */ - -static void z8530_status(struct z8530_channel *chan) -{ - u8 status, altered; - - status = read_zsreg(chan, R0); - altered = chan->status ^ status; - - chan->status = status; - - if (status & TxEOM) { -/* printk("%s: Tx underrun.\n", chan->dev->name); */ - chan->netdevice->stats.tx_fifo_errors++; - write_zsctrl(chan, ERR_RES); - z8530_tx_done(chan); - } - - if (altered & chan->dcdcheck) { - if (status & chan->dcdcheck) { - pr_info("%s: DCD raised\n", chan->dev->name); - write_zsreg(chan, R3, chan->regs[3] | RxENABLE); - if (chan->netdevice) - netif_carrier_on(chan->netdevice); - } else { - pr_info("%s: DCD lost\n", chan->dev->name); - write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE); - z8530_flush_fifo(chan); - if (chan->netdevice) - netif_carrier_off(chan->netdevice); - } - } - write_zsctrl(chan, RES_EXT_INT); - write_zsctrl(chan, RES_H_IUS); -} - -struct z8530_irqhandler z8530_sync = { - .rx = z8530_rx, - .tx = z8530_tx, - .status = z8530_status, -}; -EXPORT_SYMBOL(z8530_sync); - -/** - * z8530_dma_rx - Handle a DMA RX event - * @chan: Channel to handle - * - * Non bus mastering DMA interfaces for the Z8x30 devices. This - * is really pretty PC specific. The DMA mode means that most receive - * events are handled by the DMA hardware. We get a kick here only if - * a frame ended. - */ - -static void z8530_dma_rx(struct z8530_channel *chan) -{ - if (chan->rxdma_on) { - /* Special condition check only */ - u8 status; - - read_zsreg(chan, R7); - read_zsreg(chan, R6); - - status = read_zsreg(chan, R1); - - if (status & END_FR) - z8530_rx_done(chan); /* Fire up the next one */ - - write_zsctrl(chan, ERR_RES); - write_zsctrl(chan, RES_H_IUS); - } else { - /* DMA is off right now, drain the slow way */ - z8530_rx(chan); - } -} - -/** - * z8530_dma_tx - Handle a DMA TX event - * @chan: The Z8530 channel to handle - * - * We have received an interrupt while doing DMA transmissions. It - * shouldn't happen. Scream loudly if it does. - */ -static void z8530_dma_tx(struct z8530_channel *chan) -{ - if (!chan->dma_tx) { - pr_warn("Hey who turned the DMA off?\n"); - z8530_tx(chan); - return; - } - /* This shouldn't occur in DMA mode */ - pr_err("DMA tx - bogus event!\n"); - z8530_tx(chan); -} - -/** - * z8530_dma_status - Handle a DMA status exception - * @chan: Z8530 channel to process - * - * A status event occurred on the Z8530. We receive these for two reasons - * when in DMA mode. Firstly if we finished a packet transfer we get one - * and kick the next packet out. Secondly we may see a DCD change. 
- * - */ -static void z8530_dma_status(struct z8530_channel *chan) -{ - u8 status, altered; - - status = read_zsreg(chan, R0); - altered = chan->status ^ status; - - chan->status = status; - - if (chan->dma_tx) { - if (status & TxEOM) { - unsigned long flags; - - flags = claim_dma_lock(); - disable_dma(chan->txdma); - clear_dma_ff(chan->txdma); - chan->txdma_on = 0; - release_dma_lock(flags); - z8530_tx_done(chan); - } - } - - if (altered & chan->dcdcheck) { - if (status & chan->dcdcheck) { - pr_info("%s: DCD raised\n", chan->dev->name); - write_zsreg(chan, R3, chan->regs[3] | RxENABLE); - if (chan->netdevice) - netif_carrier_on(chan->netdevice); - } else { - pr_info("%s: DCD lost\n", chan->dev->name); - write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE); - z8530_flush_fifo(chan); - if (chan->netdevice) - netif_carrier_off(chan->netdevice); - } - } - - write_zsctrl(chan, RES_EXT_INT); - write_zsctrl(chan, RES_H_IUS); -} - -static struct z8530_irqhandler z8530_dma_sync = { - .rx = z8530_dma_rx, - .tx = z8530_dma_tx, - .status = z8530_dma_status, -}; - -static struct z8530_irqhandler z8530_txdma_sync = { - .rx = z8530_rx, - .tx = z8530_dma_tx, - .status = z8530_dma_status, -}; - -/** - * z8530_rx_clear - Handle RX events from a stopped chip - * @c: Z8530 channel to shut up - * - * Receive interrupt vectors for a Z8530 that is in 'parked' mode. - * For machines with PCI Z85x30 cards, or level triggered interrupts - * (eg the MacII) we must clear the interrupt cause or die. - */ - -static void z8530_rx_clear(struct z8530_channel *c) -{ - /* Data and status bytes - */ - u8 stat; - - read_zsdata(c); - stat = read_zsreg(c, R1); - - if (stat & END_FR) - write_zsctrl(c, RES_Rx_CRC); - /* Clear irq - */ - write_zsctrl(c, ERR_RES); - write_zsctrl(c, RES_H_IUS); -} - -/** - * z8530_tx_clear - Handle TX events from a stopped chip - * @c: Z8530 channel to shut up - * - * Transmit interrupt vectors for a Z8530 that is in 'parked' mode. - * For machines with PCI Z85x30 cards, or level triggered interrupts - * (eg the MacII) we must clear the interrupt cause or die. - */ - -static void z8530_tx_clear(struct z8530_channel *c) -{ - write_zsctrl(c, RES_Tx_P); - write_zsctrl(c, RES_H_IUS); -} - -/** - * z8530_status_clear - Handle status events from a stopped chip - * @chan: Z8530 channel to shut up - * - * Status interrupt vectors for a Z8530 that is in 'parked' mode. - * For machines with PCI Z85x30 cards, or level triggered interrupts - * (eg the MacII) we must clear the interrupt cause or die. - */ - -static void z8530_status_clear(struct z8530_channel *chan) -{ - u8 status = read_zsreg(chan, R0); - - if (status & TxEOM) - write_zsctrl(chan, ERR_RES); - write_zsctrl(chan, RES_EXT_INT); - write_zsctrl(chan, RES_H_IUS); -} - -struct z8530_irqhandler z8530_nop = { - .rx = z8530_rx_clear, - .tx = z8530_tx_clear, - .status = z8530_status_clear, -}; -EXPORT_SYMBOL(z8530_nop); - -/** - * z8530_interrupt - Handle an interrupt from a Z8530 - * @irq: Interrupt number - * @dev_id: The Z8530 device that is interrupting. - * - * A Z85[2]30 device has stuck its hand in the air for attention. - * We scan both the channels on the chip for events and then call - * the channel specific call backs for each channel that has events. - * We have to use callback functions because the two channels can be - * in different modes. - * - * Locking is done for the handlers. Note that locking is done - * at the chip level (the 5uS delay issue is per chip not per - * channel). 
c->lock for both channels points to dev->lock - */ - -irqreturn_t z8530_interrupt(int irq, void *dev_id) -{ - struct z8530_dev *dev = dev_id; - u8 intr; - static volatile int locker=0; - int work = 0; - struct z8530_irqhandler *irqs; - - if (locker) { - pr_err("IRQ re-enter\n"); - return IRQ_NONE; - } - locker = 1; - - spin_lock(&dev->lock); - - while (++work < 5000) { - intr = read_zsreg(&dev->chanA, R3); - if (!(intr & - (CHARxIP | CHATxIP | CHAEXT | CHBRxIP | CHBTxIP | CHBEXT))) - break; - - /* This holds the IRQ status. On the 8530 you must read it - * from chan A even though it applies to the whole chip - */ - - /* Now walk the chip and see what it is wanting - it may be - * an IRQ for someone else remember - */ - - irqs = dev->chanA.irqs; - - if (intr & (CHARxIP | CHATxIP | CHAEXT)) { - if (intr & CHARxIP) - irqs->rx(&dev->chanA); - if (intr & CHATxIP) - irqs->tx(&dev->chanA); - if (intr & CHAEXT) - irqs->status(&dev->chanA); - } - - irqs = dev->chanB.irqs; - - if (intr & (CHBRxIP | CHBTxIP | CHBEXT)) { - if (intr & CHBRxIP) - irqs->rx(&dev->chanB); - if (intr & CHBTxIP) - irqs->tx(&dev->chanB); - if (intr & CHBEXT) - irqs->status(&dev->chanB); - } - } - spin_unlock(&dev->lock); - if (work == 5000) - pr_err("%s: interrupt jammed - abort(0x%X)!\n", - dev->name, intr); - /* Ok all done */ - locker = 0; - return IRQ_HANDLED; -} -EXPORT_SYMBOL(z8530_interrupt); - -static const u8 reg_init[16] = { - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0x55, 0, 0, 0 -}; - -/** - * z8530_sync_open - Open a Z8530 channel for PIO - * @dev: The network interface we are using - * @c: The Z8530 channel to open in synchronous PIO mode - * - * Switch a Z8530 into synchronous mode without DMA assist. We - * raise the RTS/DTR and commence network operation. - */ -int z8530_sync_open(struct net_device *dev, struct z8530_channel *c) -{ - unsigned long flags; - - spin_lock_irqsave(c->lock, flags); - - c->sync = 1; - c->mtu = dev->mtu + 64; - c->count = 0; - c->skb = NULL; - c->skb2 = NULL; - c->irqs = &z8530_sync; - - /* This loads the double buffer up */ - z8530_rx_done(c); /* Load the frame ring */ - z8530_rx_done(c); /* Load the backup frame */ - z8530_rtsdtr(c, 1); - c->dma_tx = 0; - c->regs[R1] |= TxINT_ENAB; - write_zsreg(c, R1, c->regs[R1]); - write_zsreg(c, R3, c->regs[R3] | RxENABLE); - - spin_unlock_irqrestore(c->lock, flags); - return 0; -} -EXPORT_SYMBOL(z8530_sync_open); - -/** - * z8530_sync_close - Close a PIO Z8530 channel - * @dev: Network device to close - * @c: Z8530 channel to disassociate and move to idle - * - * Close down a Z8530 interface and switch its interrupt handlers - * to discard future events. - */ -int z8530_sync_close(struct net_device *dev, struct z8530_channel *c) -{ - u8 chk; - unsigned long flags; - - spin_lock_irqsave(c->lock, flags); - c->irqs = &z8530_nop; - c->max = 0; - c->sync = 0; - - chk = read_zsreg(c, R0); - write_zsreg(c, R3, c->regs[R3]); - z8530_rtsdtr(c, 0); - - spin_unlock_irqrestore(c->lock, flags); - return 0; -} -EXPORT_SYMBOL(z8530_sync_close); - -/** - * z8530_sync_dma_open - Open a Z8530 for DMA I/O - * @dev: The network device to attach - * @c: The Z8530 channel to configure in sync DMA mode. - * - * Set up a Z85x30 device for synchronous DMA in both directions. Two - * ISA DMA channels must be available for this to work. We assume ISA - * DMA driven I/O and PC limits on access. 
- */ -int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c) -{ - unsigned long cflags, dflags; - - c->sync = 1; - c->mtu = dev->mtu + 64; - c->count = 0; - c->skb = NULL; - c->skb2 = NULL; - - /* Load the DMA interfaces up - */ - c->rxdma_on = 0; - c->txdma_on = 0; - - /* Allocate the DMA flip buffers. Limit by page size. - * Everyone runs 1500 mtu or less on wan links so this - * should be fine. - */ - - if (c->mtu > PAGE_SIZE / 2) - return -EMSGSIZE; - - c->rx_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!c->rx_buf[0]) - return -ENOBUFS; - c->rx_buf[1] = c->rx_buf[0] + PAGE_SIZE / 2; - - c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!c->tx_dma_buf[0]) { - free_page((unsigned long)c->rx_buf[0]); - c->rx_buf[0] = NULL; - return -ENOBUFS; - } - c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2; - - c->tx_dma_used = 0; - c->dma_tx = 1; - c->dma_num = 0; - c->dma_ready = 1; - - /* Enable DMA control mode - */ - - spin_lock_irqsave(c->lock, cflags); - - /* TX DMA via DIR/REQ - */ - - c->regs[R14] |= DTRREQ; - write_zsreg(c, R14, c->regs[R14]); - - c->regs[R1] &= ~TxINT_ENAB; - write_zsreg(c, R1, c->regs[R1]); - - /* RX DMA via W/Req - */ - - c->regs[R1] |= WT_FN_RDYFN; - c->regs[R1] |= WT_RDY_RT; - c->regs[R1] |= INT_ERR_Rx; - c->regs[R1] &= ~TxINT_ENAB; - write_zsreg(c, R1, c->regs[R1]); - c->regs[R1] |= WT_RDY_ENAB; - write_zsreg(c, R1, c->regs[R1]); - - /* DMA interrupts - */ - - /* Set up the DMA configuration - */ - - dflags = claim_dma_lock(); - - disable_dma(c->rxdma); - clear_dma_ff(c->rxdma); - set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10); - set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0])); - set_dma_count(c->rxdma, c->mtu); - enable_dma(c->rxdma); - - disable_dma(c->txdma); - clear_dma_ff(c->txdma); - set_dma_mode(c->txdma, DMA_MODE_WRITE); - disable_dma(c->txdma); - - release_dma_lock(dflags); - - /* Select the DMA interrupt handlers - */ - - c->rxdma_on = 1; - c->txdma_on = 1; - c->tx_dma_used = 1; - - c->irqs = &z8530_dma_sync; - z8530_rtsdtr(c, 1); - write_zsreg(c, R3, c->regs[R3] | RxENABLE); - - spin_unlock_irqrestore(c->lock, cflags); - - return 0; -} -EXPORT_SYMBOL(z8530_sync_dma_open); - -/** - * z8530_sync_dma_close - Close down DMA I/O - * @dev: Network device to detach - * @c: Z8530 channel to move into discard mode - * - * Shut down a DMA mode synchronous interface. Halt the DMA, and - * free the buffers. 
- */ -int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c) -{ - u8 chk; - unsigned long flags; - - c->irqs = &z8530_nop; - c->max = 0; - c->sync = 0; - - /* Disable the PC DMA channels - */ - - flags = claim_dma_lock(); - disable_dma(c->rxdma); - clear_dma_ff(c->rxdma); - - c->rxdma_on = 0; - - disable_dma(c->txdma); - clear_dma_ff(c->txdma); - release_dma_lock(flags); - - c->txdma_on = 0; - c->tx_dma_used = 0; - - spin_lock_irqsave(c->lock, flags); - - /* Disable DMA control mode - */ - - c->regs[R1] &= ~WT_RDY_ENAB; - write_zsreg(c, R1, c->regs[R1]); - c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx); - c->regs[R1] |= INT_ALL_Rx; - write_zsreg(c, R1, c->regs[R1]); - c->regs[R14] &= ~DTRREQ; - write_zsreg(c, R14, c->regs[R14]); - - if (c->rx_buf[0]) { - free_page((unsigned long)c->rx_buf[0]); - c->rx_buf[0] = NULL; - } - if (c->tx_dma_buf[0]) { - free_page((unsigned long)c->tx_dma_buf[0]); - c->tx_dma_buf[0] = NULL; - } - chk = read_zsreg(c, R0); - write_zsreg(c, R3, c->regs[R3]); - z8530_rtsdtr(c, 0); - - spin_unlock_irqrestore(c->lock, flags); - - return 0; -} -EXPORT_SYMBOL(z8530_sync_dma_close); - -/** - * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA - * @dev: The network device to attach - * @c: The Z8530 channel to configure in sync DMA mode. - * - * Set up a Z85x30 device for synchronous DMA transmission. One - * ISA DMA channel must be available for this to work. The receive - * side is run in PIO mode, but then it has the bigger FIFO. - */ - -int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c) -{ - unsigned long cflags, dflags; - - printk("Opening sync interface for TX-DMA\n"); - c->sync = 1; - c->mtu = dev->mtu + 64; - c->count = 0; - c->skb = NULL; - c->skb2 = NULL; - - /* Allocate the DMA flip buffers. Limit by page size. - * Everyone runs 1500 mtu or less on wan links so this - * should be fine. - */ - - if (c->mtu > PAGE_SIZE / 2) - return -EMSGSIZE; - - c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!c->tx_dma_buf[0]) - return -ENOBUFS; - - c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2; - - spin_lock_irqsave(c->lock, cflags); - - /* Load the PIO receive ring - */ - - z8530_rx_done(c); - z8530_rx_done(c); - - /* Load the DMA interfaces up - */ - - c->rxdma_on = 0; - c->txdma_on = 0; - - c->tx_dma_used = 0; - c->dma_num = 0; - c->dma_ready = 1; - c->dma_tx = 1; - - /* Enable DMA control mode - */ - - /* TX DMA via DIR/REQ - */ - c->regs[R14] |= DTRREQ; - write_zsreg(c, R14, c->regs[R14]); - - c->regs[R1] &= ~TxINT_ENAB; - write_zsreg(c, R1, c->regs[R1]); - - /* Set up the DMA configuration - */ - - dflags = claim_dma_lock(); - - disable_dma(c->txdma); - clear_dma_ff(c->txdma); - set_dma_mode(c->txdma, DMA_MODE_WRITE); - disable_dma(c->txdma); - - release_dma_lock(dflags); - - /* Select the DMA interrupt handlers - */ - - c->rxdma_on = 0; - c->txdma_on = 1; - c->tx_dma_used = 1; - - c->irqs = &z8530_txdma_sync; - z8530_rtsdtr(c, 1); - write_zsreg(c, R3, c->regs[R3] | RxENABLE); - spin_unlock_irqrestore(c->lock, cflags); - - return 0; -} -EXPORT_SYMBOL(z8530_sync_txdma_open); - -/** - * z8530_sync_txdma_close - Close down a TX driven DMA channel - * @dev: Network device to detach - * @c: Z8530 channel to move into discard mode - * - * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA, - * and free the buffers. 
- */ - -int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c) -{ - unsigned long dflags, cflags; - u8 chk; - - spin_lock_irqsave(c->lock, cflags); - - c->irqs = &z8530_nop; - c->max = 0; - c->sync = 0; - - /* Disable the PC DMA channels - */ - - dflags = claim_dma_lock(); - - disable_dma(c->txdma); - clear_dma_ff(c->txdma); - c->txdma_on = 0; - c->tx_dma_used = 0; - - release_dma_lock(dflags); - - /* Disable DMA control mode - */ - - c->regs[R1] &= ~WT_RDY_ENAB; - write_zsreg(c, R1, c->regs[R1]); - c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx); - c->regs[R1] |= INT_ALL_Rx; - write_zsreg(c, R1, c->regs[R1]); - c->regs[R14] &= ~DTRREQ; - write_zsreg(c, R14, c->regs[R14]); - - if (c->tx_dma_buf[0]) { - free_page((unsigned long)c->tx_dma_buf[0]); - c->tx_dma_buf[0] = NULL; - } - chk = read_zsreg(c, R0); - write_zsreg(c, R3, c->regs[R3]); - z8530_rtsdtr(c, 0); - - spin_unlock_irqrestore(c->lock, cflags); - return 0; -} -EXPORT_SYMBOL(z8530_sync_txdma_close); - -/* Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny - * it exists... - */ -static const char * const z8530_type_name[] = { - "Z8530", - "Z85C30", - "Z85230" -}; - -/** - * z8530_describe - Uniformly describe a Z8530 port - * @dev: Z8530 device to describe - * @mapping: string holding mapping type (eg "I/O" or "Mem") - * @io: the port value in question - * - * Describe a Z8530 in a standard format. We must pass the I/O as - * the port offset isn't predictable. The main reason for this function - * is to try and get a common format of report. - */ - -void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io) -{ - pr_info("%s: %s found at %s 0x%lX, IRQ %d\n", - dev->name, - z8530_type_name[dev->type], - mapping, - Z8530_PORT_OF(io), - dev->irq); -} -EXPORT_SYMBOL(z8530_describe); - -/* Locked operation part of the z8530 init code - */ -static inline int do_z8530_init(struct z8530_dev *dev) -{ - /* NOP the interrupt handlers first - we might get a - * floating IRQ transition when we reset the chip - */ - dev->chanA.irqs = &z8530_nop; - dev->chanB.irqs = &z8530_nop; - dev->chanA.dcdcheck = DCD; - dev->chanB.dcdcheck = DCD; - - /* Reset the chip */ - write_zsreg(&dev->chanA, R9, 0xC0); - udelay(200); - /* Now check its valid */ - write_zsreg(&dev->chanA, R12, 0xAA); - if (read_zsreg(&dev->chanA, R12) != 0xAA) - return -ENODEV; - write_zsreg(&dev->chanA, R12, 0x55); - if (read_zsreg(&dev->chanA, R12) != 0x55) - return -ENODEV; - - dev->type = Z8530; - - /* See the application note. - */ - - write_zsreg(&dev->chanA, R15, 0x01); - - /* If we can set the low bit of R15 then - * the chip is enhanced. - */ - - if (read_zsreg(&dev->chanA, R15) == 0x01) { - /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */ - /* Put a char in the fifo */ - write_zsreg(&dev->chanA, R8, 0); - if (read_zsreg(&dev->chanA, R0) & Tx_BUF_EMP) - dev->type = Z85230; /* Has a FIFO */ - else - dev->type = Z85C30; /* Z85C30, 1 byte FIFO */ - } - - /* The code assumes R7' and friends are - * off. Use write_zsext() for these and keep - * this bit clear. - */ - - write_zsreg(&dev->chanA, R15, 0); - - /* At this point it looks like the chip is behaving - */ - - memcpy(dev->chanA.regs, reg_init, 16); - memcpy(dev->chanB.regs, reg_init, 16); - - return 0; -} - -/** - * z8530_init - Initialise a Z8530 device - * @dev: Z8530 device to initialise. - * - * Configure up a Z8530/Z85C30 or Z85230 chip. We check the device - * is present, identify the type and then program it to hopefully - * keep quite and behave. 
This matters a lot, a Z8530 in the wrong - * state will sometimes get into stupid modes generating 10Khz - * interrupt streams and the like. - * - * We set the interrupt handler up to discard any events, in case - * we get them during reset or setp. - * - * Return 0 for success, or a negative value indicating the problem - * in errno form. - */ - -int z8530_init(struct z8530_dev *dev) -{ - unsigned long flags; - int ret; - - /* Set up the chip level lock */ - spin_lock_init(&dev->lock); - dev->chanA.lock = &dev->lock; - dev->chanB.lock = &dev->lock; - - spin_lock_irqsave(&dev->lock, flags); - ret = do_z8530_init(dev); - spin_unlock_irqrestore(&dev->lock, flags); - - return ret; -} -EXPORT_SYMBOL(z8530_init); - -/** - * z8530_shutdown - Shutdown a Z8530 device - * @dev: The Z8530 chip to shutdown - * - * We set the interrupt handlers to silence any interrupts. We then - * reset the chip and wait 100uS to be sure the reset completed. Just - * in case the caller then tries to do stuff. - * - * This is called without the lock held - */ -int z8530_shutdown(struct z8530_dev *dev) -{ - unsigned long flags; - /* Reset the chip */ - - spin_lock_irqsave(&dev->lock, flags); - dev->chanA.irqs = &z8530_nop; - dev->chanB.irqs = &z8530_nop; - write_zsreg(&dev->chanA, R9, 0xC0); - /* We must lock the udelay, the chip is offlimits here */ - udelay(100); - spin_unlock_irqrestore(&dev->lock, flags); - return 0; -} -EXPORT_SYMBOL(z8530_shutdown); - -/** - * z8530_channel_load - Load channel data - * @c: Z8530 channel to configure - * @rtable: table of register, value pairs - * FIXME: ioctl to allow user uploaded tables - * - * Load a Z8530 channel up from the system data. We use +16 to - * indicate the "prime" registers. The value 255 terminates the - * table. - */ - -int z8530_channel_load(struct z8530_channel *c, u8 *rtable) -{ - unsigned long flags; - - spin_lock_irqsave(c->lock, flags); - - while (*rtable != 255) { - int reg = *rtable++; - - if (reg > 0x0F) - write_zsreg(c, R15, c->regs[15] | 1); - write_zsreg(c, reg & 0x0F, *rtable); - if (reg > 0x0F) - write_zsreg(c, R15, c->regs[15] & ~1); - c->regs[reg] = *rtable++; - } - c->rx_function = z8530_null_rx; - c->skb = NULL; - c->tx_skb = NULL; - c->tx_next_skb = NULL; - c->mtu = 1500; - c->max = 0; - c->count = 0; - c->status = read_zsreg(c, R0); - c->sync = 1; - write_zsreg(c, R3, c->regs[R3] | RxENABLE); - - spin_unlock_irqrestore(c->lock, flags); - return 0; -} -EXPORT_SYMBOL(z8530_channel_load); - -/** - * z8530_tx_begin - Begin packet transmission - * @c: The Z8530 channel to kick - * - * This is the speed sensitive side of transmission. If we are called - * and no buffer is being transmitted we commence the next buffer. If - * nothing is queued we idle the sync. - * - * Note: We are handling this code path in the interrupt path, keep it - * fast or bad things will happen. - * - * Called with the lock held. - */ - -static void z8530_tx_begin(struct z8530_channel *c) -{ - unsigned long flags; - - if (c->tx_skb) - return; - - c->tx_skb = c->tx_next_skb; - c->tx_next_skb = NULL; - c->tx_ptr = c->tx_next_ptr; - - if (!c->tx_skb) { - /* Idle on */ - if (c->dma_tx) { - flags = claim_dma_lock(); - disable_dma(c->txdma); - /* Check if we crapped out. - */ - if (get_dma_residue(c->txdma)) { - c->netdevice->stats.tx_dropped++; - c->netdevice->stats.tx_fifo_errors++; - } - release_dma_lock(flags); - } - c->txcount = 0; - } else { - c->txcount = c->tx_skb->len; - - if (c->dma_tx) { - /* FIXME. 
-
-/**
- * z8530_tx_begin - Begin packet transmission
- * @c: The Z8530 channel to kick
- *
- * This is the speed sensitive side of transmission. If we are called
- * and no buffer is being transmitted we commence the next buffer. If
- * nothing is queued we idle the sync.
- *
- * Note: We are handling this code path in the interrupt path, keep it
- * fast or bad things will happen.
- *
- * Called with the lock held.
- */
-
-static void z8530_tx_begin(struct z8530_channel *c)
-{
-	unsigned long flags;
-
-	if (c->tx_skb)
-		return;
-
-	c->tx_skb = c->tx_next_skb;
-	c->tx_next_skb = NULL;
-	c->tx_ptr = c->tx_next_ptr;
-
-	if (!c->tx_skb) {
-		/* Idle on */
-		if (c->dma_tx) {
-			flags = claim_dma_lock();
-			disable_dma(c->txdma);
-			/* Check if we crapped out.
-			 */
-			if (get_dma_residue(c->txdma)) {
-				c->netdevice->stats.tx_dropped++;
-				c->netdevice->stats.tx_fifo_errors++;
-			}
-			release_dma_lock(flags);
-		}
-		c->txcount = 0;
-	} else {
-		c->txcount = c->tx_skb->len;
-
-		if (c->dma_tx) {
-			/* FIXME. DMA is broken for the original 8530,
-			 * on the older parts we need to set a flag and
-			 * wait for a further TX interrupt to fire this
-			 * stage off
-			 */
-
-			flags = claim_dma_lock();
-			disable_dma(c->txdma);
-
-			/* These two are needed by the 8530/85C30
-			 * and must be issued when idling.
-			 */
-			if (c->dev->type != Z85230) {
-				write_zsctrl(c, RES_Tx_CRC);
-				write_zsctrl(c, RES_EOM_L);
-			}
-			write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
-			clear_dma_ff(c->txdma);
-			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
-			set_dma_count(c->txdma, c->txcount);
-			enable_dma(c->txdma);
-			release_dma_lock(flags);
-			write_zsctrl(c, RES_EOM_L);
-			write_zsreg(c, R5, c->regs[R5] | TxENAB);
-		} else {
-			/* ABUNDER off */
-			write_zsreg(c, R10, c->regs[10]);
-			write_zsctrl(c, RES_Tx_CRC);
-
-			while (c->txcount && (read_zsreg(c, R0) & Tx_BUF_EMP)) {
-				write_zsreg(c, R8, *c->tx_ptr++);
-				c->txcount--;
-			}
-		}
-	}
-	/* Since we emptied tx_skb we can ask for more
-	 */
-	netif_wake_queue(c->netdevice);
-}
-
-/**
- * z8530_tx_done - TX complete callback
- * @c: The channel that completed a transmit.
- *
- * This is called when we complete a packet send. We wake the queue,
- * start the next packet going and then free the buffer of the existing
- * packet. This code is fairly timing sensitive.
- *
- * Called with the register lock held.
- */
-
-static void z8530_tx_done(struct z8530_channel *c)
-{
-	struct sk_buff *skb;
-
-	/* Actually this can happen. */
-	if (!c->tx_skb)
-		return;
-
-	skb = c->tx_skb;
-	c->tx_skb = NULL;
-	z8530_tx_begin(c);
-	c->netdevice->stats.tx_packets++;
-	c->netdevice->stats.tx_bytes += skb->len;
-	dev_consume_skb_irq(skb);
-}
-
-/**
- * z8530_null_rx - Discard a packet
- * @c: The channel the packet arrived on
- * @skb: The buffer
- *
- * We point the receive handler at this function when idle. Instead
- * of processing the frames we get to throw them away.
- */
-void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
-{
-	dev_kfree_skb_any(skb);
-}
-EXPORT_SYMBOL(z8530_null_rx);
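z8530_null_rx() is only the idle hook; once a port is up, its driver repoints rx_function at a real handler. A sketch in the style of the kernel's Z8530 board drivers (the function name is hypothetical, and it assumes the port is bound to the generic-HDLC stack):

static void myboard_input(struct z8530_channel *c, struct sk_buff *skb)
{
	/* z8530_rx_done() already trimmed the frame to its length;
	 * drop the trailing CRC and hand it to the HDLC layer.
	 */
	skb_trim(skb, skb->len - 2);
	skb->protocol = hdlc_type_trans(skb, c->netdevice);
	skb_reset_mac_header(skb);
	skb->dev = c->netdevice;
	netif_rx(skb);
}

/* installed at open time: c->rx_function = myboard_input; */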
- */ - - if (ready) { - c->dma_num ^= 1; - set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10); - set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num])); - set_dma_count(c->rxdma, c->mtu); - c->rxdma_on = 1; - enable_dma(c->rxdma); - /* Stop any frames that we missed the head of - * from passing - */ - write_zsreg(c, R0, RES_Rx_CRC); - } else { - /* Can't occur as we dont reenable the DMA irq until - * after the flip is done - */ - netdev_warn(c->netdevice, "DMA flip overrun!\n"); - } - - release_dma_lock(flags); - - /* Shove the old buffer into an sk_buff. We can't DMA - * directly into one on a PC - it might be above the 16Mb - * boundary. Optimisation - we could check to see if we - * can avoid the copy. Optimisation 2 - make the memcpy - * a copychecksum. - */ - - skb = dev_alloc_skb(ct); - if (!skb) { - c->netdevice->stats.rx_dropped++; - netdev_warn(c->netdevice, "Memory squeeze\n"); - } else { - skb_put(skb, ct); - skb_copy_to_linear_data(skb, rxb, ct); - c->netdevice->stats.rx_packets++; - c->netdevice->stats.rx_bytes += ct; - } - c->dma_ready = 1; - } else { - RT_LOCK; - skb = c->skb; - - /* The game we play for non DMA is similar. We want to - * get the controller set up for the next packet as fast - * as possible. We potentially only have one byte + the - * fifo length for this. Thus we want to flip to the new - * buffer and then mess around copying and allocating - * things. For the current case it doesn't matter but - * if you build a system where the sync irq isn't blocked - * by the kernel IRQ disable then you need only block the - * sync IRQ for the RT_LOCK area. - * - */ - ct = c->count; - - c->skb = c->skb2; - c->count = 0; - c->max = c->mtu; - if (c->skb) { - c->dptr = c->skb->data; - c->max = c->mtu; - } else { - c->count = 0; - c->max = 0; - } - RT_UNLOCK; - - c->skb2 = dev_alloc_skb(c->mtu); - if (c->skb2) - skb_put(c->skb2, c->mtu); - - c->netdevice->stats.rx_packets++; - c->netdevice->stats.rx_bytes += ct; - } - /* If we received a frame we must now process it. - */ - if (skb) { - skb_trim(skb, ct); - c->rx_function(c, skb); - } else { - c->netdevice->stats.rx_dropped++; - netdev_err(c->netdevice, "Lost a frame\n"); - } -} - -/** - * spans_boundary - Check a packet can be ISA DMA'd - * @skb: The buffer to check - * - * Returns true if the buffer cross a DMA boundary on a PC. The poor - * thing can only DMA within a 64K block not across the edges of it. - */ - -static inline int spans_boundary(struct sk_buff *skb) -{ - unsigned long a = (unsigned long)skb->data; - - a ^= (a + skb->len); - if (a & 0x00010000) /* If the 64K bit is different.. */ - return 1; - return 0; -} - -/** - * z8530_queue_xmit - Queue a packet - * @c: The channel to use - * @skb: The packet to kick down the channel - * - * Queue a packet for transmission. Because we have rather - * hard to hit interrupt latencies for the Z85230 per packet - * even in DMA mode we do the flip to DMA buffer if needed here - * not in the IRQ. - * - * Called from the network code. The lock is not held at this - * point. - */ -netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb) -{ - unsigned long flags; - - netif_stop_queue(c->netdevice); - if (c->tx_next_skb) - return NETDEV_TX_BUSY; - - /* PC SPECIFIC - DMA limits */ - /* If we will DMA the transmit and its gone over the ISA bus - * limit, then copy to the flip buffer - */ - - if (c->dma_tx && - ((unsigned long)(virt_to_bus(skb->data + skb->len)) >= - 16 * 1024 * 1024 || spans_boundary(skb))) { - /* Send the flip buffer, and flip the flippy bit. 
- * We don't care which is used when just so long as - * we never use the same buffer twice in a row. Since - * only one buffer can be going out at a time the other - * has to be safe. - */ - c->tx_next_ptr = c->tx_dma_buf[c->tx_dma_used]; - c->tx_dma_used ^= 1; /* Flip temp buffer */ - skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len); - } else { - c->tx_next_ptr = skb->data; - } - RT_LOCK; - c->tx_next_skb = skb; - RT_UNLOCK; - - spin_lock_irqsave(c->lock, flags); - z8530_tx_begin(c); - spin_unlock_irqrestore(c->lock, flags); - - return NETDEV_TX_OK; -} -EXPORT_SYMBOL(z8530_queue_xmit); - -/* Module support - */ -static const char banner[] __initconst = - KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n"; - -static int __init z85230_init_driver(void) -{ - printk(banner); - return 0; -} -module_init(z85230_init_driver); - -static void __exit z85230_cleanup_driver(void) -{ -} -module_exit(z85230_cleanup_driver); - -MODULE_AUTHOR("Red Hat Inc."); -MODULE_DESCRIPTION("Z85x30 synchronous driver core"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h deleted file mode 100644 index 462cb620bc5d..000000000000 --- a/drivers/net/wan/z85230.h +++ /dev/null @@ -1,407 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Description of Z8530 Z85C30 and Z85230 communications chips - * - * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) - * Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk> - */ - -#ifndef _Z8530_H -#define _Z8530_H - -#include <linux/tty.h> -#include <linux/interrupt.h> - -/* Conversion routines to/from brg time constants from/to bits - * per second. - */ -#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2)) -#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2) - -/* The Zilog register set */ - -#define FLAG 0x7e - -/* Write Register 0 */ -#define R0 0 /* Register selects */ -#define R1 1 -#define R2 2 -#define R3 3 -#define R4 4 -#define R5 5 -#define R6 6 -#define R7 7 -#define R8 8 -#define R9 9 -#define R10 10 -#define R11 11 -#define R12 12 -#define R13 13 -#define R14 14 -#define R15 15 - -#define RPRIME 16 /* Indicate a prime register access on 230 */ - -#define NULLCODE 0 /* Null Code */ -#define POINT_HIGH 0x8 /* Select upper half of registers */ -#define RES_EXT_INT 0x10 /* Reset Ext. 
Status Interrupts */ -#define SEND_ABORT 0x18 /* HDLC Abort */ -#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */ -#define RES_Tx_P 0x28 /* Reset TxINT Pending */ -#define ERR_RES 0x30 /* Error Reset */ -#define RES_H_IUS 0x38 /* Reset highest IUS */ - -#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */ -#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */ -#define RES_EOM_L 0xC0 /* Reset EOM latch */ - -/* Write Register 1 */ - -#define EXT_INT_ENAB 0x1 /* Ext Int Enable */ -#define TxINT_ENAB 0x2 /* Tx Int Enable */ -#define PAR_SPEC 0x4 /* Parity is special condition */ - -#define RxINT_DISAB 0 /* Rx Int Disable */ -#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */ -#define INT_ALL_Rx 0x10 /* Int on all Rx Characters or error */ -#define INT_ERR_Rx 0x18 /* Int on error only */ - -#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */ -#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */ -#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */ - -/* Write Register #2 (Interrupt Vector) */ - -/* Write Register 3 */ - -#define RxENABLE 0x1 /* Rx Enable */ -#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */ -#define ADD_SM 0x4 /* Address Search Mode (SDLC) */ -#define RxCRC_ENAB 0x8 /* Rx CRC Enable */ -#define ENT_HM 0x10 /* Enter Hunt Mode */ -#define AUTO_ENAB 0x20 /* Auto Enables */ -#define Rx5 0x0 /* Rx 5 Bits/Character */ -#define Rx7 0x40 /* Rx 7 Bits/Character */ -#define Rx6 0x80 /* Rx 6 Bits/Character */ -#define Rx8 0xc0 /* Rx 8 Bits/Character */ - -/* Write Register 4 */ - -#define PAR_ENA 0x1 /* Parity Enable */ -#define PAR_EVEN 0x2 /* Parity Even/Odd* */ - -#define SYNC_ENAB 0 /* Sync Modes Enable */ -#define SB1 0x4 /* 1 stop bit/char */ -#define SB15 0x8 /* 1.5 stop bits/char */ -#define SB2 0xc /* 2 stop bits/char */ - -#define MONSYNC 0 /* 8 Bit Sync character */ -#define BISYNC 0x10 /* 16 bit sync character */ -#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */ -#define EXTSYNC 0x30 /* External Sync Mode */ - -#define X1CLK 0x0 /* x1 clock mode */ -#define X16CLK 0x40 /* x16 clock mode */ -#define X32CLK 0x80 /* x32 clock mode */ -#define X64CLK 0xC0 /* x64 clock mode */ - -/* Write Register 5 */ - -#define TxCRC_ENAB 0x1 /* Tx CRC Enable */ -#define RTS 0x2 /* RTS */ -#define SDLC_CRC 0x4 /* SDLC/CRC-16 */ -#define TxENAB 0x8 /* Tx Enable */ -#define SND_BRK 0x10 /* Send Break */ -#define Tx5 0x0 /* Tx 5 bits (or less)/character */ -#define Tx7 0x20 /* Tx 7 bits/character */ -#define Tx6 0x40 /* Tx 6 bits/character */ -#define Tx8 0x60 /* Tx 8 bits/character */ -#define DTR 0x80 /* DTR */ - -/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */ - -/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */ - -/* Write Register 8 (transmit buffer) */ - -/* Write Register 9 (Master interrupt control) */ -#define VIS 1 /* Vector Includes Status */ -#define NV 2 /* No Vector */ -#define DLC 4 /* Disable Lower Chain */ -#define MIE 8 /* Master Interrupt Enable */ -#define STATHI 0x10 /* Status high */ -#define NORESET 0 /* No reset on write to R9 */ -#define CHRB 0x40 /* Reset channel B */ -#define CHRA 0x80 /* Reset channel A */ -#define FHWRES 0xc0 /* Force hardware reset */ - -/* Write Register 10 (misc control bits) */ -#define BIT6 1 /* 6 bit/8bit sync */ -#define LOOPMODE 2 /* SDLC Loop mode */ -#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */ -#define MARKIDLE 8 /* Mark/flag on idle */ -#define GAOP 0x10 /* Go active on poll */ -#define NRZ 0 /* NRZ mode */ -#define NRZI 0x20 /* NRZI mode */ -#define FM1 0x40 /* FM1 (transition = 1) */ 
-#define FM0 0x60 /* FM0 (transition = 0) */ -#define CRCPS 0x80 /* CRC Preset I/O */ - -/* Write Register 11 (Clock Mode control) */ -#define TRxCXT 0 /* TRxC = Xtal output */ -#define TRxCTC 1 /* TRxC = Transmit clock */ -#define TRxCBR 2 /* TRxC = BR Generator Output */ -#define TRxCDP 3 /* TRxC = DPLL output */ -#define TRxCOI 4 /* TRxC O/I */ -#define TCRTxCP 0 /* Transmit clock = RTxC pin */ -#define TCTRxCP 8 /* Transmit clock = TRxC pin */ -#define TCBR 0x10 /* Transmit clock = BR Generator output */ -#define TCDPLL 0x18 /* Transmit clock = DPLL output */ -#define RCRTxCP 0 /* Receive clock = RTxC pin */ -#define RCTRxCP 0x20 /* Receive clock = TRxC pin */ -#define RCBR 0x40 /* Receive clock = BR Generator output */ -#define RCDPLL 0x60 /* Receive clock = DPLL output */ -#define RTxCX 0x80 /* RTxC Xtal/No Xtal */ - -/* Write Register 12 (lower byte of baud rate generator time constant) */ - -/* Write Register 13 (upper byte of baud rate generator time constant) */ - -/* Write Register 14 (Misc control bits) */ -#define BRENABL 1 /* Baud rate generator enable */ -#define BRSRC 2 /* Baud rate generator source */ -#define DTRREQ 4 /* DTR/Request function */ -#define AUTOECHO 8 /* Auto Echo */ -#define LOOPBAK 0x10 /* Local loopback */ -#define SEARCH 0x20 /* Enter search mode */ -#define RMC 0x40 /* Reset missing clock */ -#define DISDPLL 0x60 /* Disable DPLL */ -#define SSBR 0x80 /* Set DPLL source = BR generator */ -#define SSRTxC 0xa0 /* Set DPLL source = RTxC */ -#define SFMM 0xc0 /* Set FM mode */ -#define SNRZI 0xe0 /* Set NRZI mode */ - -/* Write Register 15 (external/status interrupt control) */ -#define PRIME 1 /* R5' etc register access (Z85C30/230 only) */ -#define ZCIE 2 /* Zero count IE */ -#define FIFOE 4 /* Z85230 only */ -#define DCDIE 8 /* DCD IE */ -#define SYNCIE 0x10 /* Sync/hunt IE */ -#define CTSIE 0x20 /* CTS IE */ -#define TxUIE 0x40 /* Tx Underrun/EOM IE */ -#define BRKIE 0x80 /* Break/Abort IE */ - - -/* Read Register 0 */ -#define Rx_CH_AV 0x1 /* Rx Character Available */ -#define ZCOUNT 0x2 /* Zero count */ -#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */ -#define DCD 0x8 /* DCD */ -#define SYNC_HUNT 0x10 /* Sync/hunt */ -#define CTS 0x20 /* CTS */ -#define TxEOM 0x40 /* Tx underrun */ -#define BRK_ABRT 0x80 /* Break/Abort */ - -/* Read Register 1 */ -#define ALL_SNT 0x1 /* All sent */ -/* Residue Data for 8 Rx bits/char programmed */ -#define RES3 0x8 /* 0/3 */ -#define RES4 0x4 /* 0/4 */ -#define RES5 0xc /* 0/5 */ -#define RES6 0x2 /* 0/6 */ -#define RES7 0xa /* 0/7 */ -#define RES8 0x6 /* 0/8 */ -#define RES18 0xe /* 1/8 */ -#define RES28 0x0 /* 2/8 */ -/* Special Rx Condition Interrupts */ -#define PAR_ERR 0x10 /* Parity error */ -#define Rx_OVR 0x20 /* Rx Overrun Error */ -#define CRC_ERR 0x40 /* CRC/Framing Error */ -#define END_FR 0x80 /* End of Frame (SDLC) */ - -/* Read Register 2 (channel b only) - Interrupt vector */ - -/* Read Register 3 (interrupt pending register) ch a only */ -#define CHBEXT 0x1 /* Channel B Ext/Stat IP */ -#define CHBTxIP 0x2 /* Channel B Tx IP */ -#define CHBRxIP 0x4 /* Channel B Rx IP */ -#define CHAEXT 0x8 /* Channel A Ext/Stat IP */ -#define CHATxIP 0x10 /* Channel A Tx IP */ -#define CHARxIP 0x20 /* Channel A Rx IP */ - -/* Read Register 8 (receive data register) */ - -/* Read Register 10 (misc status bits) */ -#define ONLOOP 2 /* On loop */ -#define LOOPSEND 0x10 /* Loop sending */ -#define CLK2MIS 0x40 /* Two clocks missing */ -#define CLK1MIS 0x80 /* One clock missing */ - -/* Read Register 12 (lower byte of baud rate 
generator constant) */
-
-/* Read Register 13 (upper byte of baud rate generator constant) */
-
-/* Read Register 15 (value of WR 15) */
-
-
-/*
- * Interrupt handling functions for this SCC
- */
-
-struct z8530_channel;
-
-struct z8530_irqhandler
-{
-	void (*rx)(struct z8530_channel *);
-	void (*tx)(struct z8530_channel *);
-	void (*status)(struct z8530_channel *);
-};
-
-/*
- * A channel of the Z8530
- */
-
-struct z8530_channel
-{
-	struct z8530_irqhandler *irqs;	/* IRQ handlers */
-	/*
-	 * Synchronous
-	 */
-	u16	count;		/* Bytes received */
-	u16	max;		/* Most we can receive this frame */
-	u16	mtu;		/* MTU of the device */
-	u8	*dptr;		/* Pointer into rx buffer */
-	struct sk_buff	*skb;	/* Buffer dptr points into */
-	struct sk_buff	*skb2;	/* Pending buffer */
-	u8	status;		/* Current DCD */
-	u8	dcdcheck;	/* which bit to check for line */
-	u8	sync;		/* Set if in sync mode */
-
-	u8	regs[32];	/* Register map for the chip */
-	u8	pendregs[32];	/* Pending register values */
-
-	struct sk_buff	*tx_skb;	/* Buffer being transmitted */
-	struct sk_buff	*tx_next_skb;	/* Next transmit buffer */
-	u8	*tx_ptr;	/* Byte pointer into the buffer */
-	u8	*tx_next_ptr;	/* Next pointer to use */
-	u8	*tx_dma_buf[2];	/* TX flip buffers for DMA */
-	u8	tx_dma_used;	/* Flip buffer usage toggler */
-	u16	txcount;	/* Count of bytes to transmit */
-
-	void	(*rx_function)(struct z8530_channel *, struct sk_buff *);
-
-	/*
-	 * Sync DMA
-	 */
-
-	u8	rxdma;		/* DMA channels */
-	u8	txdma;
-	u8	rxdma_on;	/* DMA active if flag set */
-	u8	txdma_on;
-	u8	dma_num;	/* Buffer we are DMAing into */
-	u8	dma_ready;	/* Is the other buffer free */
-	u8	dma_tx;		/* TX is to use DMA */
-	u8	*rx_buf[2];	/* The flip buffers */
-
-	/*
-	 * System
-	 */
-
-	struct z8530_dev *dev;	/* Z85230 chip instance we are from */
-	unsigned long	ctrlio;	/* I/O ports */
-	unsigned long	dataio;
-
-	/*
-	 * For PC we encode this way.
-	 */
-#define Z8530_PORT_SLEEP	0x80000000
-#define Z8530_PORT_OF(x)	((x)&0xFFFF)
-
-	u32	rx_overrun;	/* Overruns - not done yet */
-	u32	rx_crc_err;
-
-	/*
-	 * Bound device pointers
-	 */
-
-	void	*private;	/* For our owner */
-	struct net_device	*netdevice;	/* Network layer device */
-
-	spinlock_t	*lock;	/* Device lock */
-};
-
-/*
- * Each Z853x0 device.
- */
-
-struct z8530_dev
-{
-	char *name;	/* Device instance name */
-	struct z8530_channel chanA;	/* SCC channel A */
-	struct z8530_channel chanB;	/* SCC channel B */
-	int type;
-#define Z8530	0	/* NMOS dinosaur */
-#define Z85C30	1	/* CMOS - better */
-#define Z85230	2	/* CMOS with real FIFO */
-	int irq;	/* Interrupt for the device */
-	int active;	/* Soft interrupt enable - the Mac doesn't
-			   always have a hard disable on its 8530s...
*/ - spinlock_t lock; -}; - - -/* - * Functions - */ - -extern u8 z8530_dead_port[]; -extern u8 z8530_hdlc_kilostream_85230[]; -extern u8 z8530_hdlc_kilostream[]; -irqreturn_t z8530_interrupt(int, void *); -void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io); -int z8530_init(struct z8530_dev *); -int z8530_shutdown(struct z8530_dev *); -int z8530_sync_open(struct net_device *, struct z8530_channel *); -int z8530_sync_close(struct net_device *, struct z8530_channel *); -int z8530_sync_dma_open(struct net_device *, struct z8530_channel *); -int z8530_sync_dma_close(struct net_device *, struct z8530_channel *); -int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *); -int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *); -int z8530_channel_load(struct z8530_channel *, u8 *); -netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb); -void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb); - - -/* - * Standard interrupt vector sets - */ - -extern struct z8530_irqhandler z8530_sync, z8530_async, z8530_nop; - -/* - * Asynchronous Interfacing - */ - -/* - * The size of the serial xmit buffer is 1 page, or 4096 bytes - */ - -#define SERIAL_XMIT_SIZE 4096 -#define WAKEUP_CHARS 256 - -/* - * Events are used to schedule things to happen at timer-interrupt - * time, instead of at rs interrupt time. - */ -#define RS_EVENT_WRITE_WAKEUP 0 - -/* Internal flags used only by kernel/chr_drv/serial.c */ -#define ZILOG_INITIALIZED 0x80000000 /* Serial port was initialized */ -#define ZILOG_CALLOUT_ACTIVE 0x40000000 /* Call out device is active */ -#define ZILOG_NORMAL_ACTIVE 0x20000000 /* Normal device is active */ -#define ZILOG_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */ -#define ZILOG_CLOSING 0x08000000 /* Serial port is closing */ -#define ZILOG_CTS_FLOW 0x04000000 /* Do CTS flow control */ -#define ZILOG_CHECK_CD 0x02000000 /* i.e., CLOCAL */ - -#endif /* !(_Z8530_H) */ diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index 0fad1331303c..aa9a7a5970fd 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -19,6 +19,7 @@ #include <linux/if_arp.h> #include <linux/icmp.h> #include <linux/suspend.h> +#include <net/dst_metadata.h> #include <net/icmp.h> #include <net/rtnetlink.h> #include <net/ip_tunnels.h> @@ -167,7 +168,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) goto err_peer; } - mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; + mtu = skb_valid_dst(skb) ? 
dst_mtu(skb_dst(skb)) : dev->mtu; __skb_queue_head_init(&packets); if (!skb_is_gso(skb)) { diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 00ddeee123c2..9f84a6fde0c2 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1500,7 +1500,7 @@ static int ar5523_load_firmware(struct usb_device *dev) return -ENOENT; } - txblock = kmalloc(sizeof(*txblock), GFP_KERNEL); + txblock = kzalloc(sizeof(*txblock), GFP_KERNEL); if (!txblock) goto out; @@ -1512,7 +1512,6 @@ static int ar5523_load_firmware(struct usb_device *dev) if (!fwbuf) goto out_free_rxblock; - memset(txblock, 0, sizeof(struct ar5523_fwblock)); txblock->flags = cpu_to_be32(AR5523_WRITE_BLOCK); txblock->total = cpu_to_be32(fw->size); diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 9162b02b7211..24283c02a5ef 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -1633,7 +1633,7 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar) return; } - ret = mmc_hw_reset(ar_sdio->func->card->host); + ret = mmc_hw_reset(ar_sdio->func->card); if (ret) ath10k_warn(ar, "unable to reset sdio: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index c76cac5d6849..1957e1713548 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -3150,6 +3150,20 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, arvif->do_not_send_tmpl = true; else arvif->do_not_send_tmpl = false; + + if (vif->bss_conf.he_support) { + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + WMI_VDEV_PARAM_BA_MODE, + WMI_BA_MODE_BUFFER_SIZE_256); + if (ret) + ath11k_warn(ar->ab, + "failed to set BA BUFFER SIZE 256 for vdev: %d\n", + arvif->vdev_id); + else + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "Set BA BUFFER SIZE 256 for VDEV: %d\n", + arvif->vdev_id); + } } if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { @@ -3185,14 +3199,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, if (arvif->is_up && vif->bss_conf.he_support && vif->bss_conf.he_oper.params) { - ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, - WMI_VDEV_PARAM_BA_MODE, - WMI_BA_MODE_BUFFER_SIZE_256); - if (ret) - ath11k_warn(ar->ab, - "failed to set BA BUFFER SIZE 256 for vdev: %d\n", - arvif->vdev_id); - param_id = WMI_VDEV_PARAM_HEOPS_0_31; param_value = vif->bss_conf.he_oper.params; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index eac34063af5a..77144647f4fc 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix) continue; txinfo = IEEE80211_SKB_CB(bf->bf_mpdu); - fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0]; + fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0]; if (fi->keyix == keyix) return true; } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 512f441d81e5..ba16a7f3e23d 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); BUILD_BUG_ON(sizeof(struct ath_frame_info) 
> - sizeof(tx_info->rate_driver_data)); - return (struct ath_frame_info *) &tx_info->rate_driver_data[0]; + sizeof(tx_info->status.status_driver_data)); + return (struct ath_frame_info *) &tx_info->status.status_driver_data[0]; } static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) @@ -2542,6 +2542,16 @@ skip_tx_complete: spin_unlock_irqrestore(&sc->tx.txbuflock, flags); } +static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info) +{ + void *ptr = &tx_info->status; + + memset(ptr + sizeof(tx_info->status.rates), 0, + sizeof(tx_info->status) - + sizeof(tx_info->status.rates) - + sizeof(tx_info->status.status_driver_data)); +} + static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_status *ts, int nframes, int nbad, int txok) @@ -2553,6 +2563,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, struct ath_hw *ah = sc->sc_ah; u8 i, tx_rateindex; + ath_clear_tx_status(tx_info); + if (txok) tx_info->status.ack_signal = ts->ts_rssi; @@ -2567,6 +2579,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, tx_info->status.ampdu_len = nframes; tx_info->status.ampdu_ack_len = nframes - nbad; + tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; + + for (i = tx_rateindex + 1; i < hw->max_rates; i++) { + tx_info->status.rates[i].count = 0; + tx_info->status.rates[i].idx = -1; + } + if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) { /* @@ -2588,16 +2607,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, tx_info->status.rates[tx_rateindex].count = hw->max_rate_tries; } - - for (i = tx_rateindex + 1; i < hw->max_rates; i++) { - tx_info->status.rates[i].count = 0; - tx_info->status.rates[i].idx = -1; - } - - tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; - - /* we report airtime in ath_tx_count_airtime(), don't report twice */ - tx_info->status.tx_time = 0; } static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index ba3c159111d3..212fbbe1cd7e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype { BRCMF_SDIO_FT_SUB, }; -#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu)) +#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu)) /* SDIO Pad drive strength to select value mappings */ struct sdiod_drive_str { @@ -4165,7 +4165,7 @@ static int brcmf_sdio_bus_reset(struct device *dev) /* reset the adapter */ sdio_claim_host(sdiodev->func1); - mmc_hw_reset(sdiodev->func1->card->host); + mmc_hw_reset(sdiodev->func1->card); sdio_release_host(sdiodev->func1); brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index e931672bb836..76004bda0c02 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -2659,7 +2659,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) /* Run a HW reset of the SDIO interface. 
*/ sdio_claim_host(func); - ret = mmc_hw_reset(func->card->host); + ret = mmc_hw_reset(func->card); sdio_release_host(func); switch (ret) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index 8a22ee581674..df85ebc6e1df 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id) mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9); /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */ - mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf); + mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf); /* RG_SSUSB_CDR_BR_PE1D = 0x3 */ mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3); diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 7b4e8cc36b49..cf8d909fa826 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -145,7 +145,7 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) * To guarantee that the SDIO card is power cycled, as required to make * the FW programming to succeed, let's do a brute force HW reset. */ - mmc_hw_reset(card->host); + mmc_hw_reset(card); sdio_enable_func(func); sdio_release_host(func); diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c index 5b62cf3b3c42..fad642f9ffd8 100644 --- a/drivers/net/wwan/wwan_hwsim.c +++ b/drivers/net/wwan/wwan_hwsim.c @@ -33,6 +33,7 @@ static struct dentry *wwan_hwsim_debugfs_devcreate; static DEFINE_SPINLOCK(wwan_hwsim_devs_lock); static LIST_HEAD(wwan_hwsim_devs); static unsigned int wwan_hwsim_dev_idx; +static struct workqueue_struct *wwan_wq; struct wwan_hwsim_dev { struct list_head list; @@ -371,7 +372,7 @@ static ssize_t wwan_hwsim_debugfs_portdestroy_write(struct file *file, * waiting this callback to finish in the debugfs_remove() call. So, * use workqueue. */ - schedule_work(&port->del_work); + queue_work(wwan_wq, &port->del_work); return count; } @@ -416,7 +417,7 @@ static ssize_t wwan_hwsim_debugfs_devdestroy_write(struct file *file, * waiting this callback to finish in the debugfs_remove() call. So, * use workqueue. 
*/ - schedule_work(&dev->del_work); + queue_work(wwan_wq, &dev->del_work); return count; } @@ -506,9 +507,15 @@ static int __init wwan_hwsim_init(void) if (wwan_hwsim_devsnum < 0 || wwan_hwsim_devsnum > 128) return -EINVAL; + wwan_wq = alloc_workqueue("wwan_wq", 0, 0); + if (!wwan_wq) + return -ENOMEM; + wwan_hwsim_class = class_create(THIS_MODULE, "wwan_hwsim"); - if (IS_ERR(wwan_hwsim_class)) - return PTR_ERR(wwan_hwsim_class); + if (IS_ERR(wwan_hwsim_class)) { + err = PTR_ERR(wwan_hwsim_class); + goto err_wq_destroy; + } wwan_hwsim_debugfs_topdir = debugfs_create_dir("wwan_hwsim", NULL); wwan_hwsim_debugfs_devcreate = @@ -523,9 +530,13 @@ static int __init wwan_hwsim_init(void) return 0; err_clean_devs: + debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */ wwan_hwsim_free_devs(); + flush_workqueue(wwan_wq); /* Wait deletion works completion */ debugfs_remove(wwan_hwsim_debugfs_topdir); class_destroy(wwan_hwsim_class); +err_wq_destroy: + destroy_workqueue(wwan_wq); return err; } @@ -534,9 +545,10 @@ static void __exit wwan_hwsim_exit(void) { debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */ wwan_hwsim_free_devs(); - flush_scheduled_work(); /* Wait deletion works completion */ + flush_workqueue(wwan_wq); /* Wait deletion works completion */ debugfs_remove(wwan_hwsim_debugfs_topdir); class_destroy(wwan_hwsim_class); + destroy_workqueue(wwan_wq); } module_init(wwan_hwsim_init); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index fe8e21ad8ed9..8e035374a370 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -42,7 +42,6 @@ #include <xen/balloon.h> #define XENVIF_QUEUE_LENGTH 32 -#define XENVIF_NAPI_WEIGHT 64 /* Number of bytes allowed on the internal guest Rx queue. */ #define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE) @@ -739,7 +738,7 @@ int xenvif_connect_data(struct xenvif_queue *queue, atomic_set(&queue->inflight_packets, 0); netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll, - XENVIF_NAPI_WEIGHT); + NAPI_POLL_WEIGHT); queue->stalled = true; |
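Beyond the z85230 removal, the recurring shapes in this batch are mechanical API migrations: mmc_hw_reset() now takes the struct mmc_card rather than its host, xen-netback adopts the shared NAPI_POLL_WEIGHT, and flush_scheduled_work() users move to a private workqueue they can flush and destroy themselves, as the wwan_hwsim hunks above do. A reduced sketch of that last pattern (module and queue names are illustrative, not from any of these drivers):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* owned by this module */

static int __init my_init(void)
{
	my_wq = alloc_workqueue("my_wq", 0, 0);
	return my_wq ? 0 : -ENOMEM;
}

static void __exit my_exit(void)
{
	/* Waits only for work this module queued via queue_work(my_wq, ...);
	 * flush_scheduled_work() would have waited on the whole system_wq.
	 */
	flush_workqueue(my_wq);
	destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");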